• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
3 // Copyright (C) 2008 Juergen Beisert
4 
5 #include <linux/clk.h>
6 #include <linux/completion.h>
7 #include <linux/delay.h>
8 #include <linux/dmaengine.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/err.h>
11 #include <linux/interrupt.h>
12 #include <linux/io.h>
13 #include <linux/irq.h>
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/pinctrl/consumer.h>
17 #include <linux/platform_device.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/slab.h>
20 #include <linux/spi/spi.h>
21 #include <linux/spi/spi_bitbang.h>
22 #include <linux/types.h>
23 #include <linux/of.h>
24 #include <linux/of_device.h>
25 #include <linux/property.h>
26 
27 #include <linux/platform_data/dma-imx.h>
28 
#define DRIVER_NAME "spi_imx"

/* Module parameter: allows disabling DMA globally at load/run time. */
static bool use_dma = true;
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Enable usage of DMA when available (default)");

#define MXC_RPM_TIMEOUT		2000 /* 2000ms */

/* Register offsets shared by all CSPI/eCSPI register layouts */
#define MXC_CSPIRXDATA		0x00
#define MXC_CSPITXDATA		0x04
#define MXC_CSPICTRL		0x08
#define MXC_CSPIINT		0x0c
#define MXC_RESET		0x1c

/* generic defines to abstract from the different register layouts */
#define MXC_INT_RR	(1 << 0) /* Receive data ready interrupt */
#define MXC_INT_TE	(1 << 1) /* Transmit FIFO empty interrupt */
#define MXC_INT_RDR	BIT(4) /* Receive data threshold interrupt */

/* The maximum bytes that a sdma BD can transfer. */
#define MAX_SDMA_BD_BYTES (1 << 15)
#define MX51_ECSPI_CTRL_MAX_BURST	512
/* The maximum bytes that IMX53_ECSPI can transfer in slave mode.*/
#define MX53_MAX_TRANSFER_BYTES		512
/*
 * Controller flavours supported by this driver; each selects one of the
 * spi_imx_devtype_data tables defined near the bottom of this file.
 */
enum spi_imx_devtype {
	IMX1_CSPI,
	IMX21_CSPI,
	IMX27_CSPI,
	IMX31_CSPI,
	IMX35_CSPI,	/* CSPI on all i.mx except above */
	IMX51_ECSPI,	/* ECSPI on i.mx51 */
	IMX53_ECSPI,	/* ECSPI on i.mx53 and later */
};
63 
struct spi_imx_data;

/*
 * Per-SoC operations and properties.  All hardware access in the common
 * code is routed through these hooks; one instance per controller
 * flavour is defined at the bottom of this file.
 */
struct spi_imx_devtype_data {
	void (*intctrl)(struct spi_imx_data *, int);	/* enable/disable irq sources */
	int (*prepare_message)(struct spi_imx_data *, struct spi_message *);
	int (*prepare_transfer)(struct spi_imx_data *, struct spi_device *);
	void (*trigger)(struct spi_imx_data *);		/* start the exchange (XCH) */
	int (*rx_available)(struct spi_imx_data *);	/* non-zero if RX FIFO holds data */
	void (*reset)(struct spi_imx_data *);
	void (*setup_wml)(struct spi_imx_data *);	/* program DMA watermark levels */
	void (*disable)(struct spi_imx_data *);
	void (*disable_dma)(struct spi_imx_data *);
	bool has_dmamode;
	bool has_slavemode;
	unsigned int fifo_size;
	bool dynamic_burst;
	/*
	 * ERR009165 fixed or not:
	 * https://www.nxp.com/docs/en/errata/IMX6DQCE.pdf
	 */
	bool tx_glitch_fixed;
	enum spi_imx_devtype devtype;
};
87 
/* Per-controller driver state. */
struct spi_imx_data {
	struct spi_bitbang bitbang;
	struct device *dev;

	struct completion xfer_done;
	void __iomem *base;		/* mapped register base */
	unsigned long base_phys;	/* physical base, used for DMA setup */

	struct clk *clk_per;
	struct clk *clk_ipg;
	unsigned long spi_clk;		/* reference clock rate (Hz) */
	unsigned int spi_bus_clk;	/* achieved SCLK rate (Hz) */

	unsigned int bits_per_word;
	unsigned int spi_drctl;		/* SPI_RDY (DRCTL) trigger mode */

	/* count: TX bytes still to push; remainder: RX bytes still expected */
	unsigned int count, remainder;
	void (*tx)(struct spi_imx_data *);	/* PIO fill-one-FIFO-word hook */
	void (*rx)(struct spi_imx_data *);	/* PIO drain-one-FIFO-word hook */
	void *rx_buf;
	const void *tx_buf;
	unsigned int txfifo; /* number of words pushed in tx FIFO */
	unsigned int dynamic_burst;

	/* Slave mode */
	bool slave_mode;
	bool slave_aborted;
	unsigned int slave_burst;	/* remaining bytes of the slave burst */

	/* DMA */
	bool usedma;
	u32 wml;			/* watermark level in FIFO words */
	struct completion dma_rx_completion;
	struct completion dma_tx_completion;

	const struct spi_imx_devtype_data *devtype_data;
};
125 
/* Helpers to query which controller flavour is being driven. */
static inline int is_imx27_cspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX27_CSPI;
}

static inline int is_imx35_cspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX35_CSPI;
}

static inline int is_imx51_ecspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX51_ECSPI;
}

static inline int is_imx53_ecspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX53_ECSPI;
}
145 
/*
 * Generate a PIO RX helper for the given word container type: read one
 * FIFO word and, if a receive buffer is set, store it and advance the
 * buffer.  spi_imx->remainder counts the bytes still expected on RX.
 */
#define MXC_SPI_BUF_RX(type)						\
static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx)		\
{									\
	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);	\
									\
	if (spi_imx->rx_buf) {						\
		*(type *)spi_imx->rx_buf = val;				\
		spi_imx->rx_buf += sizeof(type);			\
	}								\
									\
	spi_imx->remainder -= sizeof(type);				\
}

/*
 * Generate the matching PIO TX helper: fetch one word from the transmit
 * buffer (zero if there is none) and push it into the TX FIFO.
 * spi_imx->count counts the bytes still to be transmitted.
 */
#define MXC_SPI_BUF_TX(type)						\
static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx)		\
{									\
	type val = 0;							\
									\
	if (spi_imx->tx_buf) {						\
		val = *(type *)spi_imx->tx_buf;				\
		spi_imx->tx_buf += sizeof(type);			\
	}								\
									\
	spi_imx->count -= sizeof(type);					\
									\
	writel(val, spi_imx->base + MXC_CSPITXDATA);			\
}

/* Instantiate 8/16/32-bit PIO helpers. */
MXC_SPI_BUF_RX(u8)
MXC_SPI_BUF_TX(u8)
MXC_SPI_BUF_RX(u16)
MXC_SPI_BUF_TX(u16)
MXC_SPI_BUF_RX(u32)
MXC_SPI_BUF_TX(u32)
180 
181 /* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
182  * (which is currently not the case in this driver)
183  */
184 static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
185 	256, 384, 512, 768, 1024};
186 
187 /* MX21, MX27 */
spi_imx_clkdiv_1(unsigned int fin,unsigned int fspi,unsigned int max,unsigned int * fres)188 static unsigned int spi_imx_clkdiv_1(unsigned int fin,
189 		unsigned int fspi, unsigned int max, unsigned int *fres)
190 {
191 	int i;
192 
193 	for (i = 2; i < max; i++)
194 		if (fspi * mxc_clkdivs[i] >= fin)
195 			break;
196 
197 	*fres = fin / mxc_clkdivs[i];
198 	return i;
199 }
200 
/* MX1, MX31, MX35, MX51 CSPI */
/*
 * Compute the power-of-two divider (4 << i, i = 0..7) that brings @fin
 * down to at most @fspi.  Stores the achieved rate in *fres and returns
 * the 3-bit DATARATE field value.  If even divide-by-512 is too small,
 * 7 is returned anyway (best effort).
 */
static unsigned int spi_imx_clkdiv_2(unsigned int fin,
		unsigned int fspi, unsigned int *fres)
{
	unsigned int idx = 0;

	while (idx < 7 && fspi * (4U << idx) < fin)
		idx++;

	*fres = fin / (4U << idx);
	return idx;
}
217 
/*
 * Size in bytes of the in-memory container holding one SPI word of
 * @bits_per_word bits: 1, 2 or 4.
 */
static int spi_imx_bytes_per_word(const int bits_per_word)
{
	if (bits_per_word > 16)
		return 4;
	if (bits_per_word > 8)
		return 2;
	return 1;
}
227 
spi_imx_can_dma(struct spi_master * master,struct spi_device * spi,struct spi_transfer * transfer)228 static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
229 			 struct spi_transfer *transfer)
230 {
231 	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
232 
233 	if (!use_dma || master->fallback)
234 		return false;
235 
236 	if (!master->dma_rx)
237 		return false;
238 
239 	if (spi_imx->slave_mode)
240 		return false;
241 
242 	if (transfer->len < spi_imx->devtype_data->fifo_size)
243 		return false;
244 
245 	spi_imx->dynamic_burst = 0;
246 
247 	return true;
248 }
249 
/*
 * Note the number of natively supported chip selects for MX51 is 4. Some
 * devices may have less actual SS pins but the register map supports 4. When
 * using gpio chip selects the cs values passed into the macros below can go
 * outside the range 0 - 3. We therefore need to limit the cs value to avoid
 * corrupting bits outside the allocated locations.
 *
 * The simplest way to do this is to just mask the cs bits to 2 bits. This
 * still allows all 4 native chip selects to work as well as gpio chip selects
 * (which can use any of the 4 chip select configurations).
 */

/* eCSPI control register (CONREG) */
#define MX51_ECSPI_CTRL		0x08
#define MX51_ECSPI_CTRL_ENABLE		(1 <<  0)
#define MX51_ECSPI_CTRL_XCH		(1 <<  2)
#define MX51_ECSPI_CTRL_SMC		(1 << 3)
#define MX51_ECSPI_CTRL_MODE_MASK	(0xf << 4)
#define MX51_ECSPI_CTRL_DRCTL(drctl)	((drctl) << 16)
#define MX51_ECSPI_CTRL_POSTDIV_OFFSET	8
#define MX51_ECSPI_CTRL_PREDIV_OFFSET	12
#define MX51_ECSPI_CTRL_CS(cs)		((cs & 3) << 18)
#define MX51_ECSPI_CTRL_BL_OFFSET	20
#define MX51_ECSPI_CTRL_BL_MASK		(0xfff << 20)

/* eCSPI config register (CONFIGREG), per-chip-select mode bits */
#define MX51_ECSPI_CONFIG	0x0c
#define MX51_ECSPI_CONFIG_SCLKPHA(cs)	(1 << ((cs & 3) +  0))
#define MX51_ECSPI_CONFIG_SCLKPOL(cs)	(1 << ((cs & 3) +  4))
#define MX51_ECSPI_CONFIG_SBBCTRL(cs)	(1 << ((cs & 3) +  8))
#define MX51_ECSPI_CONFIG_SSBPOL(cs)	(1 << ((cs & 3) + 12))
#define MX51_ECSPI_CONFIG_SCLKCTL(cs)	(1 << ((cs & 3) + 20))

/* Interrupt control register */
#define MX51_ECSPI_INT		0x10
#define MX51_ECSPI_INT_TEEN		(1 <<  0)
#define MX51_ECSPI_INT_RREN		(1 <<  3)
#define MX51_ECSPI_INT_RDREN		(1 <<  4)

/* DMA control register: watermarks and request enables */
#define MX51_ECSPI_DMA		0x14
#define MX51_ECSPI_DMA_TX_WML(wml)	((wml) & 0x3f)
#define MX51_ECSPI_DMA_RX_WML(wml)	(((wml) & 0x3f) << 16)
#define MX51_ECSPI_DMA_RXT_WML(wml)	(((wml) & 0x3f) << 24)

#define MX51_ECSPI_DMA_TEDEN		(1 << 7)
#define MX51_ECSPI_DMA_RXDEN		(1 << 23)
#define MX51_ECSPI_DMA_RXTDEN		(1 << 31)

/* Status register */
#define MX51_ECSPI_STAT		0x18
#define MX51_ECSPI_STAT_RR		(1 <<  3)

/* Test register, used here for loopback control */
#define MX51_ECSPI_TESTREG	0x20
#define MX51_ECSPI_TESTREG_LBC	BIT(31)
300 
/*
 * Read one 32-bit FIFO word and store it in the RX buffer, undoing the
 * byte ordering the controller applies when several narrow (8/16-bit)
 * words are packed into one 32-bit FIFO entry.  The swap is only needed
 * on little-endian hosts.
 */
static void spi_imx_buf_rx_swap_u32(struct spi_imx_data *spi_imx)
{
	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);
#ifdef __LITTLE_ENDIAN
	unsigned int bytes_per_word;
#endif

	if (spi_imx->rx_buf) {
#ifdef __LITTLE_ENDIAN
		bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
		if (bytes_per_word == 1)
			val = cpu_to_be32(val);		/* full byte reversal */
		else if (bytes_per_word == 2)
			val = (val << 16) | (val >> 16);	/* swap half-words */
#endif
		*(u32 *)spi_imx->rx_buf = val;
		spi_imx->rx_buf += sizeof(u32);
	}

	spi_imx->remainder -= sizeof(u32);
}
322 
/*
 * RX hook for dynamic-burst transfers: use full 32-bit FIFO reads while
 * the remaining length is a multiple of 4, then drain the trailing
 * unaligned bytes (or a final u16 word) individually.
 */
static void spi_imx_buf_rx_swap(struct spi_imx_data *spi_imx)
{
	int unaligned;
	u32 val;

	unaligned = spi_imx->remainder % 4;

	if (!unaligned) {
		spi_imx_buf_rx_swap_u32(spi_imx);
		return;
	}

	if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
		spi_imx_buf_rx_u16(spi_imx);
		return;
	}

	/* tail: unpack the remaining 1-3 bytes from one FIFO word, MSB first */
	val = readl(spi_imx->base + MXC_CSPIRXDATA);

	while (unaligned--) {
		if (spi_imx->rx_buf) {
			*(u8 *)spi_imx->rx_buf = (val >> (8 * unaligned)) & 0xff;
			spi_imx->rx_buf++;
		}
		spi_imx->remainder--;
	}
}
350 
/*
 * Push one 32-bit word from the TX buffer into the FIFO, applying the
 * byte ordering the controller expects when several narrow (8/16-bit)
 * words are packed into one FIFO entry (little-endian hosts only).
 * Counterpart of spi_imx_buf_rx_swap_u32().
 */
static void spi_imx_buf_tx_swap_u32(struct spi_imx_data *spi_imx)
{
	u32 val = 0;
#ifdef __LITTLE_ENDIAN
	unsigned int bytes_per_word;
#endif

	if (spi_imx->tx_buf) {
		val = *(u32 *)spi_imx->tx_buf;
		spi_imx->tx_buf += sizeof(u32);
	}

	spi_imx->count -= sizeof(u32);
#ifdef __LITTLE_ENDIAN
	bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);

	if (bytes_per_word == 1)
		val = cpu_to_be32(val);		/* full byte reversal */
	else if (bytes_per_word == 2)
		val = (val << 16) | (val >> 16);	/* swap half-words */
#endif
	writel(val, spi_imx->base + MXC_CSPITXDATA);
}
374 
/*
 * TX hook for dynamic-burst transfers: use full 32-bit FIFO writes while
 * the remaining length is a multiple of 4, then pack the trailing
 * unaligned bytes (or a final u16 word) into one last FIFO word.
 */
static void spi_imx_buf_tx_swap(struct spi_imx_data *spi_imx)
{
	int unaligned;
	u32 val = 0;

	unaligned = spi_imx->count % 4;

	if (!unaligned) {
		spi_imx_buf_tx_swap_u32(spi_imx);
		return;
	}

	if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
		spi_imx_buf_tx_u16(spi_imx);
		return;
	}

	/* tail: pack the remaining 1-3 bytes into one FIFO word, MSB first */
	while (unaligned--) {
		if (spi_imx->tx_buf) {
			val |= *(u8 *)spi_imx->tx_buf << (8 * unaligned);
			spi_imx->tx_buf++;
		}
		spi_imx->count--;
	}

	writel(val, spi_imx->base + MXC_CSPITXDATA);
}
402 
/*
 * Slave-mode RX hook for i.MX53 eCSPI: read one FIFO word and copy the
 * valid bytes of the current burst into the RX buffer.  A partial final
 * word carries its payload in the low-order bytes, hence the offset into
 * the big-endian converted value.
 */
static void mx53_ecspi_rx_slave(struct spi_imx_data *spi_imx)
{
	u32 val = be32_to_cpu(readl(spi_imx->base + MXC_CSPIRXDATA));

	if (spi_imx->rx_buf) {
		int n_bytes = spi_imx->slave_burst % sizeof(val);

		if (!n_bytes)
			n_bytes = sizeof(val);	/* full 4-byte word */

		memcpy(spi_imx->rx_buf,
		       ((u8 *)&val) + sizeof(val) - n_bytes, n_bytes);

		spi_imx->rx_buf += n_bytes;
		spi_imx->slave_burst -= n_bytes;
	}

	spi_imx->remainder -= sizeof(u32);
}
422 
/*
 * Slave-mode TX hook for i.MX53 eCSPI: pack up to 4 bytes from the TX
 * buffer into one FIFO word.  A partial final word is right-aligned
 * before the big-endian conversion, mirroring mx53_ecspi_rx_slave().
 */
static void mx53_ecspi_tx_slave(struct spi_imx_data *spi_imx)
{
	u32 val = 0;
	int n_bytes = spi_imx->count % sizeof(val);

	if (!n_bytes)
		n_bytes = sizeof(val);	/* full 4-byte word */

	if (spi_imx->tx_buf) {
		memcpy(((u8 *)&val) + sizeof(val) - n_bytes,
		       spi_imx->tx_buf, n_bytes);
		val = cpu_to_be32(val);
		spi_imx->tx_buf += n_bytes;
	}

	spi_imx->count -= n_bytes;

	writel(val, spi_imx->base + MXC_CSPITXDATA);
}
442 
/* MX51 eCSPI */
/*
 * Compute the eCSPI clock divider fields for the requested rate @fspi.
 *
 * @fres receives the actually achieved SCLK rate.  Returns the PREDIV
 * and POSTDIV fields already shifted into their CONREG positions, or
 * 0xff if the rate cannot be reached even with maximum division.
 */
static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
				      unsigned int fspi, unsigned int *fres)
{
	/*
	 * there are two 4-bit dividers, the pre-divider divides by
	 * $pre, the post-divider by 2^$post
	 */
	unsigned int pre, post;
	unsigned int fin = spi_imx->spi_clk;

	fspi = min(fspi, fin);

	post = fls(fin) - fls(fspi);
	if (fin > fspi << post)
		post++;

	/* now we have: (fin <= fspi << post) with post being minimal */

	/* the pre-divider can cover a factor of up to 16, so back off by 4 */
	post = max(4U, post) - 4;
	if (unlikely(post > 0xf)) {
		dev_err(spi_imx->dev, "cannot set clock freq: %u (base freq: %u)\n",
				fspi, fin);
		return 0xff;
	}

	pre = DIV_ROUND_UP(fin, fspi << post) - 1;

	dev_dbg(spi_imx->dev, "%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
			__func__, fin, fspi, post, pre);

	/* Resulting frequency for the SCLK line. */
	*fres = (fin / (pre + 1)) >> post;

	return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
		(post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
}
480 
/*
 * Translate the generic MXC_INT_* interrupt mask in @enable into eCSPI
 * INTREG bits and program the register (bits not requested are
 * disabled).
 */
static void mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned val = 0;

	if (enable & MXC_INT_TE)
		val |= MX51_ECSPI_INT_TEEN;

	if (enable & MXC_INT_RR)
		val |= MX51_ECSPI_INT_RREN;

	if (enable & MXC_INT_RDR)
		val |= MX51_ECSPI_INT_RDREN;

	writel(val, spi_imx->base + MX51_ECSPI_INT);
}
496 
/* Start the SPI burst by setting the XCH bit in the control register. */
static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
{
	u32 reg;

	reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
	reg |= MX51_ECSPI_CTRL_XCH;
	writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
}

/* Clear all DMA watermarks and request enables. */
static void mx51_disable_dma(struct spi_imx_data *spi_imx)
{
	writel(0, spi_imx->base + MX51_ECSPI_DMA);
}

/* Disable the whole eCSPI block by clearing the EN bit. */
static void mx51_ecspi_disable(struct spi_imx_data *spi_imx)
{
	u32 ctrl;

	ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
	ctrl &= ~MX51_ECSPI_CTRL_ENABLE;
	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
}
519 
/*
 * Per-message setup for the eCSPI: channel mode (master/slave),
 * SPI_RDY handling, chip select, loopback, clock polarity/phase and SS
 * polarity.  Per-transfer settings (clock divider, burst length) are
 * handled in mx51_ecspi_prepare_transfer().  Always returns 0.
 */
static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
				      struct spi_message *msg)
{
	struct spi_device *spi = msg->spi;
	struct spi_transfer *xfer;
	u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
	u32 min_speed_hz = ~0U;
	u32 testreg, delay;
	u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);

	/* set Master or Slave mode */
	if (spi_imx->slave_mode)
		ctrl &= ~MX51_ECSPI_CTRL_MODE_MASK;
	else
		ctrl |= MX51_ECSPI_CTRL_MODE_MASK;

	/*
	 * Enable SPI_RDY handling (falling edge/level triggered).
	 */
	if (spi->mode & SPI_READY)
		ctrl |= MX51_ECSPI_CTRL_DRCTL(spi_imx->spi_drctl);

	/* set chip select to use */
	ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select);

	/*
	 * The ctrl register must be written first, with the EN bit set other
	 * registers must not be written to.
	 */
	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);

	testreg = readl(spi_imx->base + MX51_ECSPI_TESTREG);
	if (spi->mode & SPI_LOOP)
		testreg |= MX51_ECSPI_TESTREG_LBC;
	else
		testreg &= ~MX51_ECSPI_TESTREG_LBC;
	writel(testreg, spi_imx->base + MX51_ECSPI_TESTREG);

	/*
	 * eCSPI burst completion by Chip Select signal in Slave mode
	 * is not functional for imx53 Soc, config SPI burst completed when
	 * BURST_LENGTH + 1 bits are received
	 */
	if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
		cfg &= ~MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
	else
		cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);

	if (spi->mode & SPI_CPHA)
		cfg |= MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
	else
		cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);

	if (spi->mode & SPI_CPOL) {
		cfg |= MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
		cfg |= MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
	} else {
		cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
		cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
	}

	if (spi->mode & SPI_CS_HIGH)
		cfg |= MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
	else
		cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);

	writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);

	/*
	 * Wait until the changes in the configuration register CONFIGREG
	 * propagate into the hardware. It takes exactly one tick of the
	 * SCLK clock, but we will wait two SCLK clock just to be sure. The
	 * effect of the delay it takes for the hardware to apply changes
	 * is noticable if the SCLK clock run very slow. In such a case, if
	 * the polarity of SCLK should be inverted, the GPIO ChipSelect might
	 * be asserted before the SCLK polarity changes, which would disrupt
	 * the SPI communication as the device on the other end would consider
	 * the change of SCLK polarity as a clock tick already.
	 *
	 * Because spi_imx->spi_bus_clk is only set in bitbang prepare_message
	 * callback, iterate over all the transfers in spi_message, find the
	 * one with lowest bus frequency, and use that bus frequency for the
	 * delay calculation. In case all transfers have speed_hz == 0, then
	 * min_speed_hz is ~0 and the resulting delay is zero.
	 */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!xfer->speed_hz)
			continue;
		min_speed_hz = min(xfer->speed_hz, min_speed_hz);
	}

	delay = (2 * 1000000) / min_speed_hz;
	if (likely(delay < 10))	/* SCLK is faster than 200 kHz */
		udelay(delay);
	else			/* SCLK is _very_ slow */
		usleep_range(delay, delay + 10);

	return 0;
}
619 
/*
 * Per-transfer setup for the eCSPI: program the burst length, clock
 * divider and the SMC (start-on-TX-write) mode bit.  Always returns 0.
 */
static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
				       struct spi_device *spi)
{
	u32 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
	u32 clk;

	/* Clear BL field and set the right value */
	ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
	if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
		ctrl |= (spi_imx->slave_burst * 8 - 1)
			<< MX51_ECSPI_CTRL_BL_OFFSET;
	else
		ctrl |= (spi_imx->bits_per_word - 1)
			<< MX51_ECSPI_CTRL_BL_OFFSET;

	/* set clock speed */
	ctrl &= ~(0xf << MX51_ECSPI_CTRL_POSTDIV_OFFSET |
		  0xf << MX51_ECSPI_CTRL_PREDIV_OFFSET);
	ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->spi_bus_clk, &clk);
	spi_imx->spi_bus_clk = clk;	/* record the achieved rate */

	/*
	 * ERR009165: work in XHC mode instead of SMC as PIO on the chips
	 * before i.mx6ul.
	 */
	if (spi_imx->usedma && spi_imx->devtype_data->tx_glitch_fixed)
		ctrl |= MX51_ECSPI_CTRL_SMC;
	else
		ctrl &= ~MX51_ECSPI_CTRL_SMC;

	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);

	return 0;
}
654 
/*
 * Program the DMA watermark levels and enable the TX/RX/RXT DMA
 * requests.  With ERR009165 unfixed the TX watermark is kept at 0
 * (request as soon as the FIFO is not full).
 */
static void mx51_setup_wml(struct spi_imx_data *spi_imx)
{
	u32 tx_wml = 0;

	if (spi_imx->devtype_data->tx_glitch_fixed)
		tx_wml = spi_imx->wml;
	/*
	 * Configure the DMA register: setup the watermark
	 * and enable DMA request.
	 */
	writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml - 1) |
		MX51_ECSPI_DMA_TX_WML(tx_wml) |
		MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
		MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN |
		MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA);
}

/* Non-zero when the RX FIFO holds at least one word (STAT.RR). */
static int mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR;
}

/* "Reset" for eCSPI: there is no reset bit, so just drain the RX FIFO. */
static void mx51_ecspi_reset(struct spi_imx_data *spi_imx)
{
	/* drain receive buffer */
	while (mx51_ecspi_rx_available(spi_imx))
		readl(spi_imx->base + MXC_CSPIRXDATA);
}
683 
/* i.MX31/i.MX35 CSPI interrupt enable bits */
#define MX31_INTREG_TEEN	(1 << 0)
#define MX31_INTREG_RREN	(1 << 3)

/* i.MX31/i.MX35 CSPI control register bits (MX35_* where layouts differ) */
#define MX31_CSPICTRL_ENABLE	(1 << 0)
#define MX31_CSPICTRL_MASTER	(1 << 1)
#define MX31_CSPICTRL_XCH	(1 << 2)
#define MX31_CSPICTRL_SMC	(1 << 3)
#define MX31_CSPICTRL_POL	(1 << 4)
#define MX31_CSPICTRL_PHA	(1 << 5)
#define MX31_CSPICTRL_SSCTL	(1 << 6)
#define MX31_CSPICTRL_SSPOL	(1 << 7)
#define MX31_CSPICTRL_BC_SHIFT	8
#define MX35_CSPICTRL_BL_SHIFT	20
#define MX31_CSPICTRL_CS_SHIFT	24
#define MX35_CSPICTRL_CS_SHIFT	12
#define MX31_CSPICTRL_DR_SHIFT	16

/* DMA request enable register */
#define MX31_CSPI_DMAREG	0x10
#define MX31_DMAREG_RH_DEN	(1<<4)
#define MX31_DMAREG_TH_DEN	(1<<1)

#define MX31_CSPISTATUS		0x14
#define MX31_STATUS_RR		(1 << 3)

/* Test register, used here for loopback control */
#define MX31_CSPI_TESTREG	0x1C
#define MX31_TEST_LBC		(1 << 14)
710 
/* These functions also work for the i.MX35, but be aware that
 * the i.MX35 has a slightly different register layout for bits
 * we do not use here.
 */

/* Translate generic MXC_INT_* flags into CSPI INTREG bits and program them. */
static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX31_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX31_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

/* Start the SPI burst by setting the XCH bit. */
static void mx31_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX31_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

/* No per-message setup is needed on i.MX31/i.MX35. */
static int mx31_prepare_message(struct spi_imx_data *spi_imx,
				struct spi_message *msg)
{
	return 0;
}
741 
/*
 * Per-transfer setup for i.MX31/i.MX35 CSPI: build the whole control
 * register (clock divider, burst length, SPI mode, chip select, SMC for
 * DMA), configure loopback and, when using DMA, the FIFO half-full/
 * half-empty request thresholds.  Always returns 0.
 */
static int mx31_prepare_transfer(struct spi_imx_data *spi_imx,
				 struct spi_device *spi)
{
	unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
	unsigned int clk;

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
		MX31_CSPICTRL_DR_SHIFT;
	spi_imx->spi_bus_clk = clk;

	/* burst length and SS control sit at different offsets on i.MX35 */
	if (is_imx35_cspi(spi_imx)) {
		reg |= (spi_imx->bits_per_word - 1) << MX35_CSPICTRL_BL_SHIFT;
		reg |= MX31_CSPICTRL_SSCTL;
	} else {
		reg |= (spi_imx->bits_per_word - 1) << MX31_CSPICTRL_BC_SHIFT;
	}

	if (spi->mode & SPI_CPHA)
		reg |= MX31_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX31_CSPICTRL_POL;
	if (spi->mode & SPI_CS_HIGH)
		reg |= MX31_CSPICTRL_SSPOL;
	/* native chip select only when no GPIO CS is provided */
	if (!spi->cs_gpiod)
		reg |= (spi->chip_select) <<
			(is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
						  MX31_CSPICTRL_CS_SHIFT);

	if (spi_imx->usedma)
		reg |= MX31_CSPICTRL_SMC;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	reg = readl(spi_imx->base + MX31_CSPI_TESTREG);
	if (spi->mode & SPI_LOOP)
		reg |= MX31_TEST_LBC;
	else
		reg &= ~MX31_TEST_LBC;
	writel(reg, spi_imx->base + MX31_CSPI_TESTREG);

	if (spi_imx->usedma) {
		/*
		 * configure DMA requests when RXFIFO is half full and
		 * when TXFIFO is half empty
		 */
		writel(MX31_DMAREG_RH_DEN | MX31_DMAREG_TH_DEN,
			spi_imx->base + MX31_CSPI_DMAREG);
	}

	return 0;
}
793 
/* Non-zero when the RX FIFO holds at least one word (STATUS.RR). */
static int mx31_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
}

/* "Reset" for i.MX31/i.MX35: no reset bit, so just drain the RX FIFO. */
static void mx31_reset(struct spi_imx_data *spi_imx)
{
	/* drain receive buffer */
	while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR)
		readl(spi_imx->base + MXC_CSPIRXDATA);
}
805 
/* i.MX21/i.MX27 CSPI interrupt register bits (RR is a status bit) */
#define MX21_INTREG_RR		(1 << 4)
#define MX21_INTREG_TEEN	(1 << 9)
#define MX21_INTREG_RREN	(1 << 13)

/* i.MX21/i.MX27 CSPI control register bits */
#define MX21_CSPICTRL_POL	(1 << 5)
#define MX21_CSPICTRL_PHA	(1 << 6)
#define MX21_CSPICTRL_SSPOL	(1 << 8)
#define MX21_CSPICTRL_XCH	(1 << 9)
#define MX21_CSPICTRL_ENABLE	(1 << 10)
#define MX21_CSPICTRL_MASTER	(1 << 11)
#define MX21_CSPICTRL_DR_SHIFT	14
#define MX21_CSPICTRL_CS_SHIFT	19
818 
/* Translate generic MXC_INT_* flags into CSPI INTREG bits and program them. */
static void mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX21_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX21_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

/* Start the SPI burst by setting the XCH bit. */
static void mx21_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX21_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

/* No per-message setup is needed on i.MX21/i.MX27. */
static int mx21_prepare_message(struct spi_imx_data *spi_imx,
				struct spi_message *msg)
{
	return 0;
}
845 
/*
 * Per-transfer setup for i.MX21/i.MX27 CSPI: build the control register
 * (table-based clock divider, burst length, SPI mode, chip select).
 * i.MX27 only supports the first 16 divider table entries.  Always
 * returns 0.
 */
static int mx21_prepare_transfer(struct spi_imx_data *spi_imx,
				 struct spi_device *spi)
{
	unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
	unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
	unsigned int clk;

	reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, spi_imx->spi_bus_clk, max, &clk)
		<< MX21_CSPICTRL_DR_SHIFT;
	spi_imx->spi_bus_clk = clk;

	/* burst length field lives in the low bits */
	reg |= spi_imx->bits_per_word - 1;

	if (spi->mode & SPI_CPHA)
		reg |= MX21_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX21_CSPICTRL_POL;
	if (spi->mode & SPI_CS_HIGH)
		reg |= MX21_CSPICTRL_SSPOL;
	/* native chip select only when no GPIO CS is provided */
	if (!spi->cs_gpiod)
		reg |= spi->chip_select << MX21_CSPICTRL_CS_SHIFT;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}

/* Non-zero when the RX FIFO holds at least one word (INTREG.RR). */
static int mx21_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR;
}

/* Trigger the controller's soft reset. */
static void mx21_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}
882 
/* i.MX1 CSPI interrupt register bits (RR is a status bit) */
#define MX1_INTREG_RR		(1 << 3)
#define MX1_INTREG_TEEN		(1 << 8)
#define MX1_INTREG_RREN		(1 << 11)

/* i.MX1 CSPI control register bits */
#define MX1_CSPICTRL_POL	(1 << 4)
#define MX1_CSPICTRL_PHA	(1 << 5)
#define MX1_CSPICTRL_XCH	(1 << 8)
#define MX1_CSPICTRL_ENABLE	(1 << 9)
#define MX1_CSPICTRL_MASTER	(1 << 10)
#define MX1_CSPICTRL_DR_SHIFT	13
893 
/* Translate generic MXC_INT_* flags into CSPI INTREG bits and program them. */
static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX1_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX1_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

/* Start the SPI burst by setting the XCH bit. */
static void mx1_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX1_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

/* No per-message setup is needed on i.MX1. */
static int mx1_prepare_message(struct spi_imx_data *spi_imx,
			       struct spi_message *msg)
{
	return 0;
}
920 
/*
 * Per-transfer setup for i.MX1 CSPI: build the control register
 * (power-of-two clock divider, burst length, clock polarity/phase).
 * Always returns 0.
 */
static int mx1_prepare_transfer(struct spi_imx_data *spi_imx,
				struct spi_device *spi)
{
	unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
	unsigned int clk;

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
		MX1_CSPICTRL_DR_SHIFT;
	spi_imx->spi_bus_clk = clk;

	/* burst length field lives in the low bits */
	reg |= spi_imx->bits_per_word - 1;

	if (spi->mode & SPI_CPHA)
		reg |= MX1_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX1_CSPICTRL_POL;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}

/* Non-zero when the RX FIFO holds at least one word (INTREG.RR). */
static int mx1_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
}

/* Trigger the controller's soft reset. */
static void mx1_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}
952 
/* Per-SoC operation/property tables (see struct spi_imx_devtype_data). */
static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
	.intctrl = mx1_intctrl,
	.prepare_message = mx1_prepare_message,
	.prepare_transfer = mx1_prepare_transfer,
	.trigger = mx1_trigger,
	.rx_available = mx1_rx_available,
	.reset = mx1_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_slavemode = false,
	.devtype = IMX1_CSPI,
};

static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
	.intctrl = mx21_intctrl,
	.prepare_message = mx21_prepare_message,
	.prepare_transfer = mx21_prepare_transfer,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_slavemode = false,
	.devtype = IMX21_CSPI,
};

static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
	/* i.mx27 cspi shares the functions with i.mx21 one */
	.intctrl = mx21_intctrl,
	.prepare_message = mx21_prepare_message,
	.prepare_transfer = mx21_prepare_transfer,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_slavemode = false,
	.devtype = IMX27_CSPI,
};

static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
	.intctrl = mx31_intctrl,
	.prepare_message = mx31_prepare_message,
	.prepare_transfer = mx31_prepare_transfer,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_slavemode = false,
	.devtype = IMX31_CSPI,
};

static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
	/* i.mx35 and later cspi shares the functions with i.mx31 one */
	.intctrl = mx31_intctrl,
	.prepare_message = mx31_prepare_message,
	.prepare_transfer = mx31_prepare_transfer,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.fifo_size = 8,
	.has_dmamode = true,
	.dynamic_burst = false,
	.has_slavemode = false,
	.devtype = IMX35_CSPI,
};

static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
	.intctrl = mx51_ecspi_intctrl,
	.prepare_message = mx51_ecspi_prepare_message,
	.prepare_transfer = mx51_ecspi_prepare_transfer,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.reset = mx51_ecspi_reset,
	.setup_wml = mx51_setup_wml,
	.disable_dma = mx51_disable_dma,
	.fifo_size = 64,
	.has_dmamode = true,
	.dynamic_burst = true,
	.has_slavemode = true,
	.disable = mx51_ecspi_disable,
	.devtype = IMX51_ECSPI,
};

static struct spi_imx_devtype_data imx53_ecspi_devtype_data = {
	.intctrl = mx51_ecspi_intctrl,
	.prepare_message = mx51_ecspi_prepare_message,
	.prepare_transfer = mx51_ecspi_prepare_transfer,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.disable_dma = mx51_disable_dma,
	.reset = mx51_ecspi_reset,
	.fifo_size = 64,
	.has_dmamode = true,
	.has_slavemode = true,
	.disable = mx51_ecspi_disable,
	.devtype = IMX53_ECSPI,
};
1056 
1057 static struct spi_imx_devtype_data imx6ul_ecspi_devtype_data = {
1058 	.intctrl = mx51_ecspi_intctrl,
1059 	.prepare_message = mx51_ecspi_prepare_message,
1060 	.prepare_transfer = mx51_ecspi_prepare_transfer,
1061 	.trigger = mx51_ecspi_trigger,
1062 	.rx_available = mx51_ecspi_rx_available,
1063 	.reset = mx51_ecspi_reset,
1064 	.setup_wml = mx51_setup_wml,
1065 	.fifo_size = 64,
1066 	.has_dmamode = true,
1067 	.dynamic_burst = true,
1068 	.has_slavemode = true,
1069 	.tx_glitch_fixed = true,
1070 	.disable = mx51_ecspi_disable,
1071 	.devtype = IMX51_ECSPI,
1072 };
1073 
/* Device-tree match table: maps each compatible to its devtype table. */
static const struct of_device_id spi_imx_dt_ids[] = {
	{ .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
	{ .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
	{ .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, },
	{ .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, },
	{ .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
	{ .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
	{ .compatible = "fsl,imx53-ecspi", .data = &imx53_ecspi_devtype_data, },
	{ .compatible = "fsl,imx6ul-ecspi", .data = &imx6ul_ecspi_devtype_data, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);
1086 
spi_imx_set_burst_len(struct spi_imx_data * spi_imx,int n_bits)1087 static void spi_imx_set_burst_len(struct spi_imx_data *spi_imx, int n_bits)
1088 {
1089 	u32 ctrl;
1090 
1091 	ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
1092 	ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
1093 	ctrl |= ((n_bits - 1) << MX51_ECSPI_CTRL_BL_OFFSET);
1094 	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
1095 }
1096 
/*
 * Refill the TX FIFO from the current transfer buffer and, in master
 * mode, trigger the controller to start shifting.
 */
static void spi_imx_push(struct spi_imx_data *spi_imx)
{
	unsigned int burst_len;

	/*
	 * Reload the FIFO when the remaining bytes to be transferred in the
	 * current burst is 0. This only applies when bits_per_word is a
	 * multiple of 8.
	 */
	if (!spi_imx->remainder) {
		if (spi_imx->dynamic_burst) {

			/* We need to deal unaligned data first */
			burst_len = spi_imx->count % MX51_ECSPI_CTRL_MAX_BURST;

			if (!burst_len)
				burst_len = MX51_ECSPI_CTRL_MAX_BURST;

			/* Hardware takes the burst length in bits */
			spi_imx_set_burst_len(spi_imx, burst_len * 8);

			spi_imx->remainder = burst_len;
		} else {
			spi_imx->remainder = spi_imx_bytes_per_word(spi_imx->bits_per_word);
		}
	}

	/* Push words until the FIFO is full or the transfer runs out */
	while (spi_imx->txfifo < spi_imx->devtype_data->fifo_size) {
		if (!spi_imx->count)
			break;
		/* Don't write past the currently programmed burst length */
		if (spi_imx->dynamic_burst &&
		    spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder, 4))
			break;
		spi_imx->tx(spi_imx);
		spi_imx->txfifo++;
	}

	/* In slave mode the external master clocks the data out */
	if (!spi_imx->slave_mode)
		spi_imx->devtype_data->trigger(spi_imx);
}
1136 
/*
 * Interrupt handler for PIO transfers: drain RX, refill TX, and complete
 * xfer_done once all data has been both sent and received.
 */
static irqreturn_t spi_imx_isr(int irq, void *dev_id)
{
	struct spi_imx_data *spi_imx = dev_id;

	/* Drain everything the controller has received so far */
	while (spi_imx->txfifo &&
	       spi_imx->devtype_data->rx_available(spi_imx)) {
		spi_imx->rx(spi_imx);
		spi_imx->txfifo--;
	}

	/* More TX data pending: refill the FIFO and wait for the next irq */
	if (spi_imx->count) {
		spi_imx_push(spi_imx);
		return IRQ_HANDLED;
	}

	if (spi_imx->txfifo) {
		/* No data left to push, but still waiting for rx data,
		 * enable receive data available interrupt.
		 */
		spi_imx->devtype_data->intctrl(
				spi_imx, MXC_INT_RR);
		return IRQ_HANDLED;
	}

	/* Transfer fully done: mask interrupts and wake the waiter */
	spi_imx->devtype_data->intctrl(spi_imx, 0);
	complete(&spi_imx->xfer_done);

	return IRQ_HANDLED;
}
1166 
spi_imx_dma_configure(struct spi_master * master)1167 static int spi_imx_dma_configure(struct spi_master *master)
1168 {
1169 	int ret;
1170 	enum dma_slave_buswidth buswidth;
1171 	struct dma_slave_config rx = {}, tx = {};
1172 	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
1173 
1174 	switch (spi_imx_bytes_per_word(spi_imx->bits_per_word)) {
1175 	case 4:
1176 		buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
1177 		break;
1178 	case 2:
1179 		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
1180 		break;
1181 	case 1:
1182 		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
1183 		break;
1184 	default:
1185 		return -EINVAL;
1186 	}
1187 
1188 	tx.direction = DMA_MEM_TO_DEV;
1189 	tx.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA;
1190 	tx.dst_addr_width = buswidth;
1191 	tx.dst_maxburst = spi_imx->wml;
1192 	ret = dmaengine_slave_config(master->dma_tx, &tx);
1193 	if (ret) {
1194 		dev_err(spi_imx->dev, "TX dma configuration failed with %d\n", ret);
1195 		return ret;
1196 	}
1197 
1198 	rx.direction = DMA_DEV_TO_MEM;
1199 	rx.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA;
1200 	rx.src_addr_width = buswidth;
1201 	rx.src_maxburst = spi_imx->wml;
1202 	ret = dmaengine_slave_config(master->dma_rx, &rx);
1203 	if (ret) {
1204 		dev_err(spi_imx->dev, "RX dma configuration failed with %d\n", ret);
1205 		return ret;
1206 	}
1207 
1208 	return 0;
1209 }
1210 
/*
 * Per-transfer setup: pick the bus clock, select the PIO tx/rx helpers
 * matching the word size (or the byte-swapping dynamic-burst variants),
 * decide PIO vs DMA and let the devtype hook program the controller.
 */
static int spi_imx_setupxfer(struct spi_device *spi,
				 struct spi_transfer *t)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);

	if (!t)
		return 0;

	/* Fall back to the device's max speed if the transfer gives none */
	if (!t->speed_hz) {
		if (!spi->max_speed_hz) {
			dev_err(&spi->dev, "no speed_hz provided!\n");
			return -EINVAL;
		}
		dev_dbg(&spi->dev, "using spi->max_speed_hz!\n");
		spi_imx->spi_bus_clk = spi->max_speed_hz;
	} else
		spi_imx->spi_bus_clk = t->speed_hz;

	spi_imx->bits_per_word = t->bits_per_word;

	/*
	 * Initialize the functions for transfer. To transfer non byte-aligned
	 * words, we have to use multiple word-size bursts, we can't use
	 * dynamic_burst in that case.
	 */
	if (spi_imx->devtype_data->dynamic_burst && !spi_imx->slave_mode &&
	    !(spi->mode & SPI_CS_WORD) &&
	    (spi_imx->bits_per_word == 8 ||
	    spi_imx->bits_per_word == 16 ||
	    spi_imx->bits_per_word == 32)) {

		spi_imx->rx = spi_imx_buf_rx_swap;
		spi_imx->tx = spi_imx_buf_tx_swap;
		spi_imx->dynamic_burst = 1;

	} else {
		/* Plain helpers sized by the word width */
		if (spi_imx->bits_per_word <= 8) {
			spi_imx->rx = spi_imx_buf_rx_u8;
			spi_imx->tx = spi_imx_buf_tx_u8;
		} else if (spi_imx->bits_per_word <= 16) {
			spi_imx->rx = spi_imx_buf_rx_u16;
			spi_imx->tx = spi_imx_buf_tx_u16;
		} else {
			spi_imx->rx = spi_imx_buf_rx_u32;
			spi_imx->tx = spi_imx_buf_tx_u32;
		}
		spi_imx->dynamic_burst = 0;
	}

	if (spi_imx_can_dma(spi_imx->bitbang.master, spi, t))
		spi_imx->usedma = true;
	else
		spi_imx->usedma = false;

	/* i.MX53 slave mode uses dedicated helpers and a full-length burst */
	if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) {
		spi_imx->rx = mx53_ecspi_rx_slave;
		spi_imx->tx = mx53_ecspi_tx_slave;
		spi_imx->slave_burst = t->len;
	}

	spi_imx->devtype_data->prepare_transfer(spi_imx, spi);

	return 0;
}
1275 
spi_imx_sdma_exit(struct spi_imx_data * spi_imx)1276 static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
1277 {
1278 	struct spi_master *master = spi_imx->bitbang.master;
1279 
1280 	if (master->dma_rx) {
1281 		dma_release_channel(master->dma_rx);
1282 		master->dma_rx = NULL;
1283 	}
1284 
1285 	if (master->dma_tx) {
1286 		dma_release_channel(master->dma_tx);
1287 		master->dma_tx = NULL;
1288 	}
1289 }
1290 
/*
 * Acquire TX/RX DMA channels and set up the master for DMA transfers.
 * On any failure both channels are released; returns a negative errno
 * (callers treat -EPROBE_DEFER as fatal, anything else as "use PIO").
 */
static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
			     struct spi_master *master)
{
	int ret;

	/* Default watermark level: half the FIFO */
	spi_imx->wml = spi_imx->devtype_data->fifo_size / 2;

	/* Prepare for TX DMA: */
	master->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(master->dma_tx)) {
		ret = PTR_ERR(master->dma_tx);
		dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
		master->dma_tx = NULL;
		goto err;
	}

	/* Prepare for RX : */
	master->dma_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(master->dma_rx)) {
		ret = PTR_ERR(master->dma_rx);
		dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
		master->dma_rx = NULL;
		goto err;
	}

	init_completion(&spi_imx->dma_rx_completion);
	init_completion(&spi_imx->dma_tx_completion);
	master->can_dma = spi_imx_can_dma;
	master->max_dma_len = MAX_SDMA_BD_BYTES;
	/* DMA always needs both buffers mapped, even for half-duplex xfers */
	spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
					 SPI_MASTER_MUST_TX;

	return 0;
err:
	spi_imx_sdma_exit(spi_imx);
	return ret;
}
1328 
spi_imx_dma_rx_callback(void * cookie)1329 static void spi_imx_dma_rx_callback(void *cookie)
1330 {
1331 	struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;
1332 
1333 	complete(&spi_imx->dma_rx_completion);
1334 }
1335 
spi_imx_dma_tx_callback(void * cookie)1336 static void spi_imx_dma_tx_callback(void *cookie)
1337 {
1338 	struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;
1339 
1340 	complete(&spi_imx->dma_tx_completion);
1341 }
1342 
spi_imx_calculate_timeout(struct spi_imx_data * spi_imx,int size)1343 static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size)
1344 {
1345 	unsigned long timeout = 0;
1346 
1347 	/* Time with actual data transfer and CS change delay related to HW */
1348 	timeout = (8 + 4) * size / spi_imx->spi_bus_clk;
1349 
1350 	/* Add extra second for scheduler related activities */
1351 	timeout += 1;
1352 
1353 	/* Double calculated timeout */
1354 	return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
1355 }
1356 
/*
 * Run one transfer via DMA. Chooses a watermark level that divides the
 * last RX scatterlist entry (so no tail bytes are left in the FIFO),
 * submits RX before TX (TX starts the transfer) and waits for both with
 * a timeout. Returns transfer->len on success, negative errno on error;
 * errors before anything started are flagged so the core falls back to PIO.
 */
static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
				struct spi_transfer *transfer)
{
	struct dma_async_tx_descriptor *desc_tx, *desc_rx;
	unsigned long transfer_timeout;
	unsigned long timeout;
	struct spi_master *master = spi_imx->bitbang.master;
	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
	struct scatterlist *last_sg = sg_last(rx->sgl, rx->nents);
	unsigned int bytes_per_word, i;
	int ret;

	/* Get the right burst length from the last sg to ensure no tail data */
	bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
	for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) {
		if (!(sg_dma_len(last_sg) % (i * bytes_per_word)))
			break;
	}
	/* Use 1 as wml in case no available burst length got */
	if (i == 0)
		i = 1;

	spi_imx->wml =  i;

	ret = spi_imx_dma_configure(master);
	if (ret)
		goto dma_failure_no_start;

	if (!spi_imx->devtype_data->setup_wml) {
		dev_err(spi_imx->dev, "No setup_wml()?\n");
		ret = -EINVAL;
		goto dma_failure_no_start;
	}
	spi_imx->devtype_data->setup_wml(spi_imx);

	/*
	 * The TX DMA setup starts the transfer, so make sure RX is configured
	 * before TX.
	 */
	desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
				rx->sgl, rx->nents, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_rx) {
		ret = -EINVAL;
		goto dma_failure_no_start;
	}

	desc_rx->callback = spi_imx_dma_rx_callback;
	desc_rx->callback_param = (void *)spi_imx;
	dmaengine_submit(desc_rx);
	reinit_completion(&spi_imx->dma_rx_completion);
	dma_async_issue_pending(master->dma_rx);

	desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
				tx->sgl, tx->nents, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_tx) {
		/* RX is already in flight: tear down both channels */
		dmaengine_terminate_all(master->dma_tx);
		dmaengine_terminate_all(master->dma_rx);
		return -EINVAL;
	}

	desc_tx->callback = spi_imx_dma_tx_callback;
	desc_tx->callback_param = (void *)spi_imx;
	dmaengine_submit(desc_tx);
	reinit_completion(&spi_imx->dma_tx_completion);
	dma_async_issue_pending(master->dma_tx);

	transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);

	/* Wait SDMA to finish the data transfer.*/
	timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
						transfer_timeout);
	if (!timeout) {
		dev_err(spi_imx->dev, "I/O Error in DMA TX\n");
		dmaengine_terminate_all(master->dma_tx);
		dmaengine_terminate_all(master->dma_rx);
		return -ETIMEDOUT;
	}

	timeout = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
					      transfer_timeout);
	if (!timeout) {
		dev_err(&master->dev, "I/O Error in DMA RX\n");
		/* Reset the controller to flush the stuck FIFO state */
		spi_imx->devtype_data->reset(spi_imx);
		dmaengine_terminate_all(master->dma_rx);
		return -ETIMEDOUT;
	}

	return transfer->len;
/* fallback to pio */
dma_failure_no_start:
	transfer->error |= SPI_TRANS_FAIL_NO_START;
	return ret;
}
1452 
/*
 * Run one transfer in PIO/interrupt mode (master): prime the FIFO,
 * enable the TX-empty interrupt and wait for the ISR to complete the
 * transfer. Returns transfer->len on success, -ETIMEDOUT on stall.
 */
static int spi_imx_pio_transfer(struct spi_device *spi,
				struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	unsigned long transfer_timeout;
	unsigned long timeout;

	spi_imx->tx_buf = transfer->tx_buf;
	spi_imx->rx_buf = transfer->rx_buf;
	spi_imx->count = transfer->len;
	spi_imx->txfifo = 0;
	spi_imx->remainder = 0;

	reinit_completion(&spi_imx->xfer_done);

	/* Prime the FIFO before enabling interrupts */
	spi_imx_push(spi_imx);

	spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);

	transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);

	timeout = wait_for_completion_timeout(&spi_imx->xfer_done,
					      transfer_timeout);
	if (!timeout) {
		dev_err(&spi->dev, "I/O Error in PIO\n");
		/* Reset the controller so the next transfer starts clean */
		spi_imx->devtype_data->reset(spi_imx);
		return -ETIMEDOUT;
	}

	return transfer->len;
}
1484 
/*
 * Run one transfer in PIO slave mode: prime the FIFO and wait
 * (interruptibly, no timeout — the remote master sets the pace) for
 * completion or a slave_abort. Returns transfer->len or -EINTR/-EMSGSIZE.
 */
static int spi_imx_pio_transfer_slave(struct spi_device *spi,
				      struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	int ret = transfer->len;

	/* i.MX53 slave mode cannot transfer more than one burst at once */
	if (is_imx53_ecspi(spi_imx) &&
	    transfer->len > MX53_MAX_TRANSFER_BYTES) {
		dev_err(&spi->dev, "Transaction too big, max size is %d bytes\n",
			MX53_MAX_TRANSFER_BYTES);
		return -EMSGSIZE;
	}

	spi_imx->tx_buf = transfer->tx_buf;
	spi_imx->rx_buf = transfer->rx_buf;
	spi_imx->count = transfer->len;
	spi_imx->txfifo = 0;
	spi_imx->remainder = 0;

	reinit_completion(&spi_imx->xfer_done);
	spi_imx->slave_aborted = false;

	spi_imx_push(spi_imx);

	spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE | MXC_INT_RDR);

	if (wait_for_completion_interruptible(&spi_imx->xfer_done) ||
	    spi_imx->slave_aborted) {
		dev_dbg(&spi->dev, "interrupted\n");
		ret = -EINTR;
	}

	/* ecspi has a HW issue when working in Slave mode:
	 * after 64 words written to TXFIFO, even when TXFIFO becomes empty,
	 * ECSPI_TXDATA keeps shifting out the last word data,
	 * so we have to disable ECSPI when in slave mode after the
	 * transfer completes
	 */
	if (spi_imx->devtype_data->disable)
		spi_imx->devtype_data->disable(spi_imx);

	return ret;
}
1528 
/*
 * Top-level transfer dispatch: flush stale RX data, then route to the
 * slave-mode, DMA or PIO implementation. Returns bytes transferred or
 * a negative errno.
 */
static int spi_imx_transfer(struct spi_device *spi,
				struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);

	/* Report the clock actually chosen in setupxfer */
	transfer->effective_speed_hz = spi_imx->spi_bus_clk;

	/* flush rxfifo before transfer */
	while (spi_imx->devtype_data->rx_available(spi_imx))
		readl(spi_imx->base + MXC_CSPIRXDATA);

	if (spi_imx->slave_mode)
		return spi_imx_pio_transfer_slave(spi, transfer);

	/* usedma was decided per-transfer in spi_imx_setupxfer() */
	if (spi_imx->usedma)
		return spi_imx_dma_transfer(spi_imx, transfer);

	return spi_imx_pio_transfer(spi, transfer);
}
1548 
/*
 * spi_master .setup hook: nothing to program here (all configuration
 * happens per transfer in spi_imx_setupxfer), just log the parameters.
 */
static int spi_imx_setup(struct spi_device *spi)
{
	dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
		 spi->mode, spi->bits_per_word, spi->max_speed_hz);

	return 0;
}
1556 
/* spi_master .cleanup hook: nothing allocated per device, so a no-op. */
static void spi_imx_cleanup(struct spi_device *spi)
{
}
1560 
/*
 * Resume the device (enables clocks via runtime PM) and let the devtype
 * hook program message-wide settings. The runtime-PM reference taken
 * here is dropped in spi_imx_unprepare_message() on success, or right
 * away if the devtype hook fails.
 */
static int
spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
	int ret;

	ret = pm_runtime_resume_and_get(spi_imx->dev);
	if (ret < 0) {
		dev_err(spi_imx->dev, "failed to enable clock\n");
		return ret;
	}

	ret = spi_imx->devtype_data->prepare_message(spi_imx, msg);
	if (ret) {
		/* Balance the reference taken above on failure */
		pm_runtime_mark_last_busy(spi_imx->dev);
		pm_runtime_put_autosuspend(spi_imx->dev);
	}

	return ret;
}
1581 
/* Drop the runtime-PM reference taken in spi_imx_prepare_message(). */
static int
spi_imx_unprepare_message(struct spi_master *master, struct spi_message *msg)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	pm_runtime_mark_last_busy(spi_imx->dev);
	pm_runtime_put_autosuspend(spi_imx->dev);
	return 0;
}
1591 
/*
 * Abort an in-flight slave transfer: flag the abort and wake the waiter
 * in spi_imx_pio_transfer_slave(), which then returns -EINTR.
 */
static int spi_imx_slave_abort(struct spi_master *master)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	spi_imx->slave_aborted = true;
	complete(&spi_imx->xfer_done);

	return 0;
}
1601 
/*
 * Probe: allocate a master (or slave) controller, map registers, request
 * the IRQ and clocks, set up runtime PM and optional DMA, reset the
 * controller and register with the SPI core. Error paths unwind in
 * strict reverse order of acquisition.
 */
static int spi_imx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct spi_master *master;
	struct spi_imx_data *spi_imx;
	struct resource *res;
	int ret, irq, spi_drctl;
	const struct spi_imx_devtype_data *devtype_data =
			of_device_get_match_data(&pdev->dev);
	bool slave_mode;
	u32 val;

	/* Slave mode only if both the SoC and the DT ask for it */
	slave_mode = devtype_data->has_slavemode &&
			of_property_read_bool(np, "spi-slave");
	if (slave_mode)
		master = spi_alloc_slave(&pdev->dev,
					 sizeof(struct spi_imx_data));
	else
		master = spi_alloc_master(&pdev->dev,
					  sizeof(struct spi_imx_data));
	if (!master)
		return -ENOMEM;

	ret = of_property_read_u32(np, "fsl,spi-rdy-drctl", &spi_drctl);
	if ((ret < 0) || (spi_drctl >= 0x3)) {
		/* '11' is reserved */
		spi_drctl = 0;
	}

	platform_set_drvdata(pdev, master);

	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
	master->bus_num = np ? -1 : pdev->id;
	master->use_gpio_descriptors = true;

	spi_imx = spi_master_get_devdata(master);
	spi_imx->bitbang.master = master;
	spi_imx->dev = &pdev->dev;
	spi_imx->slave_mode = slave_mode;

	spi_imx->devtype_data = devtype_data;

	/*
	 * Get number of chip selects from device properties. This can be
	 * coming from device tree or boardfiles, if it is not defined,
	 * a default value of 3 chip selects will be used, as all the legacy
	 * board files have <= 3 chip selects.
	 */
	if (!device_property_read_u32(&pdev->dev, "num-cs", &val))
		master->num_chipselect = val;
	else
		master->num_chipselect = 3;

	spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
	spi_imx->bitbang.txrx_bufs = spi_imx_transfer;
	spi_imx->bitbang.master->setup = spi_imx_setup;
	spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
	spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message;
	spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message;
	spi_imx->bitbang.master->slave_abort = spi_imx_slave_abort;
	spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
					     | SPI_NO_CS;
	if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx) ||
	    is_imx53_ecspi(spi_imx))
		spi_imx->bitbang.master->mode_bits |= SPI_LOOP | SPI_READY;

	/*
	 * NOTE(review): read_u32 with a NULL value pointer only checks the
	 * property; a nonzero return presumably means "cs-gpios" is absent,
	 * i.e. HW chipselect is in use — confirm against device_property docs.
	 */
	if (is_imx51_ecspi(spi_imx) &&
	    device_property_read_u32(&pdev->dev, "cs-gpios", NULL))
		/*
		 * When using HW-CS implementing SPI_CS_WORD can be done by just
		 * setting the burst length to the word size. This is
		 * considerably faster than manually controlling the CS.
		 */
		spi_imx->bitbang.master->mode_bits |= SPI_CS_WORD;

	spi_imx->spi_drctl = spi_drctl;

	init_completion(&spi_imx->xfer_done);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	spi_imx->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(spi_imx->base)) {
		ret = PTR_ERR(spi_imx->base);
		goto out_master_put;
	}
	/* Physical base needed later for DMA slave addresses */
	spi_imx->base_phys = res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto out_master_put;
	}

	ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
			       dev_name(&pdev->dev), spi_imx);
	if (ret) {
		dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
		goto out_master_put;
	}

	spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(spi_imx->clk_ipg)) {
		ret = PTR_ERR(spi_imx->clk_ipg);
		goto out_master_put;
	}

	spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(spi_imx->clk_per)) {
		ret = PTR_ERR(spi_imx->clk_per);
		goto out_master_put;
	}

	ret = clk_prepare_enable(spi_imx->clk_per);
	if (ret)
		goto out_master_put;

	ret = clk_prepare_enable(spi_imx->clk_ipg);
	if (ret)
		goto out_put_per;

	/* Clocks are on: mark the device active before enabling runtime PM */
	pm_runtime_set_autosuspend_delay(spi_imx->dev, MXC_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(spi_imx->dev);
	pm_runtime_get_noresume(spi_imx->dev);
	pm_runtime_set_active(spi_imx->dev);
	pm_runtime_enable(spi_imx->dev);

	spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
	/*
	 * Only validated on i.mx35 and i.mx6 now, can remove the constraint
	 * if validated on other chips.
	 */
	if (spi_imx->devtype_data->has_dmamode) {
		ret = spi_imx_sdma_init(&pdev->dev, spi_imx, master);
		if (ret == -EPROBE_DEFER)
			goto out_runtime_pm_put;

		/* Any other DMA failure is non-fatal: fall back to PIO */
		if (ret < 0)
			dev_dbg(&pdev->dev, "dma setup error %d, use pio\n",
				ret);
	}

	spi_imx->devtype_data->reset(spi_imx);

	spi_imx->devtype_data->intctrl(spi_imx, 0);

	master->dev.of_node = pdev->dev.of_node;
	ret = spi_bitbang_start(&spi_imx->bitbang);
	if (ret) {
		dev_err_probe(&pdev->dev, ret, "bitbang start failed\n");
		goto out_bitbang_start;
	}

	/* Drop the probe-time reference; autosuspend will gate the clocks */
	pm_runtime_mark_last_busy(spi_imx->dev);
	pm_runtime_put_autosuspend(spi_imx->dev);

	return ret;

out_bitbang_start:
	if (spi_imx->devtype_data->has_dmamode)
		spi_imx_sdma_exit(spi_imx);
out_runtime_pm_put:
	pm_runtime_dont_use_autosuspend(spi_imx->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_disable(spi_imx->dev);

	clk_disable_unprepare(spi_imx->clk_ipg);
out_put_per:
	clk_disable_unprepare(spi_imx->clk_per);
out_master_put:
	spi_master_put(master);

	return ret;
}
1775 
/*
 * Remove: unregister from the SPI core, disable the controller (if the
 * clocks can be turned on), tear down runtime PM and release DMA channels.
 */
static int spi_imx_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
	int ret;

	spi_bitbang_stop(&spi_imx->bitbang);

	/* Need the clocks running to touch the control register */
	ret = pm_runtime_get_sync(spi_imx->dev);
	if (ret >= 0)
		writel(0, spi_imx->base + MXC_CSPICTRL);
	else
		dev_warn(spi_imx->dev, "failed to enable clock, skip hw disable\n");

	pm_runtime_dont_use_autosuspend(spi_imx->dev);
	pm_runtime_put_sync(spi_imx->dev);
	pm_runtime_disable(spi_imx->dev);

	spi_imx_sdma_exit(spi_imx);
	spi_master_put(master);

	return 0;
}
1799 
spi_imx_runtime_resume(struct device * dev)1800 static int __maybe_unused spi_imx_runtime_resume(struct device *dev)
1801 {
1802 	struct spi_master *master = dev_get_drvdata(dev);
1803 	struct spi_imx_data *spi_imx;
1804 	int ret;
1805 
1806 	spi_imx = spi_master_get_devdata(master);
1807 
1808 	ret = clk_prepare_enable(spi_imx->clk_per);
1809 	if (ret)
1810 		return ret;
1811 
1812 	ret = clk_prepare_enable(spi_imx->clk_ipg);
1813 	if (ret) {
1814 		clk_disable_unprepare(spi_imx->clk_per);
1815 		return ret;
1816 	}
1817 
1818 	return 0;
1819 }
1820 
/* Runtime-PM suspend: gate both controller clocks. */
static int __maybe_unused spi_imx_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct spi_imx_data *spi_imx;

	spi_imx = spi_master_get_devdata(master);

	clk_disable_unprepare(spi_imx->clk_per);
	clk_disable_unprepare(spi_imx->clk_ipg);

	return 0;
}
1833 
/* System suspend: only switch the pins to their sleep pinctrl state. */
static int __maybe_unused spi_imx_suspend(struct device *dev)
{
	pinctrl_pm_select_sleep_state(dev);
	return 0;
}
1839 
/* System resume: restore the default pinctrl state. */
static int __maybe_unused spi_imx_resume(struct device *dev)
{
	pinctrl_pm_select_default_state(dev);
	return 0;
}
1845 
/* PM ops: runtime clock gating plus sleep-state pinctrl on system PM. */
static const struct dev_pm_ops imx_spi_pm = {
	SET_RUNTIME_PM_OPS(spi_imx_runtime_suspend,
				spi_imx_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(spi_imx_suspend, spi_imx_resume)
};
1851 
/* Platform driver glue: matched via device tree (spi_imx_dt_ids). */
static struct platform_driver spi_imx_driver = {
	.driver = {
		   .name = DRIVER_NAME,
		   .of_match_table = spi_imx_dt_ids,
		   .pm = &imx_spi_pm,
	},
	.probe = spi_imx_probe,
	.remove = spi_imx_remove,
};
1861 module_platform_driver(spi_imx_driver);
1862 
1863 MODULE_DESCRIPTION("i.MX SPI Controller driver");
1864 MODULE_AUTHOR("Sascha Hauer, Pengutronix");
1865 MODULE_LICENSE("GPL");
1866 MODULE_ALIAS("platform:" DRIVER_NAME);
1867