// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SPI bus driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/of_gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/reset.h>

#define DRIVER_NAME "sirfsoc_spi"
/* SPI CTRL register defines */
#define SIRFSOC_SPI_SLV_MODE		BIT(16)
#define SIRFSOC_SPI_CMD_MODE		BIT(17)
#define SIRFSOC_SPI_CS_IO_OUT		BIT(18)
#define SIRFSOC_SPI_CS_IO_MODE		BIT(19)
#define SIRFSOC_SPI_CLK_IDLE_STAT	BIT(20)
#define SIRFSOC_SPI_CS_IDLE_STAT	BIT(21)
#define SIRFSOC_SPI_TRAN_MSB		BIT(22)
#define SIRFSOC_SPI_DRV_POS_EDGE	BIT(23)
#define SIRFSOC_SPI_CS_HOLD_TIME	BIT(24)
#define SIRFSOC_SPI_CLK_SAMPLE_MODE	BIT(25)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_8	(0 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_12	(1 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_16	(2 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_32	(3 << 26)
#define SIRFSOC_SPI_CMD_BYTE_NUM(x)	((x & 3) << 28)
#define SIRFSOC_SPI_ENA_AUTO_CLR	BIT(30)
#define SIRFSOC_SPI_MUL_DAT_MODE	BIT(31)

/* Interrupt Enable */
#define SIRFSOC_SPI_RX_DONE_INT_EN	BIT(0)
#define SIRFSOC_SPI_TX_DONE_INT_EN	BIT(1)
#define SIRFSOC_SPI_RX_OFLOW_INT_EN	BIT(2)
#define SIRFSOC_SPI_TX_UFLOW_INT_EN	BIT(3)
#define SIRFSOC_SPI_RX_IO_DMA_INT_EN	BIT(4)
#define SIRFSOC_SPI_TX_IO_DMA_INT_EN	BIT(5)
#define SIRFSOC_SPI_RXFIFO_FULL_INT_EN	BIT(6)
#define SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN	BIT(7)
#define SIRFSOC_SPI_RXFIFO_THD_INT_EN	BIT(8)
#define SIRFSOC_SPI_TXFIFO_THD_INT_EN	BIT(9)
#define SIRFSOC_SPI_FRM_END_INT_EN	BIT(10)

/* Interrupt status */
#define SIRFSOC_SPI_RX_DONE		BIT(0)
#define SIRFSOC_SPI_TX_DONE		BIT(1)
#define SIRFSOC_SPI_RX_OFLOW		BIT(2)
#define SIRFSOC_SPI_TX_UFLOW		BIT(3)
#define SIRFSOC_SPI_RX_IO_DMA		BIT(4)
#define SIRFSOC_SPI_RX_FIFO_FULL	BIT(6)
#define SIRFSOC_SPI_TXFIFO_EMPTY	BIT(7)
#define SIRFSOC_SPI_RXFIFO_THD_REACH	BIT(8)
#define SIRFSOC_SPI_TXFIFO_THD_REACH	BIT(9)
#define SIRFSOC_SPI_FRM_END		BIT(10)

/* TX RX enable */
#define SIRFSOC_SPI_RX_EN		BIT(0)
#define SIRFSOC_SPI_TX_EN		BIT(1)
#define SIRFSOC_SPI_CMD_TX_EN		BIT(2)

#define SIRFSOC_SPI_IO_MODE_SEL		BIT(0)
#define SIRFSOC_SPI_RX_DMA_FLUSH	BIT(2)

/* FIFO OPs */
#define SIRFSOC_SPI_FIFO_RESET		BIT(0)
#define SIRFSOC_SPI_FIFO_START		BIT(1)

/* FIFO CTRL */
#define SIRFSOC_SPI_FIFO_WIDTH_BYTE	(0 << 0)
#define SIRFSOC_SPI_FIFO_WIDTH_WORD	(1 << 0)
#define SIRFSOC_SPI_FIFO_WIDTH_DWORD	(2 << 0)
/* USP related */
#define SIRFSOC_USP_SYNC_MODE		BIT(0)
#define SIRFSOC_USP_SLV_MODE		BIT(1)
#define SIRFSOC_USP_LSB			BIT(4)
#define SIRFSOC_USP_EN			BIT(5)
#define SIRFSOC_USP_RXD_FALLING_EDGE	BIT(6)
#define SIRFSOC_USP_TXD_FALLING_EDGE	BIT(7)
#define SIRFSOC_USP_CS_HIGH_VALID	BIT(9)
#define SIRFSOC_USP_SCLK_IDLE_STAT	BIT(11)
#define SIRFSOC_USP_TFS_IO_MODE		BIT(14)
#define SIRFSOC_USP_TFS_IO_INPUT	BIT(19)

#define SIRFSOC_USP_RXD_DELAY_LEN_MASK	0xFF
#define SIRFSOC_USP_TXD_DELAY_LEN_MASK	0xFF
#define SIRFSOC_USP_RXD_DELAY_OFFSET	0
#define SIRFSOC_USP_TXD_DELAY_OFFSET	8
#define SIRFSOC_USP_RXD_DELAY_LEN	1
#define SIRFSOC_USP_TXD_DELAY_LEN	1
#define SIRFSOC_USP_CLK_DIVISOR_OFFSET	21
#define SIRFSOC_USP_CLK_DIVISOR_MASK	0x3FF
#define SIRFSOC_USP_CLK_10_11_MASK	0x3
#define SIRFSOC_USP_CLK_10_11_OFFSET	30
#define SIRFSOC_USP_CLK_12_15_MASK	0xF
#define SIRFSOC_USP_CLK_12_15_OFFSET	24

#define SIRFSOC_USP_TX_DATA_OFFSET	0
#define SIRFSOC_USP_TX_SYNC_OFFSET	8
#define SIRFSOC_USP_TX_FRAME_OFFSET	16
#define SIRFSOC_USP_TX_SHIFTER_OFFSET	24

#define SIRFSOC_USP_TX_DATA_MASK	0xFF
#define SIRFSOC_USP_TX_SYNC_MASK	0xFF
#define SIRFSOC_USP_TX_FRAME_MASK	0xFF
#define SIRFSOC_USP_TX_SHIFTER_MASK	0x1F

#define SIRFSOC_USP_RX_DATA_OFFSET	0
#define SIRFSOC_USP_RX_FRAME_OFFSET	8
#define SIRFSOC_USP_RX_SHIFTER_OFFSET	16

#define SIRFSOC_USP_RX_DATA_MASK	0xFF
#define SIRFSOC_USP_RX_FRAME_MASK	0xFF
#define SIRFSOC_USP_RX_SHIFTER_MASK	0x1F
#define SIRFSOC_USP_CS_HIGH_VALUE	BIT(1)

#define SIRFSOC_SPI_FIFO_SC_OFFSET	0
#define SIRFSOC_SPI_FIFO_LC_OFFSET	10
#define SIRFSOC_SPI_FIFO_HC_OFFSET	20

#define SIRFSOC_SPI_FIFO_FULL_MASK(s)	(1 << ((s)->fifo_full_offset))
#define SIRFSOC_SPI_FIFO_EMPTY_MASK(s)	(1 << ((s)->fifo_full_offset + 1))
#define SIRFSOC_SPI_FIFO_THD_MASK(s)	((s)->fifo_size - 1)
#define SIRFSOC_SPI_FIFO_THD_OFFSET	2
#define SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(s, val)	\
	((val) & (s)->fifo_level_chk_mask)

enum sirf_spi_type {
	SIRF_REAL_SPI,
	SIRF_USP_SPI_P2,
	SIRF_USP_SPI_A7,
};

/*
 * Use DMA only if the rx/tx buffers and the transfer length are 4-byte
 * aligned, due to a limitation of the DMA controller.
 */

#define ALIGNED(x) (!((u32)x & 0x3))
#define IS_DMA_VALID(x) (x && ALIGNED(x->tx_buf) && ALIGNED(x->rx_buf) && \
	ALIGNED(x->len) && (x->len < 2 * PAGE_SIZE))

#define SIRFSOC_MAX_CMD_BYTES	4
#define SIRFSOC_SPI_DEFAULT_FRQ 1000000

struct sirf_spi_register {
	/* SPI and USP-SPI common */
	u32 tx_rx_en;
	u32 int_en;
	u32 int_st;
	u32 tx_dma_io_ctrl;
	u32 tx_dma_io_len;
	u32 txfifo_ctrl;
	u32 txfifo_level_chk;
	u32 txfifo_op;
	u32 txfifo_st;
	u32 txfifo_data;
	u32 rx_dma_io_ctrl;
	u32 rx_dma_io_len;
	u32 rxfifo_ctrl;
	u32 rxfifo_level_chk;
	u32 rxfifo_op;
	u32 rxfifo_st;
	u32 rxfifo_data;
	/* SPI only */
	u32 spi_ctrl;
	u32 spi_cmd;
	u32 spi_dummy_delay_ctrl;
	/* USP-SPI only */
	u32 usp_mode1;
	u32 usp_mode2;
	u32 usp_tx_frame_ctrl;
	u32 usp_rx_frame_ctrl;
	u32 usp_pin_io_data;
	u32 usp_risc_dsp_mode;
	u32 usp_async_param_reg;
	u32 usp_irda_x_mode_div;
	u32 usp_sm_cfg;
	u32 usp_int_en_clr;
};

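/*
 * Register offset maps for the two flavours of controller: the dedicated
 * SPI block (sirf,prima2-spi) and the USP block (sirf,prima2-usp-spi and
 * sirf,atlas7-usp-spi) driven in its synchronous SPI-emulation mode.
 */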
static const struct sirf_spi_register real_spi_register = {
	.tx_rx_en		= 0x8,
	.int_en		= 0xc,
	.int_st		= 0x10,
	.tx_dma_io_ctrl	= 0x100,
	.tx_dma_io_len	= 0x104,
	.txfifo_ctrl	= 0x108,
	.txfifo_level_chk	= 0x10c,
	.txfifo_op		= 0x110,
	.txfifo_st		= 0x114,
	.txfifo_data	= 0x118,
	.rx_dma_io_ctrl	= 0x120,
	.rx_dma_io_len	= 0x124,
	.rxfifo_ctrl	= 0x128,
	.rxfifo_level_chk	= 0x12c,
	.rxfifo_op		= 0x130,
	.rxfifo_st		= 0x134,
	.rxfifo_data	= 0x138,
	.spi_ctrl		= 0x0,
	.spi_cmd		= 0x4,
	.spi_dummy_delay_ctrl	= 0x144,
};

static const struct sirf_spi_register usp_spi_register = {
	.tx_rx_en		= 0x10,
	.int_en		= 0x14,
	.int_st		= 0x18,
	.tx_dma_io_ctrl	= 0x100,
	.tx_dma_io_len	= 0x104,
	.txfifo_ctrl	= 0x108,
	.txfifo_level_chk	= 0x10c,
	.txfifo_op		= 0x110,
	.txfifo_st		= 0x114,
	.txfifo_data	= 0x118,
	.rx_dma_io_ctrl	= 0x120,
	.rx_dma_io_len	= 0x124,
	.rxfifo_ctrl	= 0x128,
	.rxfifo_level_chk	= 0x12c,
	.rxfifo_op		= 0x130,
	.rxfifo_st		= 0x134,
	.rxfifo_data	= 0x138,
	.usp_mode1		= 0x0,
	.usp_mode2		= 0x4,
	.usp_tx_frame_ctrl	= 0x8,
	.usp_rx_frame_ctrl	= 0xc,
	.usp_pin_io_data	= 0x1c,
	.usp_risc_dsp_mode	= 0x20,
	.usp_async_param_reg	= 0x24,
	.usp_irda_x_mode_div	= 0x28,
	.usp_sm_cfg		= 0x2c,
	.usp_int_en_clr		= 0x140,
};

struct sirfsoc_spi {
	struct spi_bitbang bitbang;
	struct completion rx_done;
	struct completion tx_done;

	void __iomem *base;
	u32 ctrl_freq;  /* SPI controller clock speed */
	struct clk *clk;

	/* rx & tx bufs from the spi_transfer */
	const void *tx;
	void *rx;

	/* place received word into rx buffer */
	void (*rx_word) (struct sirfsoc_spi *);
	/* get word from tx buffer for sending */
	void (*tx_word) (struct sirfsoc_spi *);

	/* number of words left to be transmitted/received */
	unsigned int left_tx_word;
	unsigned int left_rx_word;

	/* rx & tx DMA channels */
	struct dma_chan *rx_chan;
	struct dma_chan *tx_chan;
	dma_addr_t src_start;
	dma_addr_t dst_start;
	int word_width; /* in bytes */

	/*
	 * if the tx length is no more than 4 bytes and there is no rx
	 * buffer, use the command register instead of the TX FIFO
	 */
	bool	tx_by_cmd;
	bool	hw_cs;
	enum sirf_spi_type type;
	const struct sirf_spi_register *regs;
	unsigned int fifo_size;
	/* fifo empty offset is (fifo full offset + 1) */
	unsigned int fifo_full_offset;
	/* fifo_level_chk_mask is (fifo_size/4 - 1) */
	unsigned int fifo_level_chk_mask;
	unsigned int dat_max_frm_len;
};

struct sirf_spi_comp_data {
	const struct sirf_spi_register *regs;
	enum sirf_spi_type type;
	unsigned int dat_max_frm_len;
	unsigned int fifo_size;
	void (*hwinit)(struct sirfsoc_spi *sspi);
};

static void sirfsoc_usp_hwinit(struct sirfsoc_spi *sspi)
{
	/* reset the USP block by disabling it, then re-enable it */
	writel(readl(sspi->base + sspi->regs->usp_mode1) &
		~SIRFSOC_USP_EN, sspi->base + sspi->regs->usp_mode1);
	writel(readl(sspi->base + sspi->regs->usp_mode1) |
		SIRFSOC_USP_EN, sspi->base + sspi->regs->usp_mode1);
}

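/*
 * PIO helpers: the rx_word_* functions pop one word from the RX FIFO into
 * the rx buffer (when there is one), the tx_word_* functions push the next
 * word from the tx buffer (or zero) into the TX FIFO; both update the
 * remaining word counts.
 */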
static void spi_sirfsoc_rx_word_u8(struct sirfsoc_spi *sspi)
{
	u32 data;
	u8 *rx = sspi->rx;

	data = readl(sspi->base + sspi->regs->rxfifo_data);

	if (rx) {
		*rx++ = (u8) data;
		sspi->rx = rx;
	}

	sspi->left_rx_word--;
}

static void spi_sirfsoc_tx_word_u8(struct sirfsoc_spi *sspi)
{
	u32 data = 0;
	const u8 *tx = sspi->tx;

	if (tx) {
		data = *tx++;
		sspi->tx = tx;
	}
	writel(data, sspi->base + sspi->regs->txfifo_data);
	sspi->left_tx_word--;
}

static void spi_sirfsoc_rx_word_u16(struct sirfsoc_spi *sspi)
{
	u32 data;
	u16 *rx = sspi->rx;

	data = readl(sspi->base + sspi->regs->rxfifo_data);

	if (rx) {
		*rx++ = (u16) data;
		sspi->rx = rx;
	}

	sspi->left_rx_word--;
}

static void spi_sirfsoc_tx_word_u16(struct sirfsoc_spi *sspi)
{
	u32 data = 0;
	const u16 *tx = sspi->tx;

	if (tx) {
		data = *tx++;
		sspi->tx = tx;
	}

	writel(data, sspi->base + sspi->regs->txfifo_data);
	sspi->left_tx_word--;
}

static void spi_sirfsoc_rx_word_u32(struct sirfsoc_spi *sspi)
{
	u32 data;
	u32 *rx = sspi->rx;

	data = readl(sspi->base + sspi->regs->rxfifo_data);

	if (rx) {
		*rx++ = (u32) data;
		sspi->rx = rx;
	}

	sspi->left_rx_word--;
}

static void spi_sirfsoc_tx_word_u32(struct sirfsoc_spi *sspi)
{
	u32 data = 0;
	const u32 *tx = sspi->tx;

	if (tx) {
		data = *tx++;
		sspi->tx = tx;
	}

	writel(data, sspi->base + sspi->regs->txfifo_data);
	sspi->left_tx_word--;
}

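/*
 * Interrupt handler: a frame-end interrupt completes a command-mode
 * transfer; RX overflow / TX underflow abort the transfer by completing
 * both waiters; otherwise a TXFIFO-empty interrupt completes the tx wait
 * and, once the RX DMA/IO done status is seen, the rx wait as well. In
 * every path the interrupts are masked and acknowledged before returning.
 */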
static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
{
	struct sirfsoc_spi *sspi = dev_id;
	u32 spi_stat;

	spi_stat = readl(sspi->base + sspi->regs->int_st);
	if (sspi->tx_by_cmd && sspi->type == SIRF_REAL_SPI
		&& (spi_stat & SIRFSOC_SPI_FRM_END)) {
		complete(&sspi->tx_done);
		writel(0x0, sspi->base + sspi->regs->int_en);
		writel(readl(sspi->base + sspi->regs->int_st),
				sspi->base + sspi->regs->int_st);
		return IRQ_HANDLED;
	}
	/* Error Conditions */
	if (spi_stat & SIRFSOC_SPI_RX_OFLOW ||
			spi_stat & SIRFSOC_SPI_TX_UFLOW) {
		complete(&sspi->tx_done);
		complete(&sspi->rx_done);
		switch (sspi->type) {
		case SIRF_REAL_SPI:
		case SIRF_USP_SPI_P2:
			writel(0x0, sspi->base + sspi->regs->int_en);
			break;
		case SIRF_USP_SPI_A7:
			writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr);
			break;
		}
		writel(readl(sspi->base + sspi->regs->int_st),
				sspi->base + sspi->regs->int_st);
		return IRQ_HANDLED;
	}
	if (spi_stat & SIRFSOC_SPI_TXFIFO_EMPTY)
		complete(&sspi->tx_done);
	while (!(readl(sspi->base + sspi->regs->int_st) &
		SIRFSOC_SPI_RX_IO_DMA))
		cpu_relax();
	complete(&sspi->rx_done);
	switch (sspi->type) {
	case SIRF_REAL_SPI:
	case SIRF_USP_SPI_P2:
		writel(0x0, sspi->base + sspi->regs->int_en);
		break;
	case SIRF_USP_SPI_A7:
		writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr);
		break;
	}
	writel(readl(sspi->base + sspi->regs->int_st),
			sspi->base + sspi->regs->int_st);

	return IRQ_HANDLED;
}

static void spi_sirfsoc_dma_fini_callback(void *data)
{
	struct completion *dma_complete = data;

	complete(dma_complete);
}

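/*
 * Command-mode transfer: pack up to SIRFSOC_MAX_CMD_BYTES of tx data into
 * the SPI command register and wait for the frame-end interrupt. Used only
 * on the real SPI block for short tx-only transfers.
 */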
static void spi_sirfsoc_cmd_transfer(struct spi_device *spi,
	struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	int timeout = t->len * 10;
	u32 cmd;

	sspi = spi_master_get_devdata(spi->master);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->txfifo_op);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + sspi->regs->txfifo_op);
	memcpy(&cmd, sspi->tx, t->len);
	if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST))
		cmd = cpu_to_be32(cmd) >>
			((SIRFSOC_MAX_CMD_BYTES - t->len) * 8);
	if (sspi->word_width == 2 && t->len == 4 &&
			(!(spi->mode & SPI_LSB_FIRST)))
		cmd = ((cmd & 0xffff) << 16) | (cmd >> 16);
	writel(cmd, sspi->base + sspi->regs->spi_cmd);
	writel(SIRFSOC_SPI_FRM_END_INT_EN,
		sspi->base + sspi->regs->int_en);
	writel(SIRFSOC_SPI_CMD_TX_EN,
		sspi->base + sspi->regs->tx_rx_en);
	if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
		dev_err(&spi->dev, "cmd transfer timeout\n");
		return;
	}
	sspi->left_rx_word -= t->len;
}

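/*
 * DMA transfer: map the rx and tx buffers, queue one slave descriptor per
 * direction, enable TX/RX and wait (with a timeout) for both DMA-complete
 * callbacks before unmapping the buffers and stopping the FIFOs.
 */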
static void spi_sirfsoc_dma_transfer(struct spi_device *spi,
	struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	struct dma_async_tx_descriptor *rx_desc, *tx_desc;
	int timeout = t->len * 10;

	sspi = spi_master_get_devdata(spi->master);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->rxfifo_op);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->txfifo_op);
	switch (sspi->type) {
	case SIRF_REAL_SPI:
		writel(SIRFSOC_SPI_FIFO_START,
			sspi->base + sspi->regs->rxfifo_op);
		writel(SIRFSOC_SPI_FIFO_START,
			sspi->base + sspi->regs->txfifo_op);
		writel(0, sspi->base + sspi->regs->int_en);
		break;
	case SIRF_USP_SPI_P2:
		writel(0x0, sspi->base + sspi->regs->rxfifo_op);
		writel(0x0, sspi->base + sspi->regs->txfifo_op);
		writel(0, sspi->base + sspi->regs->int_en);
		break;
	case SIRF_USP_SPI_A7:
		writel(0x0, sspi->base + sspi->regs->rxfifo_op);
		writel(0x0, sspi->base + sspi->regs->txfifo_op);
		writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr);
		break;
	}
	writel(readl(sspi->base + sspi->regs->int_st),
		sspi->base + sspi->regs->int_st);
	if (sspi->left_tx_word < sspi->dat_max_frm_len) {
		switch (sspi->type) {
		case SIRF_REAL_SPI:
			writel(readl(sspi->base + sspi->regs->spi_ctrl) |
				SIRFSOC_SPI_ENA_AUTO_CLR |
				SIRFSOC_SPI_MUL_DAT_MODE,
				sspi->base + sspi->regs->spi_ctrl);
			writel(sspi->left_tx_word - 1,
				sspi->base + sspi->regs->tx_dma_io_len);
			writel(sspi->left_tx_word - 1,
				sspi->base + sspi->regs->rx_dma_io_len);
			break;
		case SIRF_USP_SPI_P2:
		case SIRF_USP_SPI_A7:
			/* the USP emulates SPI; tx/rx_dma_io_len is in bytes */
			writel(sspi->left_tx_word * sspi->word_width,
				sspi->base + sspi->regs->tx_dma_io_len);
			writel(sspi->left_tx_word * sspi->word_width,
				sspi->base + sspi->regs->rx_dma_io_len);
			break;
		}
	} else {
		if (sspi->type == SIRF_REAL_SPI)
			writel(readl(sspi->base + sspi->regs->spi_ctrl),
				sspi->base + sspi->regs->spi_ctrl);
		writel(0, sspi->base + sspi->regs->tx_dma_io_len);
		writel(0, sspi->base + sspi->regs->rx_dma_io_len);
	}
	sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len,
					(t->tx_buf != t->rx_buf) ?
					DMA_FROM_DEVICE : DMA_BIDIRECTIONAL);
	rx_desc = dmaengine_prep_slave_single(sspi->rx_chan,
		sspi->dst_start, t->len, DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	rx_desc->callback = spi_sirfsoc_dma_fini_callback;
	rx_desc->callback_param = &sspi->rx_done;

	sspi->src_start = dma_map_single(&spi->dev, (void *)sspi->tx, t->len,
					(t->tx_buf != t->rx_buf) ?
					DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
	tx_desc = dmaengine_prep_slave_single(sspi->tx_chan,
		sspi->src_start, t->len, DMA_MEM_TO_DEV,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	tx_desc->callback = spi_sirfsoc_dma_fini_callback;
	tx_desc->callback_param = &sspi->tx_done;

	dmaengine_submit(tx_desc);
	dmaengine_submit(rx_desc);
	dma_async_issue_pending(sspi->tx_chan);
	dma_async_issue_pending(sspi->rx_chan);
	writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
			sspi->base + sspi->regs->tx_rx_en);
	if (sspi->type == SIRF_USP_SPI_P2 ||
		sspi->type == SIRF_USP_SPI_A7) {
		writel(SIRFSOC_SPI_FIFO_START,
			sspi->base + sspi->regs->rxfifo_op);
		writel(SIRFSOC_SPI_FIFO_START,
			sspi->base + sspi->regs->txfifo_op);
	}
	if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) {
		dev_err(&spi->dev, "transfer timeout\n");
		dmaengine_terminate_all(sspi->rx_chan);
	} else
		sspi->left_rx_word = 0;
	/*
	 * We only wait for the tx-done event when transferring by DMA. For
	 * PIO, rx data is produced by writing tx data, so once rx is done,
	 * tx finished even earlier.
	 */
	if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
		dev_err(&spi->dev, "transfer timeout\n");
		if (sspi->type == SIRF_USP_SPI_P2 ||
			sspi->type == SIRF_USP_SPI_A7)
			writel(0, sspi->base + sspi->regs->tx_rx_en);
		dmaengine_terminate_all(sspi->tx_chan);
	}
	dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE);
	dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE);
	/* TX, RX FIFO stop */
	writel(0, sspi->base + sspi->regs->rxfifo_op);
	writel(0, sspi->base + sspi->regs->txfifo_op);
	if (sspi->left_tx_word >= sspi->dat_max_frm_len)
		writel(0, sspi->base + sspi->regs->tx_rx_en);
	if (sspi->type == SIRF_USP_SPI_P2 ||
		sspi->type == SIRF_USP_SPI_A7)
		writel(0, sspi->base + sspi->regs->tx_rx_en);
}

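/*
 * PIO transfer: refill the TX FIFO while there is room and data left,
 * enable the relevant interrupts, wait for tx/rx completion, then drain
 * the RX FIFO; loop until every word has been transferred.
 */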
static void spi_sirfsoc_pio_transfer(struct spi_device *spi,
		struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	int timeout = t->len * 10;
	unsigned int data_units;

	sspi = spi_master_get_devdata(spi->master);
	do {
		writel(SIRFSOC_SPI_FIFO_RESET,
			sspi->base + sspi->regs->rxfifo_op);
		writel(SIRFSOC_SPI_FIFO_RESET,
			sspi->base + sspi->regs->txfifo_op);
		switch (sspi->type) {
		case SIRF_USP_SPI_P2:
			writel(0x0, sspi->base + sspi->regs->rxfifo_op);
			writel(0x0, sspi->base + sspi->regs->txfifo_op);
			writel(0, sspi->base + sspi->regs->int_en);
			writel(readl(sspi->base + sspi->regs->int_st),
				sspi->base + sspi->regs->int_st);
			writel(min((sspi->left_tx_word * sspi->word_width),
				sspi->fifo_size),
				sspi->base + sspi->regs->tx_dma_io_len);
			writel(min((sspi->left_rx_word * sspi->word_width),
				sspi->fifo_size),
				sspi->base + sspi->regs->rx_dma_io_len);
			break;
		case SIRF_USP_SPI_A7:
			writel(0x0, sspi->base + sspi->regs->rxfifo_op);
			writel(0x0, sspi->base + sspi->regs->txfifo_op);
			writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr);
			writel(readl(sspi->base + sspi->regs->int_st),
				sspi->base + sspi->regs->int_st);
			writel(min((sspi->left_tx_word * sspi->word_width),
				sspi->fifo_size),
				sspi->base + sspi->regs->tx_dma_io_len);
			writel(min((sspi->left_rx_word * sspi->word_width),
				sspi->fifo_size),
				sspi->base + sspi->regs->rx_dma_io_len);
			break;
		case SIRF_REAL_SPI:
			writel(SIRFSOC_SPI_FIFO_START,
				sspi->base + sspi->regs->rxfifo_op);
			writel(SIRFSOC_SPI_FIFO_START,
				sspi->base + sspi->regs->txfifo_op);
			writel(0, sspi->base + sspi->regs->int_en);
			writel(readl(sspi->base + sspi->regs->int_st),
				sspi->base + sspi->regs->int_st);
			writel(readl(sspi->base + sspi->regs->spi_ctrl) |
				SIRFSOC_SPI_MUL_DAT_MODE |
				SIRFSOC_SPI_ENA_AUTO_CLR,
				sspi->base + sspi->regs->spi_ctrl);
			data_units = sspi->fifo_size / sspi->word_width;
			writel(min(sspi->left_tx_word, data_units) - 1,
				sspi->base + sspi->regs->tx_dma_io_len);
			writel(min(sspi->left_rx_word, data_units) - 1,
				sspi->base + sspi->regs->rx_dma_io_len);
			break;
		}
		while (!((readl(sspi->base + sspi->regs->txfifo_st)
			& SIRFSOC_SPI_FIFO_FULL_MASK(sspi))) &&
			sspi->left_tx_word)
			sspi->tx_word(sspi);
		writel(SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN |
			SIRFSOC_SPI_TX_UFLOW_INT_EN |
			SIRFSOC_SPI_RX_OFLOW_INT_EN |
			SIRFSOC_SPI_RX_IO_DMA_INT_EN,
			sspi->base + sspi->regs->int_en);
		writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
			sspi->base + sspi->regs->tx_rx_en);
		if (sspi->type == SIRF_USP_SPI_P2 ||
			sspi->type == SIRF_USP_SPI_A7) {
			writel(SIRFSOC_SPI_FIFO_START,
				sspi->base + sspi->regs->rxfifo_op);
			writel(SIRFSOC_SPI_FIFO_START,
				sspi->base + sspi->regs->txfifo_op);
		}
		if (!wait_for_completion_timeout(&sspi->tx_done, timeout) ||
			!wait_for_completion_timeout(&sspi->rx_done, timeout)) {
			dev_err(&spi->dev, "transfer timeout\n");
			if (sspi->type == SIRF_USP_SPI_P2 ||
				sspi->type == SIRF_USP_SPI_A7)
				writel(0, sspi->base + sspi->regs->tx_rx_en);
			break;
		}
		while (!((readl(sspi->base + sspi->regs->rxfifo_st)
			& SIRFSOC_SPI_FIFO_EMPTY_MASK(sspi))) &&
			sspi->left_rx_word)
			sspi->rx_word(sspi);
		if (sspi->type == SIRF_USP_SPI_P2 ||
			sspi->type == SIRF_USP_SPI_A7)
			writel(0, sspi->base + sspi->regs->tx_rx_en);
		writel(0, sspi->base + sspi->regs->rxfifo_op);
		writel(0, sspi->base + sspi->regs->txfifo_op);
	} while (sspi->left_tx_word != 0 || sspi->left_rx_word != 0);
}

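/*
 * bitbang txrx_bufs hook: pick the command, DMA or PIO path for this
 * transfer and return the number of bytes actually moved.
 */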
static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;

	sspi = spi_master_get_devdata(spi->master);
	sspi->tx = t->tx_buf;
	sspi->rx = t->rx_buf;
	sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width;
	reinit_completion(&sspi->rx_done);
	reinit_completion(&sspi->tx_done);
	/*
	 * If the transfer is tx-only and short enough for the command
	 * register, just write the command data there and wait for the
	 * frame to complete.
	 */
	if (sspi->type == SIRF_REAL_SPI && sspi->tx_by_cmd)
		spi_sirfsoc_cmd_transfer(spi, t);
	else if (IS_DMA_VALID(t))
		spi_sirfsoc_dma_transfer(spi, t);
	else
		spi_sirfsoc_pio_transfer(spi, t);

	return t->len - sspi->left_rx_word * sspi->word_width;
}

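/*
 * Drive the chip select either through the controller itself (hw_cs) or
 * through the GPIO named in spi->cs_gpio, honouring SPI_CS_HIGH.
 */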
static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
{
	struct sirfsoc_spi *sspi = spi_master_get_devdata(spi->master);

	if (sspi->hw_cs) {
		u32 regval;

		switch (sspi->type) {
		case SIRF_REAL_SPI:
			regval = readl(sspi->base + sspi->regs->spi_ctrl);
			switch (value) {
			case BITBANG_CS_ACTIVE:
				if (spi->mode & SPI_CS_HIGH)
					regval |= SIRFSOC_SPI_CS_IO_OUT;
				else
					regval &= ~SIRFSOC_SPI_CS_IO_OUT;
				break;
			case BITBANG_CS_INACTIVE:
				if (spi->mode & SPI_CS_HIGH)
					regval &= ~SIRFSOC_SPI_CS_IO_OUT;
				else
					regval |= SIRFSOC_SPI_CS_IO_OUT;
				break;
			}
			writel(regval, sspi->base + sspi->regs->spi_ctrl);
			break;
		case SIRF_USP_SPI_P2:
		case SIRF_USP_SPI_A7:
			regval = readl(sspi->base +
					sspi->regs->usp_pin_io_data);
			switch (value) {
			case BITBANG_CS_ACTIVE:
				if (spi->mode & SPI_CS_HIGH)
					regval |= SIRFSOC_USP_CS_HIGH_VALUE;
				else
					regval &= ~(SIRFSOC_USP_CS_HIGH_VALUE);
				break;
			case BITBANG_CS_INACTIVE:
				if (spi->mode & SPI_CS_HIGH)
					regval &= ~(SIRFSOC_USP_CS_HIGH_VALUE);
				else
					regval |= SIRFSOC_USP_CS_HIGH_VALUE;
				break;
			}
			writel(regval,
				sspi->base + sspi->regs->usp_pin_io_data);
			break;
		}
	} else {
		switch (value) {
		case BITBANG_CS_ACTIVE:
			gpio_direction_output(spi->cs_gpio,
					spi->mode & SPI_CS_HIGH ? 1 : 0);
			break;
		case BITBANG_CS_INACTIVE:
			gpio_direction_output(spi->cs_gpio,
					spi->mode & SPI_CS_HIGH ? 0 : 1);
			break;
		}
	}
}

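/*
 * Apply the SPI mode bits (CS polarity, bit order, CPOL/CPHA) to the SPI
 * or USP control registers and program the FIFO level-check thresholds.
 */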
static int spi_sirfsoc_config_mode(struct spi_device *spi)
{
	struct sirfsoc_spi *sspi;
	u32 regval, usp_mode1;

	sspi = spi_master_get_devdata(spi->master);
	regval = readl(sspi->base + sspi->regs->spi_ctrl);
	usp_mode1 = readl(sspi->base + sspi->regs->usp_mode1);
	if (!(spi->mode & SPI_CS_HIGH)) {
		regval |= SIRFSOC_SPI_CS_IDLE_STAT;
		usp_mode1 &= ~SIRFSOC_USP_CS_HIGH_VALID;
	} else {
		regval &= ~SIRFSOC_SPI_CS_IDLE_STAT;
		usp_mode1 |= SIRFSOC_USP_CS_HIGH_VALID;
	}
	if (!(spi->mode & SPI_LSB_FIRST)) {
		regval |= SIRFSOC_SPI_TRAN_MSB;
		usp_mode1 &= ~SIRFSOC_USP_LSB;
	} else {
		regval &= ~SIRFSOC_SPI_TRAN_MSB;
		usp_mode1 |= SIRFSOC_USP_LSB;
	}
	if (spi->mode & SPI_CPOL) {
		regval |= SIRFSOC_SPI_CLK_IDLE_STAT;
		usp_mode1 |= SIRFSOC_USP_SCLK_IDLE_STAT;
	} else {
		regval &= ~SIRFSOC_SPI_CLK_IDLE_STAT;
		usp_mode1 &= ~SIRFSOC_USP_SCLK_IDLE_STAT;
	}
	/*
	 * Data should be driven at least 1/2 cycle before the fetch edge
	 * to make sure that data gets stable at the fetch edge.
	 */
	if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) ||
	    (!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA))) {
		regval &= ~SIRFSOC_SPI_DRV_POS_EDGE;
		usp_mode1 |= (SIRFSOC_USP_TXD_FALLING_EDGE |
				SIRFSOC_USP_RXD_FALLING_EDGE);
	} else {
		regval |= SIRFSOC_SPI_DRV_POS_EDGE;
		usp_mode1 &= ~(SIRFSOC_USP_RXD_FALLING_EDGE |
				SIRFSOC_USP_TXD_FALLING_EDGE);
	}
	writel((SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size - 2) <<
		SIRFSOC_SPI_FIFO_SC_OFFSET) |
		(SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size / 2) <<
		SIRFSOC_SPI_FIFO_LC_OFFSET) |
		(SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, 2) <<
		SIRFSOC_SPI_FIFO_HC_OFFSET),
		sspi->base + sspi->regs->txfifo_level_chk);
	writel((SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, 2) <<
		SIRFSOC_SPI_FIFO_SC_OFFSET) |
		(SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size / 2) <<
		SIRFSOC_SPI_FIFO_LC_OFFSET) |
		(SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size - 2) <<
		SIRFSOC_SPI_FIFO_HC_OFFSET),
		sspi->base + sspi->regs->rxfifo_level_chk);
	/*
	 * The controller must never be put into hardware chip-select mode:
	 * in that mode the CS signal cannot be controlled by the driver.
	 */
	switch (sspi->type) {
	case SIRF_REAL_SPI:
		regval |= SIRFSOC_SPI_CS_IO_MODE;
		writel(regval, sspi->base + sspi->regs->spi_ctrl);
		break;
	case SIRF_USP_SPI_P2:
	case SIRF_USP_SPI_A7:
		usp_mode1 |= SIRFSOC_USP_SYNC_MODE;
		usp_mode1 |= SIRFSOC_USP_TFS_IO_MODE;
		usp_mode1 &= ~SIRFSOC_USP_TFS_IO_INPUT;
		writel(usp_mode1, sspi->base + sspi->regs->usp_mode1);
		break;
	}

	return 0;
}

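/*
 * Per-transfer setup: derive the clock divisor and word width, program the
 * FIFO widths and thresholds (and, for the USP, the frame control words),
 * decide whether command mode applies and select DMA or IO mode for the
 * data path.
 */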
static int
spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	u8 bits_per_word = 0;
	int hz = 0;
	u32 regval, txfifo_ctrl, rxfifo_ctrl, tx_frm_ctl, rx_frm_ctl, usp_mode2;

	sspi = spi_master_get_devdata(spi->master);

	bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
	hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz;

	usp_mode2 = regval = (sspi->ctrl_freq / (2 * hz)) - 1;
	if (regval > 0xFFFF || regval < 0) {
		dev_err(&spi->dev, "Speed %d not supported\n", hz);
		return -EINVAL;
	}
	switch (bits_per_word) {
	case 8:
		regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_8;
		sspi->rx_word = spi_sirfsoc_rx_word_u8;
		sspi->tx_word = spi_sirfsoc_tx_word_u8;
		break;
	case 12:
	case 16:
		regval |= (bits_per_word == 12) ?
			SIRFSOC_SPI_TRAN_DAT_FORMAT_12 :
			SIRFSOC_SPI_TRAN_DAT_FORMAT_16;
		sspi->rx_word = spi_sirfsoc_rx_word_u16;
		sspi->tx_word = spi_sirfsoc_tx_word_u16;
		break;
	case 32:
		regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_32;
		sspi->rx_word = spi_sirfsoc_rx_word_u32;
		sspi->tx_word = spi_sirfsoc_tx_word_u32;
		break;
	default:
		dev_err(&spi->dev, "bpw %d not supported\n", bits_per_word);
		return -EINVAL;
	}
	sspi->word_width = DIV_ROUND_UP(bits_per_word, 8);
	txfifo_ctrl = (((sspi->fifo_size / 2) &
			SIRFSOC_SPI_FIFO_THD_MASK(sspi))
			<< SIRFSOC_SPI_FIFO_THD_OFFSET) |
			(sspi->word_width >> 1);
	rxfifo_ctrl = (((sspi->fifo_size / 2) &
			SIRFSOC_SPI_FIFO_THD_MASK(sspi))
			<< SIRFSOC_SPI_FIFO_THD_OFFSET) |
			(sspi->word_width >> 1);
	writel(txfifo_ctrl, sspi->base + sspi->regs->txfifo_ctrl);
	writel(rxfifo_ctrl, sspi->base + sspi->regs->rxfifo_ctrl);
	if (sspi->type == SIRF_USP_SPI_P2 ||
		sspi->type == SIRF_USP_SPI_A7) {
		tx_frm_ctl = 0;
		tx_frm_ctl |= ((bits_per_word - 1) & SIRFSOC_USP_TX_DATA_MASK)
				<< SIRFSOC_USP_TX_DATA_OFFSET;
		tx_frm_ctl |= ((bits_per_word + 1 + SIRFSOC_USP_TXD_DELAY_LEN
				- 1) & SIRFSOC_USP_TX_SYNC_MASK) <<
				SIRFSOC_USP_TX_SYNC_OFFSET;
		tx_frm_ctl |= ((bits_per_word + 1 + SIRFSOC_USP_TXD_DELAY_LEN
				+ 2 - 1) & SIRFSOC_USP_TX_FRAME_MASK) <<
				SIRFSOC_USP_TX_FRAME_OFFSET;
		tx_frm_ctl |= ((bits_per_word - 1) &
				SIRFSOC_USP_TX_SHIFTER_MASK) <<
				SIRFSOC_USP_TX_SHIFTER_OFFSET;
		rx_frm_ctl = 0;
		rx_frm_ctl |= ((bits_per_word - 1) & SIRFSOC_USP_RX_DATA_MASK)
				<< SIRFSOC_USP_RX_DATA_OFFSET;
		rx_frm_ctl |= ((bits_per_word + 1 + SIRFSOC_USP_RXD_DELAY_LEN
				+ 2 - 1) & SIRFSOC_USP_RX_FRAME_MASK) <<
				SIRFSOC_USP_RX_FRAME_OFFSET;
		rx_frm_ctl |= ((bits_per_word - 1)
				& SIRFSOC_USP_RX_SHIFTER_MASK) <<
				SIRFSOC_USP_RX_SHIFTER_OFFSET;
		writel(tx_frm_ctl | (((usp_mode2 >> 10) &
			SIRFSOC_USP_CLK_10_11_MASK) <<
			SIRFSOC_USP_CLK_10_11_OFFSET),
			sspi->base + sspi->regs->usp_tx_frame_ctrl);
		writel(rx_frm_ctl | (((usp_mode2 >> 12) &
			SIRFSOC_USP_CLK_12_15_MASK) <<
			SIRFSOC_USP_CLK_12_15_OFFSET),
			sspi->base + sspi->regs->usp_rx_frame_ctrl);
		writel(readl(sspi->base + sspi->regs->usp_mode2) |
			((usp_mode2 & SIRFSOC_USP_CLK_DIVISOR_MASK) <<
			SIRFSOC_USP_CLK_DIVISOR_OFFSET) |
			(SIRFSOC_USP_RXD_DELAY_LEN <<
			 SIRFSOC_USP_RXD_DELAY_OFFSET) |
			(SIRFSOC_USP_TXD_DELAY_LEN <<
			 SIRFSOC_USP_TXD_DELAY_OFFSET),
			sspi->base + sspi->regs->usp_mode2);
	}
	if (sspi->type == SIRF_REAL_SPI)
		writel(regval, sspi->base + sspi->regs->spi_ctrl);
	spi_sirfsoc_config_mode(spi);
	if (sspi->type == SIRF_REAL_SPI) {
		if (t && t->tx_buf && !t->rx_buf &&
			(t->len <= SIRFSOC_MAX_CMD_BYTES)) {
			sspi->tx_by_cmd = true;
			writel(readl(sspi->base + sspi->regs->spi_ctrl) |
				(SIRFSOC_SPI_CMD_BYTE_NUM((t->len - 1)) |
				SIRFSOC_SPI_CMD_MODE),
				sspi->base + sspi->regs->spi_ctrl);
		} else {
			sspi->tx_by_cmd = false;
			writel(readl(sspi->base + sspi->regs->spi_ctrl) &
				~SIRFSOC_SPI_CMD_MODE,
				sspi->base + sspi->regs->spi_ctrl);
		}
	}
	if (IS_DMA_VALID(t)) {
		/* Enable DMA mode for RX, TX */
		writel(0, sspi->base + sspi->regs->tx_dma_io_ctrl);
		writel(SIRFSOC_SPI_RX_DMA_FLUSH,
			sspi->base + sspi->regs->rx_dma_io_ctrl);
	} else {
		/* Enable IO mode for RX, TX */
		writel(SIRFSOC_SPI_IO_MODE_SEL,
			sspi->base + sspi->regs->tx_dma_io_ctrl);
		writel(SIRFSOC_SPI_IO_MODE_SEL,
			sspi->base + sspi->regs->rx_dma_io_ctrl);
	}
	return 0;
}

static int spi_sirfsoc_setup(struct spi_device *spi)
{
	struct sirfsoc_spi *sspi;
	int ret = 0;

	sspi = spi_master_get_devdata(spi->master);
	if (spi->cs_gpio == -ENOENT)
		sspi->hw_cs = true;
	else {
		sspi->hw_cs = false;
		if (!spi_get_ctldata(spi)) {
			void *cs = kmalloc(sizeof(int), GFP_KERNEL);
			if (!cs) {
				ret = -ENOMEM;
				goto exit;
			}
			ret = gpio_is_valid(spi->cs_gpio);
			if (!ret) {
				dev_err(&spi->dev, "no valid gpio\n");
				ret = -ENOENT;
				goto exit;
			}
			ret = gpio_request(spi->cs_gpio, DRIVER_NAME);
			if (ret) {
				dev_err(&spi->dev, "failed to request gpio\n");
				goto exit;
			}
			spi_set_ctldata(spi, cs);
		}
	}
	spi_sirfsoc_config_mode(spi);
	spi_sirfsoc_chipselect(spi, BITBANG_CS_INACTIVE);
exit:
	return ret;
}

static void spi_sirfsoc_cleanup(struct spi_device *spi)
{
	if (spi_get_ctldata(spi)) {
		gpio_free(spi->cs_gpio);
		kfree(spi_get_ctldata(spi));
	}
}

static const struct sirf_spi_comp_data sirf_real_spi = {
	.regs = &real_spi_register,
	.type = SIRF_REAL_SPI,
	.dat_max_frm_len = 64 * 1024,
	.fifo_size = 256,
};

static const struct sirf_spi_comp_data sirf_usp_spi_p2 = {
	.regs = &usp_spi_register,
	.type = SIRF_USP_SPI_P2,
	.dat_max_frm_len = 1024 * 1024,
	.fifo_size = 128,
	.hwinit = sirfsoc_usp_hwinit,
};

static const struct sirf_spi_comp_data sirf_usp_spi_a7 = {
	.regs = &usp_spi_register,
	.type = SIRF_USP_SPI_A7,
	.dat_max_frm_len = 1024 * 1024,
	.fifo_size = 512,
	.hwinit = sirfsoc_usp_hwinit,
};

static const struct of_device_id spi_sirfsoc_of_match[] = {
	{ .compatible = "sirf,prima2-spi", .data = &sirf_real_spi},
	{ .compatible = "sirf,prima2-usp-spi", .data = &sirf_usp_spi_p2},
	{ .compatible = "sirf,atlas7-usp-spi", .data = &sirf_usp_spi_a7},
	{}
};
MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);

static int spi_sirfsoc_probe(struct platform_device *pdev)
{
	struct sirfsoc_spi *sspi;
	struct spi_master *master;
	const struct sirf_spi_comp_data *spi_comp_data;
	int irq;
	int ret;
	const struct of_device_id *match;

	ret = device_reset(&pdev->dev);
	if (ret) {
		dev_err(&pdev->dev, "SPI reset failed!\n");
		return ret;
	}

	master = spi_alloc_master(&pdev->dev, sizeof(*sspi));
	if (!master) {
		dev_err(&pdev->dev, "Unable to allocate SPI master\n");
		return -ENOMEM;
	}
	match = of_match_node(spi_sirfsoc_of_match, pdev->dev.of_node);
	platform_set_drvdata(pdev, master);
	sspi = spi_master_get_devdata(master);
	spi_comp_data = match->data;
	sspi->regs = spi_comp_data->regs;
	sspi->type = spi_comp_data->type;
	sspi->dat_max_frm_len = spi_comp_data->dat_max_frm_len;
	sspi->fifo_size = spi_comp_data->fifo_size;
	sspi->fifo_full_offset = ilog2(sspi->fifo_size);
	sspi->fifo_level_chk_mask = (sspi->fifo_size / 4) - 1;
	sspi->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sspi->base)) {
		ret = PTR_ERR(sspi->base);
		goto free_master;
	}
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = -ENXIO;
		goto free_master;
	}
	ret = devm_request_irq(&pdev->dev, irq, spi_sirfsoc_irq, 0,
				DRIVER_NAME, sspi);
	if (ret)
		goto free_master;

	sspi->bitbang.master = master;
	sspi->bitbang.chipselect = spi_sirfsoc_chipselect;
	sspi->bitbang.setup_transfer = spi_sirfsoc_setup_transfer;
	sspi->bitbang.txrx_bufs = spi_sirfsoc_transfer;
	sspi->bitbang.master->setup = spi_sirfsoc_setup;
	sspi->bitbang.master->cleanup = spi_sirfsoc_cleanup;
	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH;
	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(12) |
					SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
	master->max_speed_hz = SIRFSOC_SPI_DEFAULT_FRQ;
	master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
	sspi->bitbang.master->dev.of_node = pdev->dev.of_node;

	/* request DMA channels */
	sspi->rx_chan = dma_request_chan(&pdev->dev, "rx");
	if (IS_ERR(sspi->rx_chan)) {
		dev_err(&pdev->dev, "can not allocate rx dma channel\n");
		ret = PTR_ERR(sspi->rx_chan);
		goto free_master;
	}
	sspi->tx_chan = dma_request_chan(&pdev->dev, "tx");
	if (IS_ERR(sspi->tx_chan)) {
		dev_err(&pdev->dev, "can not allocate tx dma channel\n");
		ret = PTR_ERR(sspi->tx_chan);
		goto free_rx_dma;
	}

	sspi->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(sspi->clk)) {
		ret = PTR_ERR(sspi->clk);
		goto free_tx_dma;
	}
	clk_prepare_enable(sspi->clk);
	if (spi_comp_data->hwinit)
		spi_comp_data->hwinit(sspi);
	sspi->ctrl_freq = clk_get_rate(sspi->clk);

	init_completion(&sspi->rx_done);
	init_completion(&sspi->tx_done);

	ret = spi_bitbang_start(&sspi->bitbang);
	if (ret)
		goto free_clk;
	dev_info(&pdev->dev, "registered, bus number = %d\n", master->bus_num);

	return 0;
free_clk:
	clk_disable_unprepare(sspi->clk);
	clk_put(sspi->clk);
free_tx_dma:
	dma_release_channel(sspi->tx_chan);
free_rx_dma:
	dma_release_channel(sspi->rx_chan);
free_master:
	spi_master_put(master);

	return ret;
}

static int spi_sirfsoc_remove(struct platform_device *pdev)
{
	struct spi_master *master;
	struct sirfsoc_spi *sspi;

	master = platform_get_drvdata(pdev);
	sspi = spi_master_get_devdata(master);
	spi_bitbang_stop(&sspi->bitbang);
	clk_disable_unprepare(sspi->clk);
	clk_put(sspi->clk);
	dma_release_channel(sspi->rx_chan);
	dma_release_channel(sspi->tx_chan);
	spi_master_put(master);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int spi_sirfsoc_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
	int ret;

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	clk_disable(sspi->clk);
	return 0;
}

static int spi_sirfsoc_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct sirfsoc_spi *sspi = spi_master_get_devdata(master);

	clk_enable(sspi->clk);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->txfifo_op);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->rxfifo_op);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + sspi->regs->txfifo_op);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + sspi->regs->rxfifo_op);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(spi_sirfsoc_pm_ops, spi_sirfsoc_suspend,
			 spi_sirfsoc_resume);

static struct platform_driver spi_sirfsoc_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.pm     = &spi_sirfsoc_pm_ops,
		.of_match_table = spi_sirfsoc_of_match,
	},
	.probe = spi_sirfsoc_probe,
	.remove = spi_sirfsoc_remove,
};
module_platform_driver(spi_sirfsoc_driver);
MODULE_DESCRIPTION("SiRF SoC SPI master driver");
MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>");
MODULE_AUTHOR("Barry Song <Baohua.Song@csr.com>");
MODULE_AUTHOR("Qipan Li <Qipan.Li@csr.com>");
MODULE_LICENSE("GPL v2");