/*
 * SPI bus driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/of_gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/reset.h>

#define DRIVER_NAME "sirfsoc_spi"
/* SPI CTRL register defines */
#define SIRFSOC_SPI_SLV_MODE		BIT(16)
#define SIRFSOC_SPI_CMD_MODE		BIT(17)
#define SIRFSOC_SPI_CS_IO_OUT		BIT(18)
#define SIRFSOC_SPI_CS_IO_MODE		BIT(19)
#define SIRFSOC_SPI_CLK_IDLE_STAT	BIT(20)
#define SIRFSOC_SPI_CS_IDLE_STAT	BIT(21)
#define SIRFSOC_SPI_TRAN_MSB		BIT(22)
#define SIRFSOC_SPI_DRV_POS_EDGE	BIT(23)
#define SIRFSOC_SPI_CS_HOLD_TIME	BIT(24)
#define SIRFSOC_SPI_CLK_SAMPLE_MODE	BIT(25)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_8	(0 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_12	(1 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_16	(2 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_32	(3 << 26)
#define SIRFSOC_SPI_CMD_BYTE_NUM(x)	((x & 3) << 28)
#define SIRFSOC_SPI_ENA_AUTO_CLR	BIT(30)
#define SIRFSOC_SPI_MUL_DAT_MODE	BIT(31)

/* Interrupt Enable */
#define SIRFSOC_SPI_RX_DONE_INT_EN	BIT(0)
#define SIRFSOC_SPI_TX_DONE_INT_EN	BIT(1)
#define SIRFSOC_SPI_RX_OFLOW_INT_EN	BIT(2)
#define SIRFSOC_SPI_TX_UFLOW_INT_EN	BIT(3)
#define SIRFSOC_SPI_RX_IO_DMA_INT_EN	BIT(4)
#define SIRFSOC_SPI_TX_IO_DMA_INT_EN	BIT(5)
#define SIRFSOC_SPI_RXFIFO_FULL_INT_EN	BIT(6)
#define SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN	BIT(7)
#define SIRFSOC_SPI_RXFIFO_THD_INT_EN	BIT(8)
#define SIRFSOC_SPI_TXFIFO_THD_INT_EN	BIT(9)
#define SIRFSOC_SPI_FRM_END_INT_EN	BIT(10)

/* Interrupt status */
#define SIRFSOC_SPI_RX_DONE		BIT(0)
#define SIRFSOC_SPI_TX_DONE		BIT(1)
#define SIRFSOC_SPI_RX_OFLOW		BIT(2)
#define SIRFSOC_SPI_TX_UFLOW		BIT(3)
#define SIRFSOC_SPI_RX_IO_DMA		BIT(4)
#define SIRFSOC_SPI_RX_FIFO_FULL	BIT(6)
#define SIRFSOC_SPI_TXFIFO_EMPTY	BIT(7)
#define SIRFSOC_SPI_RXFIFO_THD_REACH	BIT(8)
#define SIRFSOC_SPI_TXFIFO_THD_REACH	BIT(9)
#define SIRFSOC_SPI_FRM_END		BIT(10)

/* TX RX enable */
#define SIRFSOC_SPI_RX_EN		BIT(0)
#define SIRFSOC_SPI_TX_EN		BIT(1)
#define SIRFSOC_SPI_CMD_TX_EN		BIT(2)

#define SIRFSOC_SPI_IO_MODE_SEL		BIT(0)
#define SIRFSOC_SPI_RX_DMA_FLUSH	BIT(2)

/* FIFO OPs */
#define SIRFSOC_SPI_FIFO_RESET		BIT(0)
#define SIRFSOC_SPI_FIFO_START		BIT(1)

/* FIFO CTRL */
#define SIRFSOC_SPI_FIFO_WIDTH_BYTE	(0 << 0)
#define SIRFSOC_SPI_FIFO_WIDTH_WORD	(1 << 0)
#define SIRFSOC_SPI_FIFO_WIDTH_DWORD	(2 << 0)
/* USP related */
#define SIRFSOC_USP_SYNC_MODE		BIT(0)
#define SIRFSOC_USP_SLV_MODE		BIT(1)
#define SIRFSOC_USP_LSB			BIT(4)
#define SIRFSOC_USP_EN			BIT(5)
#define SIRFSOC_USP_RXD_FALLING_EDGE	BIT(6)
#define SIRFSOC_USP_TXD_FALLING_EDGE	BIT(7)
#define SIRFSOC_USP_CS_HIGH_VALID	BIT(9)
#define SIRFSOC_USP_SCLK_IDLE_STAT	BIT(11)
#define SIRFSOC_USP_TFS_IO_MODE		BIT(14)
#define SIRFSOC_USP_TFS_IO_INPUT	BIT(19)

#define SIRFSOC_USP_RXD_DELAY_LEN_MASK	0xFF
#define SIRFSOC_USP_TXD_DELAY_LEN_MASK	0xFF
#define SIRFSOC_USP_RXD_DELAY_OFFSET	0
#define SIRFSOC_USP_TXD_DELAY_OFFSET	8
#define SIRFSOC_USP_RXD_DELAY_LEN	1
#define SIRFSOC_USP_TXD_DELAY_LEN	1
#define SIRFSOC_USP_CLK_DIVISOR_OFFSET	21
#define SIRFSOC_USP_CLK_DIVISOR_MASK	0x3FF
#define SIRFSOC_USP_CLK_10_11_MASK	0x3
#define SIRFSOC_USP_CLK_10_11_OFFSET	30
#define SIRFSOC_USP_CLK_12_15_MASK	0xF
#define SIRFSOC_USP_CLK_12_15_OFFSET	24

#define SIRFSOC_USP_TX_DATA_OFFSET	0
#define SIRFSOC_USP_TX_SYNC_OFFSET	8
#define SIRFSOC_USP_TX_FRAME_OFFSET	16
#define SIRFSOC_USP_TX_SHIFTER_OFFSET	24

#define SIRFSOC_USP_TX_DATA_MASK	0xFF
#define SIRFSOC_USP_TX_SYNC_MASK	0xFF
#define SIRFSOC_USP_TX_FRAME_MASK	0xFF
#define SIRFSOC_USP_TX_SHIFTER_MASK	0x1F

#define SIRFSOC_USP_RX_DATA_OFFSET	0
#define SIRFSOC_USP_RX_FRAME_OFFSET	8
#define SIRFSOC_USP_RX_SHIFTER_OFFSET	16

#define SIRFSOC_USP_RX_DATA_MASK	0xFF
#define SIRFSOC_USP_RX_FRAME_MASK	0xFF
#define SIRFSOC_USP_RX_SHIFTER_MASK	0x1F
#define SIRFSOC_USP_CS_HIGH_VALUE	BIT(1)

#define SIRFSOC_SPI_FIFO_SC_OFFSET	0
#define SIRFSOC_SPI_FIFO_LC_OFFSET	10
#define SIRFSOC_SPI_FIFO_HC_OFFSET	20

#define SIRFSOC_SPI_FIFO_FULL_MASK(s)	(1 << ((s)->fifo_full_offset))
#define SIRFSOC_SPI_FIFO_EMPTY_MASK(s)	(1 << ((s)->fifo_full_offset + 1))
#define SIRFSOC_SPI_FIFO_THD_MASK(s)	((s)->fifo_size - 1)
#define SIRFSOC_SPI_FIFO_THD_OFFSET	2
#define SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(s, val)	\
	((val) & (s)->fifo_level_chk_mask)

enum sirf_spi_type {
	SIRF_REAL_SPI,
	SIRF_USP_SPI_P2,
	SIRF_USP_SPI_A7,
};

/*
 * due to a limitation of the DMA controller, DMA is used only when the
 * rx/tx buffers and the transfer size are all 4-byte aligned
 */

#define ALIGNED(x) (!((u32)x & 0x3))
#define IS_DMA_VALID(x) (x && ALIGNED(x->tx_buf) && ALIGNED(x->rx_buf) && \
	ALIGNED(x->len) && (x->len < 2 * PAGE_SIZE))

#define SIRFSOC_MAX_CMD_BYTES	4
#define SIRFSOC_SPI_DEFAULT_FRQ 1000000

struct sirf_spi_register {
	/* SPI and USP-SPI common */
	u32 tx_rx_en;
	u32 int_en;
	u32 int_st;
	u32 tx_dma_io_ctrl;
	u32 tx_dma_io_len;
	u32 txfifo_ctrl;
	u32 txfifo_level_chk;
	u32 txfifo_op;
	u32 txfifo_st;
	u32 txfifo_data;
	u32 rx_dma_io_ctrl;
	u32 rx_dma_io_len;
	u32 rxfifo_ctrl;
	u32 rxfifo_level_chk;
	u32 rxfifo_op;
	u32 rxfifo_st;
	u32 rxfifo_data;
	/* SPI-specific */
	u32 spi_ctrl;
	u32 spi_cmd;
	u32 spi_dummy_delay_ctrl;
	/* USP-SPI-specific */
	u32 usp_mode1;
	u32 usp_mode2;
	u32 usp_tx_frame_ctrl;
	u32 usp_rx_frame_ctrl;
	u32 usp_pin_io_data;
	u32 usp_risc_dsp_mode;
	u32 usp_async_param_reg;
	u32 usp_irda_x_mode_div;
	u32 usp_sm_cfg;
	u32 usp_int_en_clr;
};

static const struct sirf_spi_register real_spi_register = {
	.tx_rx_en		= 0x8,
	.int_en			= 0xc,
	.int_st			= 0x10,
	.tx_dma_io_ctrl		= 0x100,
	.tx_dma_io_len		= 0x104,
	.txfifo_ctrl		= 0x108,
	.txfifo_level_chk	= 0x10c,
	.txfifo_op		= 0x110,
	.txfifo_st		= 0x114,
	.txfifo_data		= 0x118,
	.rx_dma_io_ctrl		= 0x120,
	.rx_dma_io_len		= 0x124,
	.rxfifo_ctrl		= 0x128,
	.rxfifo_level_chk	= 0x12c,
	.rxfifo_op		= 0x130,
	.rxfifo_st		= 0x134,
	.rxfifo_data		= 0x138,
	.spi_ctrl		= 0x0,
	.spi_cmd		= 0x4,
	.spi_dummy_delay_ctrl	= 0x144,
};

static const struct sirf_spi_register usp_spi_register = {
	.tx_rx_en		= 0x10,
	.int_en			= 0x14,
	.int_st			= 0x18,
	.tx_dma_io_ctrl		= 0x100,
	.tx_dma_io_len		= 0x104,
	.txfifo_ctrl		= 0x108,
	.txfifo_level_chk	= 0x10c,
	.txfifo_op		= 0x110,
	.txfifo_st		= 0x114,
	.txfifo_data		= 0x118,
	.rx_dma_io_ctrl		= 0x120,
	.rx_dma_io_len		= 0x124,
	.rxfifo_ctrl		= 0x128,
	.rxfifo_level_chk	= 0x12c,
	.rxfifo_op		= 0x130,
	.rxfifo_st		= 0x134,
	.rxfifo_data		= 0x138,
	.usp_mode1		= 0x0,
	.usp_mode2		= 0x4,
	.usp_tx_frame_ctrl	= 0x8,
	.usp_rx_frame_ctrl	= 0xc,
	.usp_pin_io_data	= 0x1c,
	.usp_risc_dsp_mode	= 0x20,
	.usp_async_param_reg	= 0x24,
	.usp_irda_x_mode_div	= 0x28,
	.usp_sm_cfg		= 0x2c,
	.usp_int_en_clr		= 0x140,
};

struct sirfsoc_spi {
	struct spi_bitbang bitbang;
	struct completion rx_done;
	struct completion tx_done;

	void __iomem *base;
	u32 ctrl_freq;  /* SPI controller clock speed */
	struct clk *clk;

	/* rx & tx bufs from the spi_transfer */
	const void *tx;
	void *rx;

	/* place received word into rx buffer */
	void (*rx_word) (struct sirfsoc_spi *);
	/* get word from tx buffer for sending */
	void (*tx_word) (struct sirfsoc_spi *);

	/* number of words left to be transmitted/received */
	unsigned int left_tx_word;
	unsigned int left_rx_word;

	/* rx & tx DMA channels */
	struct dma_chan *rx_chan;
	struct dma_chan *tx_chan;
	dma_addr_t src_start;
	dma_addr_t dst_start;
	int word_width; /* in bytes */

	/*
	 * if the tx size is no more than 4 bytes and there is no rx buffer,
	 * use command mode
	 */
	bool	tx_by_cmd;
	bool	hw_cs;
	enum sirf_spi_type type;
	const struct sirf_spi_register *regs;
	unsigned int fifo_size;
	/* the fifo empty offset is (fifo full offset + 1) */
	unsigned int fifo_full_offset;
	/* fifo_level_chk_mask is (fifo_size/4 - 1) */
	unsigned int fifo_level_chk_mask;
	unsigned int dat_max_frm_len;
};

struct sirf_spi_comp_data {
	const struct sirf_spi_register *regs;
	enum sirf_spi_type type;
	unsigned int dat_max_frm_len;
	unsigned int fifo_size;
	void (*hwinit)(struct sirfsoc_spi *sspi);
};

static void sirfsoc_usp_hwinit(struct sirfsoc_spi *sspi)
{
	/* reset the USP block, then re-enable it so it can operate */
	writel(readl(sspi->base + sspi->regs->usp_mode1) &
		~SIRFSOC_USP_EN, sspi->base + sspi->regs->usp_mode1);
	writel(readl(sspi->base + sspi->regs->usp_mode1) |
		SIRFSOC_USP_EN, sspi->base + sspi->regs->usp_mode1);
}

static void spi_sirfsoc_rx_word_u8(struct sirfsoc_spi *sspi)
{
	u32 data;
	u8 *rx = sspi->rx;

	data = readl(sspi->base + sspi->regs->rxfifo_data);

	if (rx) {
		*rx++ = (u8) data;
		sspi->rx = rx;
	}

	sspi->left_rx_word--;
}

static void spi_sirfsoc_tx_word_u8(struct sirfsoc_spi *sspi)
{
	u32 data = 0;
	const u8 *tx = sspi->tx;

	if (tx) {
		data = *tx++;
		sspi->tx = tx;
	}
	writel(data, sspi->base + sspi->regs->txfifo_data);
	sspi->left_tx_word--;
}

static void spi_sirfsoc_rx_word_u16(struct sirfsoc_spi *sspi)
{
	u32 data;
	u16 *rx = sspi->rx;

	data = readl(sspi->base + sspi->regs->rxfifo_data);

	if (rx) {
		*rx++ = (u16) data;
		sspi->rx = rx;
	}

	sspi->left_rx_word--;
}

static void spi_sirfsoc_tx_word_u16(struct sirfsoc_spi *sspi)
{
	u32 data = 0;
	const u16 *tx = sspi->tx;

	if (tx) {
		data = *tx++;
		sspi->tx = tx;
	}

	writel(data, sspi->base + sspi->regs->txfifo_data);
	sspi->left_tx_word--;
}

static void spi_sirfsoc_rx_word_u32(struct sirfsoc_spi *sspi)
{
	u32 data;
	u32 *rx = sspi->rx;

	data = readl(sspi->base + sspi->regs->rxfifo_data);

	if (rx) {
		*rx++ = (u32) data;
		sspi->rx = rx;
	}

	sspi->left_rx_word--;
}

static void spi_sirfsoc_tx_word_u32(struct sirfsoc_spi *sspi)
{
	u32 data = 0;
	const u32 *tx = sspi->tx;

	if (tx) {
		data = *tx++;
		sspi->tx = tx;
	}

	writel(data, sspi->base + sspi->regs->txfifo_data);
	sspi->left_tx_word--;
}

static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
{
	struct sirfsoc_spi *sspi = dev_id;
	u32 spi_stat;

	spi_stat = readl(sspi->base + sspi->regs->int_st);
	if (sspi->tx_by_cmd && sspi->type == SIRF_REAL_SPI
		&& (spi_stat & SIRFSOC_SPI_FRM_END)) {
		complete(&sspi->tx_done);
		writel(0x0, sspi->base + sspi->regs->int_en);
		writel(readl(sspi->base + sspi->regs->int_st),
				sspi->base + sspi->regs->int_st);
		return IRQ_HANDLED;
	}
	/* Error Conditions */
	if (spi_stat & SIRFSOC_SPI_RX_OFLOW ||
			spi_stat & SIRFSOC_SPI_TX_UFLOW) {
		complete(&sspi->tx_done);
		complete(&sspi->rx_done);
		switch (sspi->type) {
		case SIRF_REAL_SPI:
		case SIRF_USP_SPI_P2:
			writel(0x0, sspi->base + sspi->regs->int_en);
			break;
		case SIRF_USP_SPI_A7:
			writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr);
			break;
		}
		writel(readl(sspi->base + sspi->regs->int_st),
				sspi->base + sspi->regs->int_st);
		return IRQ_HANDLED;
	}
	if (spi_stat & SIRFSOC_SPI_TXFIFO_EMPTY)
		complete(&sspi->tx_done);
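	/*
	 * busy-wait until the RX IO/DMA status bit is raised before
	 * signalling rx completion
	 */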
	while (!(readl(sspi->base + sspi->regs->int_st) &
		SIRFSOC_SPI_RX_IO_DMA))
		cpu_relax();
	complete(&sspi->rx_done);
	switch (sspi->type) {
	case SIRF_REAL_SPI:
	case SIRF_USP_SPI_P2:
		writel(0x0, sspi->base + sspi->regs->int_en);
		break;
	case SIRF_USP_SPI_A7:
		writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr);
		break;
	}
	writel(readl(sspi->base + sspi->regs->int_st),
			sspi->base + sspi->regs->int_st);

	return IRQ_HANDLED;
}

static void spi_sirfsoc_dma_fini_callback(void *data)
{
	struct completion *dma_complete = data;

	complete(dma_complete);
}

static void spi_sirfsoc_cmd_transfer(struct spi_device *spi,
	struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	int timeout = t->len * 10;
	u32 cmd;

	sspi = spi_master_get_devdata(spi->master);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->txfifo_op);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + sspi->regs->txfifo_op);
	memcpy(&cmd, sspi->tx, t->len);
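	/*
	 * byte-order fixups below: for 8-bit, MSB-first transfers the command
	 * word is byte-swapped and right-aligned; for 16-bit, MSB-first
	 * 4-byte commands the two half-words are swapped
	 */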
	if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST))
		cmd = cpu_to_be32(cmd) >>
			((SIRFSOC_MAX_CMD_BYTES - t->len) * 8);
	if (sspi->word_width == 2 && t->len == 4 &&
			(!(spi->mode & SPI_LSB_FIRST)))
		cmd = ((cmd & 0xffff) << 16) | (cmd >> 16);
	writel(cmd, sspi->base + sspi->regs->spi_cmd);
	writel(SIRFSOC_SPI_FRM_END_INT_EN,
		sspi->base + sspi->regs->int_en);
	writel(SIRFSOC_SPI_CMD_TX_EN,
		sspi->base + sspi->regs->tx_rx_en);
	if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
		dev_err(&spi->dev, "cmd transfer timeout\n");
		return;
	}
	sspi->left_rx_word -= t->len;
}

static void spi_sirfsoc_dma_transfer(struct spi_device *spi,
	struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	struct dma_async_tx_descriptor *rx_desc, *tx_desc;
	int timeout = t->len * 10;

	sspi = spi_master_get_devdata(spi->master);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->rxfifo_op);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->txfifo_op);
	switch (sspi->type) {
	case SIRF_REAL_SPI:
		writel(SIRFSOC_SPI_FIFO_START,
			sspi->base + sspi->regs->rxfifo_op);
		writel(SIRFSOC_SPI_FIFO_START,
			sspi->base + sspi->regs->txfifo_op);
		writel(0, sspi->base + sspi->regs->int_en);
		break;
	case SIRF_USP_SPI_P2:
		writel(0x0, sspi->base + sspi->regs->rxfifo_op);
		writel(0x0, sspi->base + sspi->regs->txfifo_op);
		writel(0, sspi->base + sspi->regs->int_en);
		break;
	case SIRF_USP_SPI_A7:
		writel(0x0, sspi->base + sspi->regs->rxfifo_op);
		writel(0x0, sspi->base + sspi->regs->txfifo_op);
		writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr);
		break;
	}
	writel(readl(sspi->base + sspi->regs->int_st),
		sspi->base + sspi->regs->int_st);
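	/*
	 * if the transfer fits in one hardware frame, program the per-frame
	 * DMA I/O lengths; otherwise leave them at 0
	 */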
	if (sspi->left_tx_word < sspi->dat_max_frm_len) {
		switch (sspi->type) {
		case SIRF_REAL_SPI:
			writel(readl(sspi->base + sspi->regs->spi_ctrl) |
				SIRFSOC_SPI_ENA_AUTO_CLR |
				SIRFSOC_SPI_MUL_DAT_MODE,
				sspi->base + sspi->regs->spi_ctrl);
			writel(sspi->left_tx_word - 1,
				sspi->base + sspi->regs->tx_dma_io_len);
			writel(sspi->left_tx_word - 1,
				sspi->base + sspi->regs->rx_dma_io_len);
			break;
		case SIRF_USP_SPI_P2:
		case SIRF_USP_SPI_A7:
			/* USP simulating SPI: tx/rx_dma_io_len is in bytes */
			writel(sspi->left_tx_word * sspi->word_width,
				sspi->base + sspi->regs->tx_dma_io_len);
			writel(sspi->left_tx_word * sspi->word_width,
				sspi->base + sspi->regs->rx_dma_io_len);
			break;
		}
	} else {
		if (sspi->type == SIRF_REAL_SPI)
			writel(readl(sspi->base + sspi->regs->spi_ctrl),
				sspi->base + sspi->regs->spi_ctrl);
		writel(0, sspi->base + sspi->regs->tx_dma_io_len);
		writel(0, sspi->base + sspi->regs->rx_dma_io_len);
	}
	sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len,
					(t->tx_buf != t->rx_buf) ?
					DMA_FROM_DEVICE : DMA_BIDIRECTIONAL);
	rx_desc = dmaengine_prep_slave_single(sspi->rx_chan,
		sspi->dst_start, t->len, DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	rx_desc->callback = spi_sirfsoc_dma_fini_callback;
	rx_desc->callback_param = &sspi->rx_done;

	sspi->src_start = dma_map_single(&spi->dev, (void *)sspi->tx, t->len,
					(t->tx_buf != t->rx_buf) ?
					DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
	tx_desc = dmaengine_prep_slave_single(sspi->tx_chan,
		sspi->src_start, t->len, DMA_MEM_TO_DEV,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	tx_desc->callback = spi_sirfsoc_dma_fini_callback;
	tx_desc->callback_param = &sspi->tx_done;

	dmaengine_submit(tx_desc);
	dmaengine_submit(rx_desc);
	dma_async_issue_pending(sspi->tx_chan);
	dma_async_issue_pending(sspi->rx_chan);
	writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
			sspi->base + sspi->regs->tx_rx_en);
	if (sspi->type == SIRF_USP_SPI_P2 ||
		sspi->type == SIRF_USP_SPI_A7) {
		writel(SIRFSOC_SPI_FIFO_START,
			sspi->base + sspi->regs->rxfifo_op);
		writel(SIRFSOC_SPI_FIFO_START,
			sspi->base + sspi->regs->txfifo_op);
	}
	if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) {
		dev_err(&spi->dev, "transfer timeout\n");
		dmaengine_terminate_all(sspi->rx_chan);
	} else
		sspi->left_rx_word = 0;
	/*
	 * we only wait for the tx-done event when transferring by DMA; for
	 * PIO, rx data is produced by writing tx data, so once rx is done,
	 * tx finished even earlier
	 */
	if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
		dev_err(&spi->dev, "transfer timeout\n");
		if (sspi->type == SIRF_USP_SPI_P2 ||
			sspi->type == SIRF_USP_SPI_A7)
			writel(0, sspi->base + sspi->regs->tx_rx_en);
		dmaengine_terminate_all(sspi->tx_chan);
	}
	dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE);
	dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE);
	/* TX, RX FIFO stop */
	writel(0, sspi->base + sspi->regs->rxfifo_op);
	writel(0, sspi->base + sspi->regs->txfifo_op);
	if (sspi->left_tx_word >= sspi->dat_max_frm_len)
		writel(0, sspi->base + sspi->regs->tx_rx_en);
	if (sspi->type == SIRF_USP_SPI_P2 ||
		sspi->type == SIRF_USP_SPI_A7)
		writel(0, sspi->base + sspi->regs->tx_rx_en);
}

static void spi_sirfsoc_pio_transfer(struct spi_device *spi,
		struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	int timeout = t->len * 10;
	unsigned int data_units;

	sspi = spi_master_get_devdata(spi->master);
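	/*
	 * move the data in FIFO-sized chunks: fill the TX FIFO, enable the
	 * transfer, wait for completion, then drain the RX FIFO, and repeat
	 * until all words have been transferred
	 */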
	do {
		writel(SIRFSOC_SPI_FIFO_RESET,
			sspi->base + sspi->regs->rxfifo_op);
		writel(SIRFSOC_SPI_FIFO_RESET,
			sspi->base + sspi->regs->txfifo_op);
		switch (sspi->type) {
		case SIRF_USP_SPI_P2:
			writel(0x0, sspi->base + sspi->regs->rxfifo_op);
			writel(0x0, sspi->base + sspi->regs->txfifo_op);
			writel(0, sspi->base + sspi->regs->int_en);
			writel(readl(sspi->base + sspi->regs->int_st),
				sspi->base + sspi->regs->int_st);
			writel(min((sspi->left_tx_word * sspi->word_width),
				sspi->fifo_size),
				sspi->base + sspi->regs->tx_dma_io_len);
			writel(min((sspi->left_rx_word * sspi->word_width),
				sspi->fifo_size),
				sspi->base + sspi->regs->rx_dma_io_len);
			break;
		case SIRF_USP_SPI_A7:
			writel(0x0, sspi->base + sspi->regs->rxfifo_op);
			writel(0x0, sspi->base + sspi->regs->txfifo_op);
			writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr);
			writel(readl(sspi->base + sspi->regs->int_st),
				sspi->base + sspi->regs->int_st);
			writel(min((sspi->left_tx_word * sspi->word_width),
				sspi->fifo_size),
				sspi->base + sspi->regs->tx_dma_io_len);
			writel(min((sspi->left_rx_word * sspi->word_width),
				sspi->fifo_size),
				sspi->base + sspi->regs->rx_dma_io_len);
			break;
		case SIRF_REAL_SPI:
			writel(SIRFSOC_SPI_FIFO_START,
				sspi->base + sspi->regs->rxfifo_op);
			writel(SIRFSOC_SPI_FIFO_START,
				sspi->base + sspi->regs->txfifo_op);
			writel(0, sspi->base + sspi->regs->int_en);
			writel(readl(sspi->base + sspi->regs->int_st),
				sspi->base + sspi->regs->int_st);
			writel(readl(sspi->base + sspi->regs->spi_ctrl) |
				SIRFSOC_SPI_MUL_DAT_MODE |
				SIRFSOC_SPI_ENA_AUTO_CLR,
				sspi->base + sspi->regs->spi_ctrl);
			data_units = sspi->fifo_size / sspi->word_width;
			writel(min(sspi->left_tx_word, data_units) - 1,
				sspi->base + sspi->regs->tx_dma_io_len);
			writel(min(sspi->left_rx_word, data_units) - 1,
				sspi->base + sspi->regs->rx_dma_io_len);
			break;
		}
		while (!((readl(sspi->base + sspi->regs->txfifo_st)
			& SIRFSOC_SPI_FIFO_FULL_MASK(sspi))) &&
			sspi->left_tx_word)
			sspi->tx_word(sspi);
		writel(SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN |
			SIRFSOC_SPI_TX_UFLOW_INT_EN |
			SIRFSOC_SPI_RX_OFLOW_INT_EN |
			SIRFSOC_SPI_RX_IO_DMA_INT_EN,
			sspi->base + sspi->regs->int_en);
		writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
			sspi->base + sspi->regs->tx_rx_en);
		if (sspi->type == SIRF_USP_SPI_P2 ||
			sspi->type == SIRF_USP_SPI_A7) {
			writel(SIRFSOC_SPI_FIFO_START,
				sspi->base + sspi->regs->rxfifo_op);
			writel(SIRFSOC_SPI_FIFO_START,
				sspi->base + sspi->regs->txfifo_op);
		}
		if (!wait_for_completion_timeout(&sspi->tx_done, timeout) ||
			!wait_for_completion_timeout(&sspi->rx_done, timeout)) {
			dev_err(&spi->dev, "transfer timeout\n");
			if (sspi->type == SIRF_USP_SPI_P2 ||
				sspi->type == SIRF_USP_SPI_A7)
				writel(0, sspi->base + sspi->regs->tx_rx_en);
			break;
		}
		while (!((readl(sspi->base + sspi->regs->rxfifo_st)
			& SIRFSOC_SPI_FIFO_EMPTY_MASK(sspi))) &&
			sspi->left_rx_word)
			sspi->rx_word(sspi);
		if (sspi->type == SIRF_USP_SPI_P2 ||
			sspi->type == SIRF_USP_SPI_A7)
			writel(0, sspi->base + sspi->regs->tx_rx_en);
		writel(0, sspi->base + sspi->regs->rxfifo_op);
		writel(0, sspi->base + sspi->regs->txfifo_op);
	} while (sspi->left_tx_word != 0 || sspi->left_rx_word != 0);
}

static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;

	sspi = spi_master_get_devdata(spi->master);
	sspi->tx = t->tx_buf;
	sspi->rx = t->rx_buf;
	sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width;
	reinit_completion(&sspi->rx_done);
	reinit_completion(&sspi->tx_done);
	/*
	 * if the transfer sends data through the command register (tx only,
	 * rx_buf is NULL), just write the command data into the command
	 * register and wait for its completion.
	 */
	if (sspi->type == SIRF_REAL_SPI && sspi->tx_by_cmd)
		spi_sirfsoc_cmd_transfer(spi, t);
	else if (IS_DMA_VALID(t))
		spi_sirfsoc_dma_transfer(spi, t);
	else
		spi_sirfsoc_pio_transfer(spi, t);

	return t->len - sspi->left_rx_word * sspi->word_width;
}

static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
{
	struct sirfsoc_spi *sspi = spi_master_get_devdata(spi->master);

	if (sspi->hw_cs) {
		u32 regval;

		switch (sspi->type) {
		case SIRF_REAL_SPI:
			regval = readl(sspi->base + sspi->regs->spi_ctrl);
			switch (value) {
			case BITBANG_CS_ACTIVE:
				if (spi->mode & SPI_CS_HIGH)
					regval |= SIRFSOC_SPI_CS_IO_OUT;
				else
					regval &= ~SIRFSOC_SPI_CS_IO_OUT;
				break;
			case BITBANG_CS_INACTIVE:
				if (spi->mode & SPI_CS_HIGH)
					regval &= ~SIRFSOC_SPI_CS_IO_OUT;
				else
					regval |= SIRFSOC_SPI_CS_IO_OUT;
				break;
			}
			writel(regval, sspi->base + sspi->regs->spi_ctrl);
			break;
		case SIRF_USP_SPI_P2:
		case SIRF_USP_SPI_A7:
			regval = readl(sspi->base +
					sspi->regs->usp_pin_io_data);
			switch (value) {
			case BITBANG_CS_ACTIVE:
				if (spi->mode & SPI_CS_HIGH)
					regval |= SIRFSOC_USP_CS_HIGH_VALUE;
				else
					regval &= ~(SIRFSOC_USP_CS_HIGH_VALUE);
				break;
			case BITBANG_CS_INACTIVE:
				if (spi->mode & SPI_CS_HIGH)
					regval &= ~(SIRFSOC_USP_CS_HIGH_VALUE);
				else
					regval |= SIRFSOC_USP_CS_HIGH_VALUE;
				break;
			}
			writel(regval,
				sspi->base + sspi->regs->usp_pin_io_data);
			break;
		}
	} else {
		switch (value) {
		case BITBANG_CS_ACTIVE:
			gpio_direction_output(spi->cs_gpio,
					spi->mode & SPI_CS_HIGH ? 1 : 0);
			break;
		case BITBANG_CS_INACTIVE:
			gpio_direction_output(spi->cs_gpio,
					spi->mode & SPI_CS_HIGH ? 0 : 1);
			break;
		}
	}
}

static int spi_sirfsoc_config_mode(struct spi_device *spi)
{
	struct sirfsoc_spi *sspi;
	u32 regval, usp_mode1;

	sspi = spi_master_get_devdata(spi->master);
	regval = readl(sspi->base + sspi->regs->spi_ctrl);
	usp_mode1 = readl(sspi->base + sspi->regs->usp_mode1);
	if (!(spi->mode & SPI_CS_HIGH)) {
		regval |= SIRFSOC_SPI_CS_IDLE_STAT;
		usp_mode1 &= ~SIRFSOC_USP_CS_HIGH_VALID;
	} else {
		regval &= ~SIRFSOC_SPI_CS_IDLE_STAT;
		usp_mode1 |= SIRFSOC_USP_CS_HIGH_VALID;
	}
	if (!(spi->mode & SPI_LSB_FIRST)) {
		regval |= SIRFSOC_SPI_TRAN_MSB;
		usp_mode1 &= ~SIRFSOC_USP_LSB;
	} else {
		regval &= ~SIRFSOC_SPI_TRAN_MSB;
		usp_mode1 |= SIRFSOC_USP_LSB;
	}
	if (spi->mode & SPI_CPOL) {
		regval |= SIRFSOC_SPI_CLK_IDLE_STAT;
		usp_mode1 |= SIRFSOC_USP_SCLK_IDLE_STAT;
	} else {
		regval &= ~SIRFSOC_SPI_CLK_IDLE_STAT;
		usp_mode1 &= ~SIRFSOC_USP_SCLK_IDLE_STAT;
	}
	/*
	 * Data should be driven at least 1/2 cycle before the fetch edge
	 * to make sure that data gets stable at the fetch edge.
	 */
	if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) ||
	    (!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA))) {
		regval &= ~SIRFSOC_SPI_DRV_POS_EDGE;
		usp_mode1 |= (SIRFSOC_USP_TXD_FALLING_EDGE |
				SIRFSOC_USP_RXD_FALLING_EDGE);
	} else {
		regval |= SIRFSOC_SPI_DRV_POS_EDGE;
		usp_mode1 &= ~(SIRFSOC_USP_RXD_FALLING_EDGE |
				SIRFSOC_USP_TXD_FALLING_EDGE);
	}
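	/*
	 * program the TX/RX FIFO level-check thresholds (the SC/LC/HC fields
	 * of the level check registers)
	 */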
	writel((SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size - 2) <<
		SIRFSOC_SPI_FIFO_SC_OFFSET) |
		(SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size / 2) <<
		SIRFSOC_SPI_FIFO_LC_OFFSET) |
		(SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, 2) <<
		SIRFSOC_SPI_FIFO_HC_OFFSET),
		sspi->base + sspi->regs->txfifo_level_chk);
	writel((SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, 2) <<
		SIRFSOC_SPI_FIFO_SC_OFFSET) |
		(SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size / 2) <<
		SIRFSOC_SPI_FIFO_LC_OFFSET) |
		(SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size - 2) <<
		SIRFSOC_SPI_FIFO_HC_OFFSET),
		sspi->base + sspi->regs->rxfifo_level_chk);
	/*
	 * never switch to hardware CS mode here: in hardware CS mode the CS
	 * signal cannot be controlled by the driver.
	 */
	switch (sspi->type) {
	case SIRF_REAL_SPI:
		regval |= SIRFSOC_SPI_CS_IO_MODE;
		writel(regval, sspi->base + sspi->regs->spi_ctrl);
		break;
	case SIRF_USP_SPI_P2:
	case SIRF_USP_SPI_A7:
		usp_mode1 |= SIRFSOC_USP_SYNC_MODE;
		usp_mode1 |= SIRFSOC_USP_TFS_IO_MODE;
		usp_mode1 &= ~SIRFSOC_USP_TFS_IO_INPUT;
		writel(usp_mode1, sspi->base + sspi->regs->usp_mode1);
		break;
	}

	return 0;
}

static int
spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	u8 bits_per_word = 0;
	int hz = 0;
	u32 regval, txfifo_ctrl, rxfifo_ctrl, tx_frm_ctl, rx_frm_ctl, usp_mode2;

	sspi = spi_master_get_devdata(spi->master);

	bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
	hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz;

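	/* clock divisor: sclk is roughly ctrl_freq / (2 * (divisor + 1)) */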
	usp_mode2 = regval = (sspi->ctrl_freq / (2 * hz)) - 1;
	if (regval > 0xFFFF || regval < 0) {
		dev_err(&spi->dev, "Speed %d not supported\n", hz);
		return -EINVAL;
	}
	switch (bits_per_word) {
	case 8:
		regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_8;
		sspi->rx_word = spi_sirfsoc_rx_word_u8;
		sspi->tx_word = spi_sirfsoc_tx_word_u8;
		break;
	case 12:
	case 16:
		regval |= (bits_per_word == 12) ?
			SIRFSOC_SPI_TRAN_DAT_FORMAT_12 :
			SIRFSOC_SPI_TRAN_DAT_FORMAT_16;
		sspi->rx_word = spi_sirfsoc_rx_word_u16;
		sspi->tx_word = spi_sirfsoc_tx_word_u16;
		break;
	case 32:
		regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_32;
		sspi->rx_word = spi_sirfsoc_rx_word_u32;
		sspi->tx_word = spi_sirfsoc_tx_word_u32;
		break;
	default:
		dev_err(&spi->dev, "bpw %d not supported\n", bits_per_word);
		return -EINVAL;
	}
	sspi->word_width = DIV_ROUND_UP(bits_per_word, 8);
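	/*
	 * FIFO data width field: word_width >> 1 maps 1/2/4 bytes to the
	 * BYTE/WORD/DWORD encodings
	 */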
	txfifo_ctrl = (((sspi->fifo_size / 2) &
			SIRFSOC_SPI_FIFO_THD_MASK(sspi))
			<< SIRFSOC_SPI_FIFO_THD_OFFSET) |
			(sspi->word_width >> 1);
	rxfifo_ctrl = (((sspi->fifo_size / 2) &
			SIRFSOC_SPI_FIFO_THD_MASK(sspi))
			<< SIRFSOC_SPI_FIFO_THD_OFFSET) |
			(sspi->word_width >> 1);
	writel(txfifo_ctrl, sspi->base + sspi->regs->txfifo_ctrl);
	writel(rxfifo_ctrl, sspi->base + sspi->regs->rxfifo_ctrl);
	if (sspi->type == SIRF_USP_SPI_P2 ||
		sspi->type == SIRF_USP_SPI_A7) {
		tx_frm_ctl = 0;
		tx_frm_ctl |= ((bits_per_word - 1) & SIRFSOC_USP_TX_DATA_MASK)
				<< SIRFSOC_USP_TX_DATA_OFFSET;
		tx_frm_ctl |= ((bits_per_word + 1 + SIRFSOC_USP_TXD_DELAY_LEN
				- 1) & SIRFSOC_USP_TX_SYNC_MASK) <<
				SIRFSOC_USP_TX_SYNC_OFFSET;
		tx_frm_ctl |= ((bits_per_word + 1 + SIRFSOC_USP_TXD_DELAY_LEN
				+ 2 - 1) & SIRFSOC_USP_TX_FRAME_MASK) <<
				SIRFSOC_USP_TX_FRAME_OFFSET;
		tx_frm_ctl |= ((bits_per_word - 1) &
				SIRFSOC_USP_TX_SHIFTER_MASK) <<
				SIRFSOC_USP_TX_SHIFTER_OFFSET;
		rx_frm_ctl = 0;
		rx_frm_ctl |= ((bits_per_word - 1) & SIRFSOC_USP_RX_DATA_MASK)
				<< SIRFSOC_USP_RX_DATA_OFFSET;
		rx_frm_ctl |= ((bits_per_word + 1 + SIRFSOC_USP_RXD_DELAY_LEN
				+ 2 - 1) & SIRFSOC_USP_RX_FRAME_MASK) <<
				SIRFSOC_USP_RX_FRAME_OFFSET;
		rx_frm_ctl |= ((bits_per_word - 1)
				& SIRFSOC_USP_RX_SHIFTER_MASK) <<
				SIRFSOC_USP_RX_SHIFTER_OFFSET;
		writel(tx_frm_ctl | (((usp_mode2 >> 10) &
			SIRFSOC_USP_CLK_10_11_MASK) <<
			SIRFSOC_USP_CLK_10_11_OFFSET),
			sspi->base + sspi->regs->usp_tx_frame_ctrl);
		writel(rx_frm_ctl | (((usp_mode2 >> 12) &
			SIRFSOC_USP_CLK_12_15_MASK) <<
			SIRFSOC_USP_CLK_12_15_OFFSET),
			sspi->base + sspi->regs->usp_rx_frame_ctrl);
		writel(readl(sspi->base + sspi->regs->usp_mode2) |
			((usp_mode2 & SIRFSOC_USP_CLK_DIVISOR_MASK) <<
			SIRFSOC_USP_CLK_DIVISOR_OFFSET) |
			(SIRFSOC_USP_RXD_DELAY_LEN <<
			 SIRFSOC_USP_RXD_DELAY_OFFSET) |
			(SIRFSOC_USP_TXD_DELAY_LEN <<
			 SIRFSOC_USP_TXD_DELAY_OFFSET),
			sspi->base + sspi->regs->usp_mode2);
	}
	if (sspi->type == SIRF_REAL_SPI)
		writel(regval, sspi->base + sspi->regs->spi_ctrl);
	spi_sirfsoc_config_mode(spi);
	if (sspi->type == SIRF_REAL_SPI) {
		if (t && t->tx_buf && !t->rx_buf &&
			(t->len <= SIRFSOC_MAX_CMD_BYTES)) {
			sspi->tx_by_cmd = true;
			writel(readl(sspi->base + sspi->regs->spi_ctrl) |
				(SIRFSOC_SPI_CMD_BYTE_NUM((t->len - 1)) |
				SIRFSOC_SPI_CMD_MODE),
				sspi->base + sspi->regs->spi_ctrl);
		} else {
			sspi->tx_by_cmd = false;
			writel(readl(sspi->base + sspi->regs->spi_ctrl) &
				~SIRFSOC_SPI_CMD_MODE,
				sspi->base + sspi->regs->spi_ctrl);
		}
	}
	if (IS_DMA_VALID(t)) {
		/* Enable DMA mode for RX, TX */
		writel(0, sspi->base + sspi->regs->tx_dma_io_ctrl);
		writel(SIRFSOC_SPI_RX_DMA_FLUSH,
			sspi->base + sspi->regs->rx_dma_io_ctrl);
	} else {
		/* Enable IO mode for RX, TX */
		writel(SIRFSOC_SPI_IO_MODE_SEL,
			sspi->base + sspi->regs->tx_dma_io_ctrl);
		writel(SIRFSOC_SPI_IO_MODE_SEL,
			sspi->base + sspi->regs->rx_dma_io_ctrl);
	}
	return 0;
}

static int spi_sirfsoc_setup(struct spi_device *spi)
{
	struct sirfsoc_spi *sspi;
	int ret = 0;

	sspi = spi_master_get_devdata(spi->master);
	if (spi->cs_gpio == -ENOENT)
		sspi->hw_cs = true;
	else {
		sspi->hw_cs = false;
		if (!spi_get_ctldata(spi)) {
			void *cs = kmalloc(sizeof(int), GFP_KERNEL);
			if (!cs) {
				ret = -ENOMEM;
				goto exit;
			}
			ret = gpio_is_valid(spi->cs_gpio);
			if (!ret) {
				dev_err(&spi->dev, "no valid gpio\n");
				kfree(cs);
				ret = -ENOENT;
				goto exit;
			}
			ret = gpio_request(spi->cs_gpio, DRIVER_NAME);
			if (ret) {
				dev_err(&spi->dev, "failed to request gpio\n");
				kfree(cs);
				goto exit;
			}
			spi_set_ctldata(spi, cs);
		}
	}
	spi_sirfsoc_config_mode(spi);
	spi_sirfsoc_chipselect(spi, BITBANG_CS_INACTIVE);
exit:
	return ret;
}

static void spi_sirfsoc_cleanup(struct spi_device *spi)
{
	if (spi_get_ctldata(spi)) {
		gpio_free(spi->cs_gpio);
		kfree(spi_get_ctldata(spi));
	}
}

static const struct sirf_spi_comp_data sirf_real_spi = {
	.regs = &real_spi_register,
	.type = SIRF_REAL_SPI,
	.dat_max_frm_len = 64 * 1024,
	.fifo_size = 256,
};

static const struct sirf_spi_comp_data sirf_usp_spi_p2 = {
	.regs = &usp_spi_register,
	.type = SIRF_USP_SPI_P2,
	.dat_max_frm_len = 1024 * 1024,
	.fifo_size = 128,
	.hwinit = sirfsoc_usp_hwinit,
};

static const struct sirf_spi_comp_data sirf_usp_spi_a7 = {
	.regs = &usp_spi_register,
	.type = SIRF_USP_SPI_A7,
	.dat_max_frm_len = 1024 * 1024,
	.fifo_size = 512,
	.hwinit = sirfsoc_usp_hwinit,
};

static const struct of_device_id spi_sirfsoc_of_match[] = {
	{ .compatible = "sirf,prima2-spi", .data = &sirf_real_spi},
	{ .compatible = "sirf,prima2-usp-spi", .data = &sirf_usp_spi_p2},
	{ .compatible = "sirf,atlas7-usp-spi", .data = &sirf_usp_spi_a7},
	{}
};
MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);

static int spi_sirfsoc_probe(struct platform_device *pdev)
{
	struct sirfsoc_spi *sspi;
	struct spi_master *master;
	struct resource *mem_res;
	struct sirf_spi_comp_data *spi_comp_data;
	int irq;
	int ret;
	const struct of_device_id *match;

	ret = device_reset(&pdev->dev);
	if (ret) {
		dev_err(&pdev->dev, "SPI reset failed!\n");
		return ret;
	}

	master = spi_alloc_master(&pdev->dev, sizeof(*sspi));
	if (!master) {
		dev_err(&pdev->dev, "Unable to allocate SPI master\n");
		return -ENOMEM;
	}
	match = of_match_node(spi_sirfsoc_of_match, pdev->dev.of_node);
	platform_set_drvdata(pdev, master);
	sspi = spi_master_get_devdata(master);
	spi_comp_data = (struct sirf_spi_comp_data *)match->data;
	sspi->regs = spi_comp_data->regs;
	sspi->type = spi_comp_data->type;
	sspi->dat_max_frm_len = spi_comp_data->dat_max_frm_len;
	sspi->fifo_size = spi_comp_data->fifo_size;
	sspi->fifo_full_offset = ilog2(sspi->fifo_size);
	sspi->fifo_level_chk_mask = (sspi->fifo_size / 4) - 1;
	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sspi->base = devm_ioremap_resource(&pdev->dev, mem_res);
	if (IS_ERR(sspi->base)) {
		ret = PTR_ERR(sspi->base);
		goto free_master;
	}
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = -ENXIO;
		goto free_master;
	}
	ret = devm_request_irq(&pdev->dev, irq, spi_sirfsoc_irq, 0,
				DRIVER_NAME, sspi);
	if (ret)
		goto free_master;

	sspi->bitbang.master = master;
	sspi->bitbang.chipselect = spi_sirfsoc_chipselect;
	sspi->bitbang.setup_transfer = spi_sirfsoc_setup_transfer;
	sspi->bitbang.txrx_bufs = spi_sirfsoc_transfer;
	sspi->bitbang.master->setup = spi_sirfsoc_setup;
	sspi->bitbang.master->cleanup = spi_sirfsoc_cleanup;
	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH;
	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(12) |
					SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
	master->max_speed_hz = SIRFSOC_SPI_DEFAULT_FRQ;
	master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
	sspi->bitbang.master->dev.of_node = pdev->dev.of_node;

	/* request DMA channels */
	sspi->rx_chan = dma_request_slave_channel(&pdev->dev, "rx");
	if (!sspi->rx_chan) {
		dev_err(&pdev->dev, "can not allocate rx dma channel\n");
		ret = -ENODEV;
		goto free_master;
	}
	sspi->tx_chan = dma_request_slave_channel(&pdev->dev, "tx");
	if (!sspi->tx_chan) {
		dev_err(&pdev->dev, "can not allocate tx dma channel\n");
		ret = -ENODEV;
		goto free_rx_dma;
	}

	sspi->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(sspi->clk)) {
		ret = PTR_ERR(sspi->clk);
		goto free_tx_dma;
	}
	clk_prepare_enable(sspi->clk);
	if (spi_comp_data->hwinit)
		spi_comp_data->hwinit(sspi);
	sspi->ctrl_freq = clk_get_rate(sspi->clk);

	init_completion(&sspi->rx_done);
	init_completion(&sspi->tx_done);

	ret = spi_bitbang_start(&sspi->bitbang);
	if (ret)
		goto free_clk;
	dev_info(&pdev->dev, "registered, bus number = %d\n", master->bus_num);

	return 0;
free_clk:
	clk_disable_unprepare(sspi->clk);
	clk_put(sspi->clk);
free_tx_dma:
	dma_release_channel(sspi->tx_chan);
free_rx_dma:
	dma_release_channel(sspi->rx_chan);
free_master:
	spi_master_put(master);

	return ret;
}

static int spi_sirfsoc_remove(struct platform_device *pdev)
{
	struct spi_master *master;
	struct sirfsoc_spi *sspi;

	master = platform_get_drvdata(pdev);
	sspi = spi_master_get_devdata(master);
	spi_bitbang_stop(&sspi->bitbang);
	clk_disable_unprepare(sspi->clk);
	clk_put(sspi->clk);
	dma_release_channel(sspi->rx_chan);
	dma_release_channel(sspi->tx_chan);
	spi_master_put(master);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int spi_sirfsoc_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
	int ret;

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	clk_disable(sspi->clk);
	return 0;
}

static int spi_sirfsoc_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct sirfsoc_spi *sspi = spi_master_get_devdata(master);

	clk_enable(sspi->clk);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->txfifo_op);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->rxfifo_op);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + sspi->regs->txfifo_op);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + sspi->regs->rxfifo_op);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(spi_sirfsoc_pm_ops, spi_sirfsoc_suspend,
			 spi_sirfsoc_resume);

static struct platform_driver spi_sirfsoc_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.pm     = &spi_sirfsoc_pm_ops,
		.of_match_table = spi_sirfsoc_of_match,
	},
	.probe = spi_sirfsoc_probe,
	.remove = spi_sirfsoc_remove,
};
module_platform_driver(spi_sirfsoc_driver);
MODULE_DESCRIPTION("SiRF SoC SPI master driver");
MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>");
MODULE_AUTHOR("Barry Song <Baohua.Song@csr.com>");
MODULE_AUTHOR("Qipan Li <Qipan.Li@csr.com>");
MODULE_LICENSE("GPL v2");