1 /*
2  * drivers/spi/spi-sunxi.c
3  *
4  * Copyright (C) 2012 - 2016 Reuuimlla Limited
5  * Pan Nan <pannan@reuuimllatech.com>
6  *
7  * SUNXI SPI Controller Driver
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License as
11  * published by the Free Software Foundation; either version 2 of
12  * the License, or (at your option) any later version.
13  *
14  * 2013.5.7 Mintow <duanmintao@allwinnertech.com>
15  *    Adapt to support sun8i/sun9i of Allwinner.
16  *
17  * 2021-3-2 liuyu <liuyu@allwinnertech.com>
18  *	1 : use the new kernel framework for transfers
19  *	2 : support soft cs gpio
20  *	3 : delete unused dts node : cs_bitmap
21  */
22 
23 #include <linux/init.h>
24 #include <linux/module.h>
25 #include <linux/spinlock.h>
26 #include <linux/interrupt.h>
27 #include <linux/delay.h>
28 #include <linux/errno.h>
29 #include <linux/err.h>
30 #include <linux/clk.h>
31 #include <linux/reset.h>
32 #include <linux/pinctrl/consumer.h>
33 #include <linux/spi/spi.h>
34 #include <linux/gpio.h>
35 #include <linux/platform_device.h>
36 #include <linux/spi/spi_bitbang.h>
37 #include <asm/cacheflush.h>
38 #include <asm/io.h>
39 #include <asm/uaccess.h>
40 #include <linux/sched.h>
41 #include <linux/kthread.h>
42 #include <linux/signal.h>
43 #include <linux/dmaengine.h>
44 #include <linux/dma-mapping.h>
45 //#include <sunxi-clk.h>
46 #include <linux/regulator/consumer.h>
47 #include "spi-sunxi.h"
48 #include "spi-slave-protocol.h"
49 
50 /* For debug */
51 #define SPI_ERR(fmt, arg...)	pr_warn("%s()%d - "fmt, __func__, __LINE__, ##arg)
52 
53 static u32 debug_mask = 1;
54 #define dprintk(level_mask, fmt, arg...)				\
55 do {									\
56 	if (unlikely(debug_mask & level_mask))				\
57 		pr_warn("%s()%d - "fmt, __func__, __LINE__, ##arg);	\
58 } while (0)
59 
60 #define SUNXI_SPI_OK   0
61 #define SUNXI_SPI_FAIL -1
62 
63 #define XFER_TIMEOUT	5000
64 
65 enum spi_mode_type {
66 	SINGLE_HALF_DUPLEX_RX,		/* single mode, half duplex read */
67 	SINGLE_HALF_DUPLEX_TX,		/* single mode, half duplex write */
68 	SINGLE_FULL_DUPLEX_RX_TX,	/* single mode, full duplex read and write */
69 	DUAL_HALF_DUPLEX_RX,		/* dual mode, half duplex read */
70 	DUAL_HALF_DUPLEX_TX,		/* dual mode, half duplex write */
71 	QUAD_HALF_DUPLEX_RX,		/* quad mode, half duplex read */
72 	QUAD_HALF_DUPLEX_TX,		/* quad mode, half duplex write */
73 	MODE_TYPE_NULL,
74 };
75 
76 #if IS_ENABLED(CONFIG_DMA_ENGINE)
77 
78 #define SPI_MAX_PAGES	100
79 enum spi_dma_dir {
80 	SPI_DMA_RWNULL,
81 	SPI_DMA_WDEV = DMA_TO_DEVICE,
82 	SPI_DMA_RDEV = DMA_FROM_DEVICE,
83 };
84 
85 typedef struct {
86 	enum spi_dma_dir dir;
87 	struct dma_chan *chan;
88 	int nents;
89 	struct scatterlist sg[SPI_MAX_PAGES];
90 	struct page *pages[SPI_MAX_PAGES];
91 } spi_dma_info_t;
92 
93 u64 sunxi_spi_dma_mask = DMA_BIT_MASK(32);
94 
95 #endif
96 
97 struct sunxi_spi {
98 	#define SPI_FREE   (1<<0)
99 	#define SPI_SUSPND (1<<1)
100 	#define SPI_BUSY   (1<<2)
101 
102 #if IS_ENABLED(CONFIG_DMA_ENGINE)
103 	spi_dma_info_t dma_rx;
104 	spi_dma_info_t dma_tx;
105 #endif
106 
107 	struct platform_device *pdev;
108 	struct spi_master *master;/* kzalloc */
109 	struct spi_device *spi;
110 	struct spi_dbi_config *dbi_config;
111 	struct sunxi_slave *slave;
112 	struct pinctrl		 *pctrl;
113 
114 	struct clk *pclk;  /* PLL clock */
115 	struct clk *mclk;  /* spi module clock */
116 	struct clk *bus_clk; /*spi bus clock*/
117 	struct reset_control *reset; /*reset clock*/
118 
119 	struct task_struct *task;
120 
121 	struct completion done;  /* wake up another spi transfer */
122 	spinlock_t lock;
123 
124 	char dev_name[48];
125 	enum spi_mode_type mode_type;
126 	void __iomem *base_addr; /* register */
127 
128 	u32 base_addr_phy;
129 	u32 mode; /* 0: master mode, 1: slave mode */
130 	u32 irq; /* irq NO. */
131 	int busy;
132 	int result; /* 0: succeed -1:fail */
133 	int task_flag;
134 	int dbi_enabled;
135 	u32 sample_mode;
136 	u32 sample_delay;
137 };
138 
139 int spi_get_dbi_config(const struct spi_device *spi, struct spi_dbi_config *dbi_config)
140 {
141 	struct sunxi_spi *sspi = spi->master->dev.driver_data;
142 
143 	if (!sspi->dbi_enabled)
144 		return -EINVAL;
145 
146 	memcpy(dbi_config, sspi->dbi_config, sizeof(struct spi_dbi_config));
147 	return 0;
148 }
149 EXPORT_SYMBOL_GPL(spi_get_dbi_config);
150 
151 int spi_set_dbi_config(struct spi_device *spi, const struct spi_dbi_config *dbi_config)
152 {
153 	struct sunxi_spi *sspi = spi->master->dev.driver_data;
154 
155 	if (!sspi->dbi_enabled)
156 		return -EINVAL;
157 
158 	memcpy(sspi->dbi_config, dbi_config, sizeof(struct spi_dbi_config));
159 	return 0;
160 }
161 EXPORT_SYMBOL_GPL(spi_set_dbi_config);
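
/*
 * Illustrative sketch (not part of the driver): how a client such as a DBI
 * panel driver might use the two exported helpers above. The panel_setup()
 * wrapper and its error handling are hypothetical; the config fields are the
 * ones this driver itself reads (dbi_format, dbi_mode).
 */
#if 0
static int panel_setup(struct spi_device *spi)
{
	struct spi_dbi_config cfg;
	int ret;

	ret = spi_get_dbi_config(spi, &cfg);	/* -EINVAL unless DBI is enabled */
	if (ret)
		return ret;

	cfg.dbi_format = DBI_RGB565;		/* change only what we need */
	cfg.dbi_mode |= SPI_DBI_TRANSMIT_VIDEO_;
	return spi_set_dbi_config(spi, &cfg);
}
#endif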
162 
163 void spi_dump_reg(struct sunxi_spi *sspi, u32 offset, u32 len)
164 {
165 	u32 i;
166 	u8 buf[64], cnt = 0;
167 
168 	for (i = 0; i < len; i = i + REG_INTERVAL) {
169 		if (i%HEXADECIMAL == 0)
170 			cnt += sprintf(buf + cnt, "0x%08x: ",
171 					(u32)(sspi->base_addr_phy  + offset + i));
172 
173 		cnt += sprintf(buf + cnt, "%08x ",
174 				readl(sspi->base_addr + offset + i));
175 
176 		if (i%HEXADECIMAL == REG_CL) {
177 			pr_warn("%s\n", buf);
178 			cnt = 0;
179 		}
180 	}
181 }
182 
183 void spi_dump_data(u8 *buf, u32 len)
184 {
185 	u32 i, cnt = 0;
186 	u8 *tmp;
187 
188 	tmp = kzalloc(len, GFP_KERNEL);
189 	if (!tmp)
190 		return;
191 
192 	for (i = 0; i < len; i++) {
193 		if (i%HEXADECIMAL == 0)
194 			cnt += sprintf(tmp + cnt, "0x%08x: ", i);
195 
196 		cnt += sprintf(tmp + cnt, "%02x ", buf[i]);
197 
198 		if ((i%HEXADECIMAL == REG_END) || (i == (len - 1))) {
199 			pr_warn("%s\n", tmp);
200 			cnt = 0;
201 		}
202 	}
203 
204 	kfree(tmp);
205 }
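
/*
 * Illustrative sketch (not part of the driver): the two dump helpers above
 * are meant to be gated by debug_mask, exactly as sunxi_spi_transfer_one()
 * does further down; "t" here stands for a spi_transfer in scope.
 */
#if 0
	if ((debug_mask & DEBUG_INIT) && (debug_mask & DEBUG_DATA)) {
		spi_dump_reg(sspi, 0, 0x40);			/* SPI register block */
		spi_dump_data((u8 *)t->rx_buf, t->len);		/* payload just received */
	}
#endif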
206 
207 static void dbi_disable_irq(u32 bitmap, void __iomem *base_addr)
208 {
209 	u32 reg_val = readl(base_addr + SPI_DBI_INT_REG);
210 
211 	bitmap &= DBI_INT_STA_MASK;
212 	reg_val &= ~bitmap;
213 	writel(reg_val, base_addr + SPI_DBI_INT_REG);
214 }
215 
216 static void dbi_enable_irq(u32 bitmap, void __iomem *base_addr)
217 {
218 	u32 reg_val = readl(base_addr + SPI_DBI_INT_REG);
219 
220 
221 	bitmap &= DBI_INT_STA_MASK;
222 	reg_val |= bitmap;
223 	writel(reg_val, base_addr + SPI_DBI_INT_REG);
224 }
225 
226 static s32 set_dbi_timer_param(struct sunxi_spi *sspi, struct spi_device *spi)
227 {
228 	u32 timer_val = 0, pixel_cycle = 0;
229 	s32 ret = -1;
230 	void __iomem *base_addr = NULL;
231 
232 	if (!sspi || !sspi->base_addr)
233 		goto OUT;
234 
235 	base_addr = sspi->base_addr;
236 
237 	goto OUT; /* not used yet: skip the timer setup below */
238 
239 	if (sspi->dbi_config->dbi_te_en || !sspi->dbi_config->dbi_fps) {
240 		writel(0x0, base_addr + SPI_DBI_TIMER_REG);
241 		goto OUT;
242 	}
243 
244 	if (sspi->dbi_config->dbi_interface == D2LI) {
245 		switch (sspi->dbi_config->dbi_format) {
246 		case DBI_RGB111:
247 			pixel_cycle = 8;
248 			break;
249 		case DBI_RGB444:
250 		case DBI_RGB565:
251 			pixel_cycle = 9;
252 			break;
253 		case DBI_RGB666:
254 			pixel_cycle = 10;
255 			break;
256 		case DBI_RGB888:
257 			pixel_cycle = 13;
258 			break;
259 		default:
260 			break;
261 		}
262 	} else {
263 		switch (sspi->dbi_config->dbi_format) {
264 		case DBI_RGB111:
265 			pixel_cycle = 8;
266 			break;
267 		case DBI_RGB444:
268 			pixel_cycle = 12;
269 			break;
270 		case DBI_RGB565:
271 			pixel_cycle = 16;
272 			break;
273 		case DBI_RGB666:
274 		case DBI_RGB888:
275 			pixel_cycle = 24;
276 			break;
277 		default:
278 			break;
279 		}
280 	}
281 	timer_val = spi->max_speed_hz / sspi->dbi_config->dbi_fps -
282 		    pixel_cycle * sspi->dbi_config->dbi_video_h * sspi->dbi_config->dbi_video_v;
283 
284 	timer_val |= 0x80000000;
285 	writel(timer_val, base_addr + SPI_DBI_TIMER_REG);
286 	ret = 0;
287 
288 OUT:
289 	return ret;
290 }
291 
292 /* config dbi */
293 static void spi_config_dbi(struct sunxi_spi *sspi, struct spi_device *spi)
294 {
295 	u32 reg_val = 0;
296 	u32 reg_tmp = 0;
297 	u32 config = sspi->dbi_config->dbi_mode;
298 	void __iomem *base_addr = sspi->base_addr;
299 
300 	/*1. command type */
301 	if (config & SPI_DBI_COMMAND_READ_) {
302 		reg_val |= DBI_CR_READ;
303 		reg_tmp = readl(base_addr + SPI_DBI_CR_REG1);
304 		writel(reg_tmp | sspi->dbi_config->dbi_read_bytes,
305 			base_addr + SPI_DBI_CR_REG1);
306 	} else
307 		reg_val &= ~DBI_CR_READ;
308 
309 	/*3. output data sequence */
310 	if (config & SPI_DBI_LSB_FIRST_)
311 		reg_val |= DBI_CR_LSB_FIRST;
312 	else
313 		reg_val &= ~DBI_CR_LSB_FIRST;
314 
315 	/*4. transmit data type */
316 	if (config & SPI_DBI_TRANSMIT_VIDEO_) {
317 		reg_val |= DBI_CR_TRANSMIT_MODE;
318 		writel((sspi->dbi_config->dbi_video_v << 16)|(sspi->dbi_config->dbi_video_h),
319 			base_addr + SPI_DBI_VIDEO_SIZE);
320 		if (sspi->dbi_config->dbi_te_en)
321 			dbi_enable_irq(DBI_TE_INT_EN, base_addr);
322 		else
323 			dbi_enable_irq(DBI_FRAM_DONE_INT_EN, base_addr);
324 	} else {
325 		reg_val &= ~DBI_CR_TRANSMIT_MODE;
326 
327 		writel(0x0, base_addr + SPI_DBI_VIDEO_SIZE);
328 		dbi_disable_irq(DBI_FRAM_DONE_INT_EN | DBI_TE_INT_EN, base_addr);
329 		dbi_enable_irq(DBI_FIFO_EMPTY_INT_EN, base_addr);
330 	}
331 
332 
333 	/*5. output data format */
334 	reg_val &= ~(DBI_CR_FORMAT_MASK);
335 	if (sspi->dbi_config->dbi_format == DBI_RGB111)
336 		reg_val &= ~(0x7 << DBI_CR_FORMAT);
337 	else
338 		reg_val |= ((sspi->dbi_config->dbi_format) << DBI_CR_FORMAT);
339 
340 	/*6. dbi interface select */
341 	reg_val &= ~(DBI_CR_INTERFACE_MASK);
342 
343 	if (sspi->dbi_config->dbi_interface == L3I1)
344 		reg_val &= ~((0x7) << DBI_CR_INTERFACE);
345 	else
346 		reg_val |= ((sspi->dbi_config->dbi_interface) << DBI_CR_INTERFACE);
347 
348 	if (sspi->dbi_config->dbi_format <= DBI_RGB565)
349 		reg_val |= 0x1;
350 	else
351 		reg_val &= ~0x1;
352 
353 	if (sspi->dbi_config->dbi_out_sequence == DBI_OUT_RGB)
354 		reg_val &= ~((0x7) << 16);
355 	else
356 		reg_val |= ((sspi->dbi_config->dbi_out_sequence) << 16);
357 
358 	if (sspi->dbi_config->dbi_src_sequence == DBI_SRC_RGB)
359 		reg_val &= ~((0xf) << 4);
360 	else
361 		reg_val |= ((sspi->dbi_config->dbi_src_sequence) << 4);
362 
363 	if (sspi->dbi_config->dbi_rgb_bit_order == 1)
364 		reg_val |= ((0x1) << 2);
365 	else
366 		reg_val &= ~((0x1) << 2);
367 
368 	if (sspi->dbi_config->dbi_rgb32_alpha_pos == 1)
369 		reg_val |= ((0x1) << 1);
370 	else
371 		reg_val &= ~((0x1) << 1);
372 
373 	writel(reg_val, base_addr + SPI_DBI_CR_REG);
374 
375 	reg_val = 0;
376 
377 	if (sspi->dbi_config->dbi_interface == D2LI) {
378 		reg_val |= DBI_CR2_DCX_PIN;
379 		reg_val &= ~DBI_CR2_SDI_PIN;
380 	} else {
381 		reg_val |= DBI_CR2_SDI_PIN;
382 		reg_val &= ~DBI_CR2_DCX_PIN;
383 	}
384 
385 	if ((sspi->dbi_config->dbi_te_en == DBI_TE_DISABLE) ||
386 	    !(config & SPI_DBI_TRANSMIT_VIDEO_)) {
387 		reg_val &= ~(0x3 << 0); // te disable
388 	} else {
389 		/*te enable*/
390 		reg_val |= 0x1;
391 		if (sspi->dbi_config->dbi_te_en == DBI_TE_FALLING_EDGE)
392 			reg_val |= (0x1 << 1);
393 		else
394 			reg_val &= ~(0x1 << 1);
395 	}
396 
397 	writel(reg_val, base_addr + SPI_DBI_CR_REG2);
398 
399 
400 	dprintk(DEBUG_INFO, "DBI mode configuration: %x\n", reg_val);
401 
402 	reg_val = 0;
403 	if (config & SPI_DBI_DCX_DATA_)
404 		reg_val |= DBI_CR1_DCX_DATA;
405 	else
406 		reg_val &= ~DBI_CR1_DCX_DATA;
407 
408 	if (sspi->dbi_config->dbi_rgb16_pixel_endian == 1)
409 		reg_val |= ((0x1) << 21);
410 	else
411 		reg_val &= ~((0x1) << 21);
412 
413 	/* dbi en mode sel */
414 	if ((sspi->dbi_config->dbi_te_en == DBI_TE_DISABLE) ||
415 	    !(config & SPI_DBI_TRANSMIT_VIDEO_)) {
416 		if (!set_dbi_timer_param(sspi, spi))
417 			reg_val |= (0x2 << 29); // timer trigger mode
418 		else
419 			reg_val &= ~(0x3 << 29); // always on mode
420 	} else {
421 		/*te trigger mode */
422 		reg_val |= ((0x3) << 29);
423 	}
424 
425 	/* config dbi clock mode: auto gating */
426 	if (sspi->dbi_config->dbi_clk_out_mode == SPI_DBI_CLK_ALWAYS_ON)
427 		reg_val &= ~(DBI_CR1_CLK_AUTO);
428 	else
429 		reg_val |= DBI_CR1_CLK_AUTO;
430 
431 	writel(reg_val, base_addr + SPI_DBI_CR_REG1);
432 
433 	if ((debug_mask & DEBUG_INIT) && (debug_mask & DEBUG_DATA)) {
434 		dprintk(DEBUG_DATA, "[spi%d] dbi register dump reg:\n", sspi->master->bus_num);
435 		spi_dump_reg(sspi, 0x100, 0x30);
436 	}
437 }
438 
439 /* enable spi dbi */
440 static void spi_enable_dbi(void __iomem *base_addr)
441 {
442 	u32 reg_val = readl(base_addr + SPI_GC_REG);
443 
444 	reg_val |= SPI_GC_DBI_EN;
445 	writel(reg_val, base_addr + SPI_GC_REG);
446 }
447 
448 /* set dbi mode */
449 static void spi_set_dbi(void __iomem *base_addr)
450 {
451 	u32 reg_val = readl(base_addr + SPI_GC_REG);
452 
453 	reg_val |= SPI_GC_DBI_MODE_SEL;
454 	writel(reg_val, base_addr + SPI_GC_REG);
455 }
456 
457 /* spi controller configures the chip select;
458  * only spi-controller cs mode can use this function
459  */
460 static s32 sunxi_spi_ss_select(u32 chipselect, void __iomem *base_addr)
461 {
462 	char ret;
463 	u32 reg_val = readl(base_addr + SPI_TC_REG);
464 
465 	if (chipselect < 4) {
466 		reg_val &= ~SPI_TC_SS_MASK;/* SS-chip select, clear two bits */
467 		reg_val |= chipselect << SPI_TC_SS_BIT_POS;/* set chip select */
468 		writel(reg_val, base_addr + SPI_TC_REG);
469 		ret = SUNXI_SPI_OK;
470 	} else {
471 		SPI_ERR("Chip Select set fail! cs = %d\n", chipselect);
472 		ret = SUNXI_SPI_FAIL;
473 	}
474 
475 	return ret;
476 }
477 
478 /* config spi */
479 static void spi_config_tc(u32 master, u32 config, void __iomem *base_addr)
480 {
481 	u32 reg_val = readl(base_addr + SPI_TC_REG);
482 
483 	/*1. POL */
484 	if (config & SPI_POL_ACTIVE_)
485 		reg_val |= SPI_TC_POL;/*default POL = 1 */
486 	else
487 		reg_val &= ~SPI_TC_POL;
488 
489 	/*2. PHA */
490 	if (config & SPI_PHA_ACTIVE_)
491 		reg_val |= SPI_TC_PHA;/*default PHA = 1 */
492 	else
493 		reg_val &= ~SPI_TC_PHA;
494 
495 	/*3. SSPOL,chip select signal polarity */
496 	if (config & SPI_CS_HIGH_ACTIVE_)
497 		reg_val &= ~SPI_TC_SPOL;
498 	else
499 		reg_val |= SPI_TC_SPOL; /*default SSPOL = 1,Low level effect */
500 
501 	/*4. LMTF--LSB/MSB transfer first select */
502 	if (config & SPI_LSB_FIRST_ACTIVE_)
503 		reg_val |= SPI_TC_FBS;
504 	else
505 		reg_val &= ~SPI_TC_FBS;/*default LMTF =0, MSB first */
506 
507 	/*master mode: set DDB,DHB,SMC,SSCTL*/
508 	if (master == 1) {
509 		/*5. dummy burst type */
510 		if (config & SPI_DUMMY_ONE_ACTIVE_)
511 			reg_val |= SPI_TC_DDB;
512 		else
513 			reg_val &= ~SPI_TC_DDB;/*default DDB =0, ZERO */
514 
515 		/*6.discard hash burst-DHB */
516 		if (config & SPI_RECEIVE_ALL_ACTIVE_)
517 			reg_val &= ~SPI_TC_DHB;
518 		else
519 			reg_val |= SPI_TC_DHB;/*default DHB =1, discard unused burst */
520 
521 		/*7. set SMC = 1 , SSCTL = 0 ,TPE = 1 */
522 		reg_val &= ~SPI_TC_SSCTL;
523 	} else {
524 		/* tips for slave mode config */
525 		dprintk(DEBUG_INFO, "slave mode: configure control register\n");
526 	}
527 
528 	writel(reg_val, base_addr + SPI_TC_REG);
529 }
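
/*
 * Illustrative sketch (not part of the driver): sunxi_spi_xfer_setup() below
 * passes the Linux core's spi->mode word straight into spi_config_tc(), so
 * the SPI_*_ACTIVE_ flags are assumed to line up with the standard
 * SPI_CPOL/SPI_CPHA/SPI_CS_HIGH/SPI_LSB_FIRST bits. A mode-3, MSB-first
 * master would be configured as:
 */
#if 0
	spi_config_tc(1, SPI_CPOL | SPI_CPHA, sspi->base_addr);
#endif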
530 
531 /* set spi clock */
532 static void spi_set_clk(u32 spi_clk, u32 ahb_clk, struct sunxi_spi *sspi)
533 {
534 	dprintk(DEBUG_INFO, "set spi clock %d, mclk %d\n", spi_clk, ahb_clk);
535 
536 	clk_set_rate(sspi->mclk, spi_clk);
537 	if (clk_get_rate(sspi->mclk) != spi_clk) {
538 		clk_set_rate(sspi->mclk, ahb_clk);
539 		SPI_ERR("[spi%d] set spi clock failed, use clk:%d\n",
540 				sspi->master->bus_num, ahb_clk);
541 	}
542 }
543 
544 /* delay internal read sample point*/
545 static void spi_sample_delay(u32 sdm, u32 sdc, u32 sdc1,
546 					void __iomem *base_addr)
547 {
548 	u32 reg_val = readl(base_addr + SPI_TC_REG);
549 	u32 org_val = reg_val;
550 
551 	if (sdm)
552 		reg_val |= SPI_TC_SDM;
553 	else
554 		reg_val &= ~SPI_TC_SDM;
555 
556 	if (sdc)
557 		reg_val |= SPI_TC_SDC;
558 	else
559 		reg_val &= ~SPI_TC_SDC;
560 
561 	if (sdc1)
562 		reg_val |= SPI_TC_SDC1;
563 	else
564 		reg_val &= ~SPI_TC_SDC1;
565 
566 	if (reg_val != org_val)
567 		writel(reg_val, base_addr + SPI_TC_REG);
568 }
569 
570 static void spi_set_sample_mode(unsigned int mode, void __iomem *base_addr)
571 {
572 	unsigned int sample_mode[7] = {
573 		DELAY_NORMAL_SAMPLE, DELAY_0_5_CYCLE_SAMPLE,
574 		DELAY_1_CYCLE_SAMPLE, DELAY_1_5_CYCLE_SAMPLE,
575 		DELAY_2_CYCLE_SAMPLE, DELAY_2_5_CYCLE_SAMPLE,
576 		DELAY_3_CYCLE_SAMPLE
577 	};
578 	spi_sample_delay((sample_mode[mode] >> DELAY_SDM_POS) & 0xf,
579 			(sample_mode[mode] >> DELAY_SDC_POS) & 0xf,
580 			(sample_mode[mode] >> DELAY_SDC1_POS) & 0xf,
581 			base_addr);
582 }
583 
584 static void spi_samp_dl_sw_status(unsigned int status, void __iomem *base_addr)
585 {
586 	unsigned int rval = readl(base_addr + SPI_SAMPLE_DELAY_REG);
587 
588 	if (status)
589 		rval |= SPI_SAMP_DL_SW_EN;
590 	else
591 		rval &= ~SPI_SAMP_DL_SW_EN;
592 
593 	writel(rval, base_addr + SPI_SAMPLE_DELAY_REG);
594 }
595 
596 static void spi_samp_mode_enable(unsigned int status, void __iomem *base_addr)
597 {
598 	unsigned int rval = readl(base_addr + SPI_GC_REG);
599 
600 	if (status)
601 		rval |= SPI_SAMP_MODE_EN;
602 	else
603 		rval &= ~SPI_SAMP_MODE_EN;
604 
605 	writel(rval, base_addr + SPI_GC_REG);
606 }
607 
608 static void spi_set_sample_delay(unsigned int sample_delay,
609 		void __iomem *base_addr)
610 {
611 	unsigned int rval = readl(base_addr + SPI_SAMPLE_DELAY_REG)
612 					& (~(0x3f << 0));
613 
614 	rval |= sample_delay;
615 	writel(rval, base_addr + SPI_SAMPLE_DELAY_REG);
616 }
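
/*
 * Illustrative sketch (not part of the driver): the manual sample-delay path
 * chains the four helpers above, exactly as sunxi_spi_xfer_setup() does
 * further down when sample_delay is not SAMP_MODE_DL_DEFAULT:
 */
#if 0
	spi_samp_mode_enable(1, base_addr);		/* new sample mode on */
	spi_samp_dl_sw_status(1, base_addr);		/* software-controlled delay */
	spi_set_sample_mode(sspi->sample_mode, base_addr);
	spi_set_sample_delay(sspi->sample_delay, base_addr);
#endif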
617 
618 /* start spi transfer */
619 static void spi_start_xfer(void __iomem *base_addr)
620 {
621 	u32 reg_val = readl(base_addr + SPI_TC_REG);
622 
623 	reg_val |= SPI_TC_XCH;
624 	writel(reg_val, base_addr + SPI_TC_REG);
625 }
626 
627 /* enable spi bus */
628 static void spi_enable_bus(void __iomem *base_addr)
629 {
630 	u32 reg_val = readl(base_addr + SPI_GC_REG);
631 
632 	reg_val |= SPI_GC_EN;
633 	writel(reg_val, base_addr + SPI_GC_REG);
634 }
635 
636 /* disable spi bus */
637 static void spi_disable_bus(void __iomem *base_addr)
638 {
639 	u32 reg_val = readl(base_addr + SPI_GC_REG);
640 
641 	reg_val &= ~SPI_GC_EN;
642 	writel(reg_val, base_addr + SPI_GC_REG);
643 }
644 
645 /* set master mode */
646 static void spi_set_master(void __iomem *base_addr)
647 {
648 	u32 reg_val = readl(base_addr + SPI_GC_REG);
649 
650 	reg_val |= SPI_GC_MODE;
651 	writel(reg_val, base_addr + SPI_GC_REG);
652 }
653 
654 /* set slave mode */
655 static void spi_set_slave(void __iomem *base_addr)
656 {
657 	u32 reg_val = readl(base_addr + SPI_GC_REG);
658 	u32 val = SPI_GC_MODE;
659 
660 	reg_val &= ~val;
661 	writel(reg_val, base_addr + SPI_GC_REG);
662 }
663 
664 /* enable transmit pause */
665 static void spi_enable_tp(void __iomem *base_addr)
666 {
667 	u32 reg_val = readl(base_addr + SPI_GC_REG);
668 
669 	reg_val |= SPI_GC_TP_EN;
670 	writel(reg_val, base_addr + SPI_GC_REG);
671 }
672 
673 /* soft reset spi controller */
674 static void spi_soft_reset(void __iomem *base_addr)
675 {
676 	u32 reg_val = readl(base_addr + SPI_GC_REG);
677 
678 	reg_val |= SPI_GC_SRST;
679 	writel(reg_val, base_addr + SPI_GC_REG);
680 }
681 
682 /* enable irq type */
683 static void spi_enable_irq(u32 bitmap, void __iomem *base_addr)
684 {
685 	u32 reg_val = readl(base_addr + SPI_INT_CTL_REG);
686 
687 	bitmap &= SPI_INTEN_MASK;
688 	reg_val |= bitmap;
689 	writel(reg_val, base_addr + SPI_INT_CTL_REG);
690 }
691 
692 /* disable irq type */
693 static void spi_disable_irq(u32 bitmap, void __iomem *base_addr)
694 {
695 	u32 reg_val = readl(base_addr + SPI_INT_CTL_REG);
696 
697 	bitmap &= SPI_INTEN_MASK;
698 	reg_val &= ~bitmap;
699 	writel(reg_val, base_addr + SPI_INT_CTL_REG);
700 }
701 
702 #if IS_ENABLED(CONFIG_DMA_ENGINE)
703 /* enable dma irq */
704 static void spi_enable_dma_irq(u32 bitmap, void __iomem *base_addr)
705 {
706 	u32 reg_val = readl(base_addr + SPI_FIFO_CTL_REG);
707 
708 	bitmap &= SPI_FIFO_CTL_DRQEN_MASK;
709 	reg_val |= bitmap;
710 	writel(reg_val, base_addr + SPI_FIFO_CTL_REG);
711 
712 	spi_set_dma_mode(base_addr);
713 }
714 
715 /* disable dma irq */
716 static void spi_disable_dma_irq(u32 bitmap, void __iomem *base_addr)
717 {
718 	u32 reg_val = readl(base_addr + SPI_FIFO_CTL_REG);
719 
720 	bitmap &= SPI_FIFO_CTL_DRQEN_MASK;
721 	reg_val &= ~bitmap;
722 	writel(reg_val, base_addr + SPI_FIFO_CTL_REG);
723 }
724 
725 static void spi_enable_dbi_dma(void __iomem *base_addr)
726 {
727 	u32 reg_val = readl(base_addr + SPI_DBI_CR_REG2);
728 
729 	reg_val |= DBI_CR2_DMA_ENABLE;
730 	writel(reg_val, base_addr + SPI_DBI_CR_REG2);
731 }
732 
733 static void spi_disable_dbi_dma(void __iomem *base_addr)
734 {
735 	u32 reg_val = readl(base_addr + SPI_DBI_CR_REG2);
736 
737 	reg_val &= ~(DBI_CR2_DMA_ENABLE);
738 	writel(reg_val, base_addr + SPI_DBI_CR_REG2);
739 }
740 #endif
741 
742 /* query irq enable */
743 static u32 spi_qry_irq_enable(void __iomem *base_addr)
744 {
745 	return (SPI_INTEN_MASK & readl(base_addr + SPI_INT_CTL_REG));
746 }
747 
748 /* query dbi irq pending */
749 static u32 dbi_qry_irq_pending(void __iomem *base_addr)
750 {
751 	return (DBI_INT_STA_MASK & readl(base_addr + SPI_DBI_INT_REG));
752 }
753 
754 /* clear irq pending */
755 static void dbi_clr_irq_pending(u32 pending_bit, void __iomem *base_addr)
756 {
757 	pending_bit &= DBI_INT_STA_MASK;
758 	writel(pending_bit, base_addr + SPI_DBI_INT_REG);
759 }
760 
761 /* query irq pending */
762 static u32 spi_qry_irq_pending(void __iomem *base_addr)
763 {
764 	return (SPI_INT_STA_MASK & readl(base_addr + SPI_INT_STA_REG));
765 }
766 
767 /* clear irq pending */
768 static void spi_clr_irq_pending(u32 pending_bit, void __iomem *base_addr)
769 {
770 	pending_bit &= SPI_INT_STA_MASK;
771 	writel(pending_bit, base_addr + SPI_INT_STA_REG);
772 }
773 
774 /* query txfifo bytes */
775 static u32 spi_query_txfifo(void __iomem *base_addr)
776 {
777 	u32 reg_val = (SPI_FIFO_STA_TX_CNT & readl(base_addr + SPI_FIFO_STA_REG));
778 
779 	reg_val >>= SPI_TXCNT_BIT_POS;
780 	return reg_val;
781 }
782 
783 /* query rxfifo bytes */
784 static u32 spi_query_rxfifo(void __iomem *base_addr)
785 {
786 	u32 reg_val = (SPI_FIFO_STA_RX_CNT & readl(base_addr + SPI_FIFO_STA_REG));
787 
788 	reg_val >>= SPI_RXCNT_BIT_POS;
789 	return reg_val;
790 }
791 
792 /* reset fifo */
793 static void spi_reset_fifo(void __iomem *base_addr)
794 {
795 	u32 reg_val = readl(base_addr + SPI_FIFO_CTL_REG);
796 
797 	reg_val |= (SPI_FIFO_CTL_RX_RST|SPI_FIFO_CTL_TX_RST);
798 	/* Set the trigger level of RxFIFO/TxFIFO. */
799 	reg_val &= ~(SPI_FIFO_CTL_RX_LEVEL|SPI_FIFO_CTL_TX_LEVEL);
800 	reg_val |= (0x20<<16) | 0x20;
801 	writel(reg_val, base_addr + SPI_FIFO_CTL_REG);
802 }
803 
804 static void spi_set_rx_trig(u32 val, void __iomem *base_addr)
805 {
806 	u32 reg_val = readl(base_addr + SPI_FIFO_CTL_REG);
807 
808 	reg_val &= ~SPI_FIFO_CTL_RX_LEVEL;
809 	reg_val |= val & SPI_FIFO_CTL_RX_LEVEL;
810 	writel(reg_val, base_addr + SPI_FIFO_CTL_REG);
811 
812 }
813 
814 /* set transfer total length BC, transfer length TC and single transmit length STC */
815 static void spi_set_bc_tc_stc(u32 tx_len, u32 rx_len, u32 stc_len, u32 dummy_cnt, void __iomem *base_addr)
816 {
817 	u32 reg_val = readl(base_addr + SPI_BURST_CNT_REG);
818 
819 	/* set MBC(0x30) = tx_len + rx_len + dummy_cnt */
820 	reg_val &= ~SPI_BC_CNT_MASK;
821 	reg_val |= (SPI_BC_CNT_MASK & (tx_len + rx_len + dummy_cnt));
822 	writel(reg_val, base_addr + SPI_BURST_CNT_REG);
823 
824 	/* set MTC(0x34) = tx_len */
825 	reg_val = readl(base_addr + SPI_TRANSMIT_CNT_REG);
826 	reg_val &= ~SPI_TC_CNT_MASK;
827 	reg_val |= (SPI_TC_CNT_MASK & tx_len);
828 	writel(reg_val, base_addr + SPI_TRANSMIT_CNT_REG);
829 
830 	/* set BCC(0x38) = dummy cnt & single mode transmit counter */
831 	reg_val = readl(base_addr + SPI_BCC_REG);
832 	reg_val &= ~SPI_BCC_STC_MASK;
833 	reg_val |= (SPI_BCC_STC_MASK & stc_len);
834 	reg_val &= ~(0xf << 24);
835 	reg_val |= (dummy_cnt << 24);
836 	writel(reg_val, base_addr + SPI_BCC_REG);
837 }
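
/*
 * Worked example (illustrative, not a call this driver actually makes): for
 * a combined access that transmits a 4-byte command in single mode and then
 * reads 32 bytes with no dummy bursts, the counters would be
 * MBC = 4 + 32 + 0 = 36, MTC = 4 and STC = 4:
 */
#if 0
	spi_set_bc_tc_stc(4, 32, 4, 0, base_addr);
#endif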
838 
839 /* sunxi_spi_ss_ctrl : software control or spi controller control
840  * owner = 1 : software control
841  * owner = 0 : spi controller control
842  */
843 static void sunxi_spi_ss_ctrl(void __iomem *base_addr, bool owner)
844 {
845 	u32 reg_val = readl(base_addr + SPI_TC_REG);
846 
847 	owner &= 0x1;
848 	if (owner)
849 		reg_val |= SPI_TC_SS_OWNER;
850 	else
851 		reg_val &= ~SPI_TC_SS_OWNER;
852 	writel(reg_val, base_addr + SPI_TC_REG);
853 }
854 
855 /* set dhb, 1: discard unused spi burst; 0: receiving all spi burst */
856 static void spi_set_all_burst_received(void __iomem *base_addr)
857 {
858 	u32 reg_val = readl(base_addr+SPI_TC_REG);
859 
860 	reg_val &= ~SPI_TC_DHB;
861 	writel(reg_val, base_addr + SPI_TC_REG);
862 }
863 
864 static void spi_disable_dual(void __iomem *base_addr)
865 {
866 	u32 reg_val = readl(base_addr+SPI_BCC_REG);
867 	reg_val &= ~SPI_BCC_DUAL_MODE;
868 	writel(reg_val, base_addr + SPI_BCC_REG);
869 }
870 
871 static void spi_enable_dual(void __iomem *base_addr)
872 {
873 	u32 reg_val = readl(base_addr+SPI_BCC_REG);
874 	reg_val &= ~SPI_BCC_QUAD_MODE;
875 	reg_val |= SPI_BCC_DUAL_MODE;
876 	writel(reg_val, base_addr + SPI_BCC_REG);
877 }
878 
879 static void spi_disable_quad(void __iomem *base_addr)
880 {
881 	u32 reg_val = readl(base_addr+SPI_BCC_REG);
882 
883 	reg_val &= ~SPI_BCC_QUAD_MODE;
884 	writel(reg_val, base_addr + SPI_BCC_REG);
885 }
886 
887 static void spi_enable_quad(void __iomem *base_addr)
888 {
889 	u32 reg_val = readl(base_addr+SPI_BCC_REG);
890 
891 	reg_val |= SPI_BCC_QUAD_MODE;
892 	writel(reg_val, base_addr + SPI_BCC_REG);
893 }
894 static int spi_regulator_request(struct sunxi_spi_platform_data *pdata,
895 			struct device *dev)
896 {
897 	struct regulator *regu = NULL;
898 
899 	if (pdata->regulator != NULL)
900 		return 0;
901 
902 	regu = devm_regulator_get(dev, "spi");
903 	if (IS_ERR(regu)) {
904 		SPI_ERR("%s: spi get supply failed!\n", __func__);
905 		return -1;
906 	}
907 
908 	pdata->regulator = regu;
909 	return 0;
910 }
911 
912 static int spi_regulator_enable(struct sunxi_spi_platform_data *pdata)
913 {
914 	if (pdata->regulator == NULL)
915 		return 0;
916 
917 	if (regulator_enable(pdata->regulator) != 0) {
918 		SPI_ERR("enable regulator %s failed!\n", pdata->regulator_id);
919 		return -1;
920 	}
921 	return 0;
922 }
923 
924 static int spi_regulator_disable(struct sunxi_spi_platform_data *pdata)
925 {
926 	if (pdata->regulator == NULL)
927 		return 0;
928 
929 	if (regulator_disable(pdata->regulator) != 0) {
930 		SPI_ERR("disable regulator %s failed!\n", pdata->regulator_id);
931 		return -1;
932 	}
933 	return 0;
934 }
935 
936 #if IS_ENABLED(CONFIG_DMA_ENGINE)
937 
938 /* ------------------------- dma operation start ------------------------- */
939 /* dma full done callback for spi rx */
940 static void sunxi_spi_dma_cb_rx(void *data)
941 {
942 	struct sunxi_spi *sspi = (struct sunxi_spi *)data;
943 	unsigned long flags = 0;
944 	void __iomem *base_addr = sspi->base_addr;
945 
946 	spin_lock_irqsave(&sspi->lock, flags);
947 	dprintk(DEBUG_INFO, "[spi%d] dma read data end\n", sspi->master->bus_num);
948 
949 	if (spi_query_rxfifo(base_addr) > 0) {
950 		SPI_ERR("[spi%d] DMA end, but RxFIFO isn't empty! FSR: %#x\n",
951 			sspi->master->bus_num, spi_query_rxfifo(base_addr));
952 		sspi->result = -1; /* failed */
953 	} else {
954 		sspi->result = 0;
955 	}
956 
957 	complete(&sspi->done);
958 	spin_unlock_irqrestore(&sspi->lock, flags);
959 }
960 
961 /* dma full done callback for spi tx */
962 static void sunxi_spi_dma_cb_tx(void *data)
963 {
964 	struct sunxi_spi *sspi = (struct sunxi_spi *)data;
965 	unsigned long flags = 0;
966 
967 	spin_lock_irqsave(&sspi->lock, flags);
968 	dprintk(DEBUG_INFO, "[spi%d] dma write data end\n", sspi->master->bus_num);
969 	spin_unlock_irqrestore(&sspi->lock, flags);
970 }
971 
972 static int sunxi_spi_dmg_sg_cnt(void *addr, int len)
973 {
974 	int npages = 0;
975 	char *bufp = (char *)addr;
976 	int mapbytes = 0;
977 	int bytesleft = len;
978 
979 	while (bytesleft > 0) {
980 		if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
981 			mapbytes = bytesleft;
982 		else
983 			mapbytes = PAGE_SIZE - offset_in_page(bufp);
984 
985 		npages++;
986 		bufp += mapbytes;
987 		bytesleft -= mapbytes;
988 	}
989 	return npages;
990 }
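
/*
 * Worked example (assuming 4 KiB pages): a 6000-byte buffer starting at page
 * offset 0xf00 needs three SG entries: 256 bytes to the end of the first
 * page, a full 4096-byte second page, and the remaining 1648 bytes in the
 * third, so sunxi_spi_dmg_sg_cnt() returns 3.
 */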
991 
992 static int sunxi_spi_dma_init_sg(spi_dma_info_t *info, void *addr,
993 				 int len)
994 {
995 	int i;
996 	int npages = 0;
997 	void *bufp = addr;
998 	int mapbytes = 0;
999 	int bytesleft = len;
1000 
1001 	npages = sunxi_spi_dmg_sg_cnt(addr, len);
1002 	WARN_ON(npages == 0);
1003 	dprintk(DEBUG_INFO, "npages = %d, len = %d\n", npages, len);
1004 	if (npages > SPI_MAX_PAGES)
1005 		npages = SPI_MAX_PAGES;
1006 
1007 	sg_init_table(info->sg, npages);
1008 	for (i = 0; i < npages; i++) {
1009 		/* If there are less bytes left than what fits
1010 		 * in the current page (plus page alignment offset)
1011 		 * we just feed in this, else we stuff in as much
1012 		 * as we can.
1013 		 */
1014 		if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
1015 			mapbytes = bytesleft;
1016 		else
1017 			mapbytes = PAGE_SIZE - offset_in_page(bufp);
1018 
1019 		dprintk(DEBUG_INFO, "%d: len %d, offset %ld, addr %p(%d)\n", i, mapbytes,
1020 			offset_in_page(bufp), bufp, virt_addr_valid(bufp));
1021 		if (virt_addr_valid(bufp))
1022 			sg_set_page(&info->sg[i], virt_to_page(bufp),
1023 				    mapbytes, offset_in_page(bufp));
1024 		else
1025 			sg_set_page(&info->sg[i], vmalloc_to_page(bufp),
1026 				    mapbytes, offset_in_page(bufp));
1027 
1028 		bufp += mapbytes;
1029 		bytesleft -= mapbytes;
1030 	}
1031 
1032 	WARN_ON(bytesleft);
1033 	info->nents = npages;
1034 	return 0;
1035 }
1036 
1037 /* request dma channel and set callback function */
1038 static int sunxi_spi_prepare_dma(struct device *dev, spi_dma_info_t *_info,
1039 				enum spi_dma_dir _dir, const char *name)
1040 {
1041 	dprintk(DEBUG_INFO, "Init DMA, dir %d\n", _dir);
1042 
1043 	if (_info->chan == NULL) {
1044 		_info->chan = dma_request_chan(dev, name);
1045 		if (IS_ERR(_info->chan)) {
1046 			SPI_ERR("Request DMA(dir %d) failed!\n", _dir);
1047 			return -EINVAL;
1048 		}
1049 	}
1050 
1051 	_info->dir = _dir;
1052 	return 0;
1053 }
1054 
1055 static int sunxi_spi_config_dma_rx(struct sunxi_spi *sspi, struct spi_transfer *t)
1056 {
1057 	int ret = 0;
1058 	int nents = 0;
1059 	struct dma_slave_config dma_conf = {0};
1060 	struct dma_async_tx_descriptor *dma_desc = NULL;
1061 	unsigned int i, j;
1062 	u8 buf[64], cnt = 0;
1063 
1064 	if (debug_mask & DEBUG_INFO3) {
1065 		dprintk(DEBUG_INIT, "t->len = %d\n", t->len);
1066 		if (debug_mask & DEBUG_DATA) {
1067 			for (i = 0; i < t->len; i += 16) {
1068 				cnt = 0;
1069 				cnt += sprintf(buf + cnt, "%03x: ", i);
1070 				for (j = 0; ((i + j) < t->len) && (j < 16); j++)
1071 					cnt += sprintf(buf + cnt, "%02x ",
1072 							((unsigned char *)(t->rx_buf))[i+j]);
1073 				pr_warn("%s\n", buf);
1074 			}
1075 		}
1076 	}
1077 
1078 	ret = sunxi_spi_dma_init_sg(&sspi->dma_rx, t->rx_buf, t->len);
1079 	if (ret != 0)
1080 		return ret;
1081 
1082 	dma_conf.direction = DMA_DEV_TO_MEM;
1083 	dma_conf.src_addr = sspi->base_addr_phy + SPI_RXDATA_REG;
1084 	if (t->len%DMA_SLAVE_BUSWIDTH_4_BYTES) {
1085 		dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1086 		dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1087 	} else {
1088 		dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1089 		dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1090 	}
1091 	dma_conf.src_maxburst = 4;
1092 	dma_conf.dst_maxburst = 4;
1093 	dmaengine_slave_config(sspi->dma_rx.chan, &dma_conf);
1094 
1095 	nents = dma_map_sg(&sspi->pdev->dev, sspi->dma_rx.sg,
1096 			   sspi->dma_rx.nents, DMA_FROM_DEVICE);
1097 	if (!nents) {
1098 		SPI_ERR("[spi%d] dma_map_sg(%d) failed! return %d\n",
1099 				sspi->master->bus_num, sspi->dma_rx.nents, nents);
1100 		return -ENOMEM;
1101 	}
1102 	dprintk(DEBUG_INFO, "[spi%d] npages = %d, nents = %d\n",
1103 			sspi->master->bus_num, sspi->dma_rx.nents, nents);
1104 
1105 	dma_desc = dmaengine_prep_slave_sg(sspi->dma_rx.chan, sspi->dma_rx.sg,
1106 					   nents, DMA_DEV_TO_MEM,
1107 					   DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
1108 	if (!dma_desc) {
1109 		SPI_ERR("[spi%d] dmaengine_prep_slave_sg() failed!\n",
1110 				sspi->master->bus_num);
1111 		dma_unmap_sg(&sspi->pdev->dev, sspi->dma_rx.sg,
1112 			     sspi->dma_rx.nents, DMA_FROM_DEVICE);
1113 		return -1;
1114 	}
1115 
1116 	dma_desc->callback = sunxi_spi_dma_cb_rx;
1117 	dma_desc->callback_param = (void *)sspi;
1118 	dmaengine_submit(dma_desc);
1119 
1120 	return 0;
1121 }
1122 
1123 static int sunxi_spi_config_dma_tx(struct sunxi_spi *sspi, struct spi_transfer *t)
1124 {
1125 	int ret = 0;
1126 	int nents = 0;
1127 	struct dma_slave_config dma_conf = {0};
1128 	struct dma_async_tx_descriptor *dma_desc = NULL;
1129 	unsigned int i, j;
1130 	u8 buf[64], cnt = 0;
1131 
1132 	if (debug_mask & DEBUG_INFO4) {
1133 		dprintk(DEBUG_INIT, "t->len = %d\n", t->len);
1134 		if (debug_mask & DEBUG_DATA) {
1135 			for (i = 0; i < t->len; i += 16) {
1136 				cnt = 0;
1137 				cnt += sprintf(buf + cnt, "%03x: ", i);
1138 				for (j = 0; ((i + j) < t->len) && (j < 16); j++)
1139 					cnt += sprintf(buf + cnt, "%02x ",
1140 							((unsigned char *)(t->tx_buf))[i+j]);
1141 				pr_warn("%s\n", buf);
1142 			}
1143 		}
1144 	}
1145 
1146 	ret = sunxi_spi_dma_init_sg(&sspi->dma_tx, (void *)t->tx_buf,
1147 				    t->len);
1148 	if (ret != 0)
1149 		return ret;
1150 
1151 	dma_conf.direction = DMA_MEM_TO_DEV;
1152 	dma_conf.dst_addr = sspi->base_addr_phy + SPI_TXDATA_REG;
1153 	if (t->len%DMA_SLAVE_BUSWIDTH_4_BYTES) {
1154 		dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1155 		dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1156 	} else {
1157 		dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1158 		dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1159 	}
1160 	dma_conf.src_maxburst = 4;
1161 	dma_conf.dst_maxburst = 4;
1162 	dmaengine_slave_config(sspi->dma_tx.chan, &dma_conf);
1163 
1164 	nents = dma_map_sg(&sspi->pdev->dev, sspi->dma_tx.sg, sspi->dma_tx.nents, DMA_TO_DEVICE);
1165 	if (!nents) {
1166 		SPI_ERR("[spi%d] dma_map_sg(%d) failed! return %d\n",
1167 				sspi->master->bus_num, sspi->dma_tx.nents, nents);
1168 		return -ENOMEM;
1169 	}
1170 	dprintk(DEBUG_INFO, "[spi%d] npages = %d, nents = %d\n",
1171 			sspi->master->bus_num, sspi->dma_tx.nents, nents);
1172 
1173 	dma_desc = dmaengine_prep_slave_sg(sspi->dma_tx.chan, sspi->dma_tx.sg,
1174 					   nents, DMA_MEM_TO_DEV,
1175 					   DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
1176 	if (!dma_desc) {
1177 		SPI_ERR("[spi%d] dmaengine_prep_slave_sg() failed!\n",
1178 				sspi->master->bus_num);
1179 		dma_unmap_sg(&sspi->pdev->dev, sspi->dma_tx.sg,
1180 			     sspi->dma_tx.nents, DMA_TO_DEVICE);
1181 		return -1;
1182 	}
1183 
1184 	dma_desc->callback = sunxi_spi_dma_cb_tx;
1185 	dma_desc->callback_param = (void *)sspi;
1186 	dmaengine_submit(dma_desc);
1187 	return 0;
1188 }
1189 
1190 /* config dma src and dst address,
1191  * io or linear address,
1192  * drq type,
1193  * then enqueue the descriptor,
1194  * but do not trigger dma start
1195  */
1196 static int sunxi_spi_config_dma(struct sunxi_spi *sspi, enum spi_dma_dir dma_dir, struct spi_transfer *t)
1197 {
1198 	if (dma_dir == SPI_DMA_RDEV)
1199 		return sunxi_spi_config_dma_rx(sspi, t);
1200 	else
1201 		return sunxi_spi_config_dma_tx(sspi, t);
1202 }
1203 
1204 /* set dma start flag, if queue, it will auto restart to transfer next queue */
1205 static int sunxi_spi_start_dma(spi_dma_info_t *_info)
1206 {
1207 	dma_async_issue_pending(_info->chan);
1208 	return 0;
1209 }
1210 
1211 /* Unmap and free the SG tables */
1212 static void sunxi_spi_dma_free_sg(struct sunxi_spi *sspi, spi_dma_info_t *info)
1213 {
1214 	if (info->dir == SPI_DMA_RWNULL)
1215 		return;
1216 
1217 	dma_unmap_sg(&sspi->pdev->dev, info->sg, info->nents, (enum dma_data_direction)info->dir);
1218 	info->dir = SPI_DMA_RWNULL;
1219 
1220 	/* Never release the DMA channel. Duanmintao
1221 	 * dma_release_channel(info->chan);
1222 	 * info->chan = NULL;
1223 	 */
1224 }
1225 
1226 /* release dma channel, and set queue status to idle. */
1227 static int sunxi_spi_release_dma(struct sunxi_spi *sspi, struct spi_transfer *t)
1228 {
1229 	unsigned long flags = 0;
1230 
1231 	spin_lock_irqsave(&sspi->lock, flags);
1232 
1233 	sunxi_spi_dma_free_sg(sspi, &sspi->dma_rx);
1234 	sunxi_spi_dma_free_sg(sspi, &sspi->dma_tx);
1235 
1236 	spin_unlock_irqrestore(&sspi->lock, flags);
1237 	return 0;
1238 }
1239 #endif
1240 
1241 /* sunxi_spi_set_cs : spi controller sets cs to connect the device
1242  * enable : 1, working mode : assert ss to connect the device
1243  * enable : 0, default mode : deassert ss, device not connected
1244  *
1245  * spi-controller cs mode uses this function to set cs;
1246  * software cs mode lets the kernel gpio code set cs
1247  */
1248 static void sunxi_spi_set_cs(struct spi_device *spi, bool enable)
1249 {
1250 	u32 reg_val;
1251 	struct sunxi_spi *sspi = spi_master_get_devdata(spi->master);
1252 
1253 	sunxi_spi_ss_select(spi->chip_select, sspi->base_addr);
1254 
1255 	reg_val = readl(sspi->base_addr + SPI_TC_REG);
1256 	enable &= 0x01;
1257 	if (enable)	//set cs to connect device
1258 		reg_val |= SPI_TC_SS_LEVEL;
1259 	else		//set cs to default mode
1260 		reg_val &= ~SPI_TC_SS_LEVEL;
1261 	writel(reg_val, sspi->base_addr + SPI_TC_REG);
1262 }
1263 
1264 /* change the properties of the spi device for this spi transfer.
1265  * every spi transfer must call this interface to update the
1266  * master's clock and mode settings before the transfer executes.
1267  * return:  >= 0 : succeed;    < 0: failed.
1268  */
1269 static int sunxi_spi_xfer_setup(struct spi_device *spi, struct spi_transfer *t)
1270 {
1271 	/* get at the setup function, the properties of spi device */
1272 	struct sunxi_spi *sspi = spi_master_get_devdata(spi->master);
1273 	u32 spi_speed_hz;
1274 	void __iomem *base_addr = sspi->base_addr;
1275 
1276 	spi_speed_hz  = (t && t->speed_hz) ? t->speed_hz : spi->max_speed_hz;
1277 
1278 	if (sspi->sample_delay == SAMP_MODE_DL_DEFAULT) {
1279 		if (spi_speed_hz >= SPI_HIGH_FREQUENCY)
1280 			spi_sample_delay(0, 1, 0, base_addr);
1281 		else if (spi_speed_hz <= SPI_LOW_FREQUENCY)
1282 			spi_sample_delay(1, 0, 0, base_addr);
1283 		else
1284 			spi_sample_delay(0, 0, 0, base_addr);
1285 	} else {
1286 		spi_samp_mode_enable(1, base_addr);
1287 		spi_samp_dl_sw_status(1, base_addr);
1288 		spi_set_sample_mode(sspi->sample_mode, base_addr);
1289 		spi_set_sample_delay(sspi->sample_delay, base_addr);
1290 	}
1291 
1292 #if IS_ENABLED(CONFIG_EVB_PLATFORM)
1293 	spi_set_clk(spi_speed_hz, clk_get_rate(sspi->mclk), sspi);
1294 #else
1295 	spi_set_clk(spi_speed_hz, 24000000, sspi);
1296 #endif
1297 
1298 	spi_config_tc(1, spi->mode, sspi->base_addr);
1299 	return 0;
1300 }
1301 
1302 static int sunxi_spi_mode_check(struct sunxi_spi *sspi, struct spi_device *spi, struct spi_transfer *t)
1303 {
1304 	unsigned long flags = 0;
1305 
1306 	if (sspi->mode_type != MODE_TYPE_NULL)
1307 		return -EINVAL;
1308 
1309 	/* full duplex */
1310 	spin_lock_irqsave(&sspi->lock, flags);
1311 	if (t->tx_buf && t->rx_buf) {
1312 		spi_set_all_burst_received(sspi->base_addr);
1313 		spi_set_bc_tc_stc(t->len, 0, t->len, 0, sspi->base_addr);
1314 		sspi->mode_type = SINGLE_FULL_DUPLEX_RX_TX;
1315 		dprintk(DEBUG_INFO, "[spi%d] Single mode Full duplex tx & rx\n", sspi->master->bus_num);
1316 	} /* half duplex transmit */
1317 	else if (t->tx_buf) {
1318 		if (t->tx_nbits == SPI_NBITS_QUAD) {
1319 			spi_disable_dual(sspi->base_addr);
1320 			spi_enable_quad(sspi->base_addr);
1321 			spi_set_bc_tc_stc(t->len, 0, 0, 0, sspi->base_addr);
1322 			sspi->mode_type = QUAD_HALF_DUPLEX_TX;
1323 			dprintk(DEBUG_INFO, "[spi%d] Quad mode Half duplex tx\n", sspi->master->bus_num);
1324 		} else if (t->tx_nbits == SPI_NBITS_DUAL) {
1325 			spi_disable_quad(sspi->base_addr);
1326 			spi_enable_dual(sspi->base_addr);
1327 			spi_set_bc_tc_stc(t->len, 0, 0, 0, sspi->base_addr);
1328 			sspi->mode_type = DUAL_HALF_DUPLEX_TX;
1329 			dprintk(DEBUG_INFO, "[spi%d] Dual mode Half duplex tx\n", sspi->master->bus_num);
1330 		} else {
1331 			spi_disable_quad(sspi->base_addr);
1332 			spi_disable_dual(sspi->base_addr);
1333 			spi_set_bc_tc_stc(t->len, 0, t->len, 0, sspi->base_addr);
1334 			sspi->mode_type = SINGLE_HALF_DUPLEX_TX;
1335 			dprintk(DEBUG_INFO, "[spi%d] Single mode Half duplex tx\n", sspi->master->bus_num);
1336 		}
1337 	} /* half duplex receive */
1338 	else if (t->rx_buf) {
1339 		if (t->rx_nbits == SPI_NBITS_QUAD) {
1340 			spi_disable_dual(sspi->base_addr);
1341 			spi_enable_quad(sspi->base_addr);
1342 			spi_set_bc_tc_stc(0, t->len, 0, 0, sspi->base_addr);
1343 			sspi->mode_type = QUAD_HALF_DUPLEX_RX;
1344 			dprintk(DEBUG_INFO, "[spi%d] Quad mode Half duplex rx\n", sspi->master->bus_num);
1345 		} else if (t->rx_nbits == SPI_NBITS_DUAL) {
1346 			spi_disable_quad(sspi->base_addr);
1347 			spi_enable_dual(sspi->base_addr);
1348 			spi_set_bc_tc_stc(0, t->len, 0, 0, sspi->base_addr);
1349 			sspi->mode_type = DUAL_HALF_DUPLEX_RX;
1350 			dprintk(DEBUG_INFO, "[spi%d] Dual mode Half duplex rx\n", sspi->master->bus_num);
1351 		} else {
1352 			spi_disable_quad(sspi->base_addr);
1353 			spi_disable_dual(sspi->base_addr);
1354 			spi_set_bc_tc_stc(0, t->len, 0, 0, sspi->base_addr);
1355 			sspi->mode_type = SINGLE_HALF_DUPLEX_RX;
1356 			dprintk(DEBUG_INFO, "[spi%d] Single mode Half duplex rx\n", sspi->master->bus_num);
1357 		}
1358 	}
1359 	spin_unlock_irqrestore(&sspi->lock, flags);
1360 
1361 	return 0;
1362 
1363 }
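
/*
 * Illustrative sketch (not part of the driver): a client transfer that lands
 * in the QUAD_HALF_DUPLEX_RX branch above. "spi" and "rxbuf" are assumed to
 * be in scope, and the device must have been set up with SPI_RX_QUAD.
 */
#if 0
	struct spi_transfer t = {
		.rx_buf   = rxbuf,
		.len      = 256,
		.rx_nbits = SPI_NBITS_QUAD,	/* selects the quad rx branch */
	};

	spi_sync_transfer(spi, &t, 1);
#endif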
1364 
1365 static int sunxi_spi_cpu_readl(struct spi_device *spi, struct spi_transfer *t)
1366 {
1367 	struct sunxi_spi *sspi = spi_master_get_devdata(spi->master);
1368 	void __iomem *base_addr = sspi->base_addr;
1369 	unsigned rx_len = t->len;	/* number of bytes to receive */
1370 	unsigned char *rx_buf = (unsigned char *)t->rx_buf;
1371 	unsigned int poll_time = 0x7ffffff;
1372 	unsigned int i, j;
1373 	u8 buf[64], cnt = 0;
1374 
1375 	while (rx_len && --poll_time) {
1376 		/* rxFIFO counter */
1377 		if (spi_query_rxfifo(base_addr)) {
1378 			*rx_buf++ = readb(base_addr + SPI_RXDATA_REG);
1379 			--rx_len;
1380 		}
1381 	}
1382 	if (poll_time == 0) {
1383 		SPI_ERR("[spi%d] cpu receive data time out!\n", sspi->master->bus_num);
1384 		return -1;
1385 	}
1386 
1387 	if (debug_mask & DEBUG_INFO1) {
1388 		dprintk(DEBUG_INIT, "t->len = %d\n", t->len);
1389 		if (debug_mask & DEBUG_DATA) {
1390 			for (i = 0; i < t->len; i += 16) {
1391 				cnt = 0;
1392 				cnt += sprintf(buf + cnt, "%03x: ", i);
1393 				for (j = 0; ((i + j) < t->len) && (j < 16); j++)
1394 					cnt += sprintf(buf + cnt, "%02x ",
1395 							((unsigned char *)(t->rx_buf))[i+j]);
1396 				pr_warn("%s\n", buf);
1397 			}
1398 		}
1399 	}
1400 
1401 	return 0;
1402 }
1403 
1404 static int sunxi_spi_cpu_writel(struct spi_device *spi, struct spi_transfer *t)
1405 {
1406 	struct sunxi_spi *sspi = spi_master_get_devdata(spi->master);
1407 	void __iomem *base_addr = sspi->base_addr;
1408 	unsigned long flags = 0;
1409 #if IS_ENABLED(CONFIG_DMA_ENGINE)
1410 	unsigned char time;
1411 #endif
1412 	unsigned tx_len = t->len;	/* number of bytes to send */
1413 	unsigned char *tx_buf = (unsigned char *)t->tx_buf;
1414 	unsigned int poll_time = 0x7ffffff;
1415 	unsigned int i, j;
1416 	u8 buf[64], cnt = 0;
1417 
1418 	if (debug_mask & DEBUG_INFO2) {
1419 		dprintk(DEBUG_INIT, "t->len = %d\n", t->len);
1420 		if (debug_mask & DEBUG_DATA) {
1421 			for (i = 0; i < t->len; i += 16) {
1422 				cnt = 0;
1423 				cnt += sprintf(buf + cnt, "%03x: ", i);
1424 				for (j = 0; ((i + j) < t->len) && (j < 16); j++)
1425 					cnt += sprintf(buf + cnt, "%02x ",
1426 							((unsigned char *)(t->tx_buf))[i+j]);
1427 				pr_warn("%s\n", buf);
1428 			}
1429 		}
1430 	}
1431 
1432 	spin_lock_irqsave(&sspi->lock, flags);
1433 	for (; tx_len > 0; --tx_len) {
1434 		writeb(*tx_buf++, base_addr + SPI_TXDATA_REG);
1435 #if IS_ENABLED(CONFIG_DMA_ENGINE)
1436 		if (spi_query_txfifo(base_addr) >= MAX_FIFU)
1437 			for (time = 2; 0 < time; --time)
1438 				;
1439 #endif
1440 	}
1441 	spin_unlock_irqrestore(&sspi->lock, flags);
1442 	while (spi_query_txfifo(base_addr) && (--poll_time > 0))
1443 		;
1444 	if (poll_time == 0) {
1445 		SPI_ERR("[spi%d] cpu transfer data time out!\n", sspi->master->bus_num);
1446 		return -1;
1447 	}
1448 
1449 	return 0;
1450 }
1451 
1452 #if IS_ENABLED(CONFIG_DMA_ENGINE)
1453 static int sunxi_spi_dma_rx_config(struct spi_device *spi, struct spi_transfer *t)
1454 {
1455 	struct sunxi_spi *sspi = spi_master_get_devdata(spi->master);
1456 	void __iomem *base_addr = sspi->base_addr;
1457 	int ret = 0;
1458 
1459 	/* rxFIFO ready dma request enable */
1460 	spi_enable_dma_irq(SPI_FIFO_CTL_RX_DRQEN, base_addr);
1461 	ret = sunxi_spi_prepare_dma(&sspi->pdev->dev, &sspi->dma_rx,
1462 				SPI_DMA_RDEV, "rx");
1463 	if (ret < 0) {
1464 		spi_disable_dma_irq(SPI_FIFO_CTL_RX_DRQEN, base_addr);
1465 		spi_disable_irq(SPI_INTEN_TC|SPI_INTEN_ERR, base_addr);
1466 		return -EINVAL;
1467 	}
1468 	sunxi_spi_config_dma(sspi, SPI_DMA_RDEV, t);
1469 	sunxi_spi_start_dma(&sspi->dma_rx);
1470 
1471 	return ret;
1472 }
1473 
1474 static int sunxi_spi_dma_tx_config(struct spi_device *spi, struct spi_transfer *t)
1475 {
1476 	struct sunxi_spi *sspi = spi_master_get_devdata(spi->master);
1477 	void __iomem *base_addr = sspi->base_addr;
1478 	int ret = 0;
1479 
1480 	spi_enable_dma_irq(SPI_FIFO_CTL_TX_DRQEN, base_addr);
1481 	ret = sunxi_spi_prepare_dma(&sspi->pdev->dev, &sspi->dma_tx,
1482 				SPI_DMA_WDEV, "tx");
1483 	if (ret < 0) {
1484 		spi_disable_dma_irq(SPI_FIFO_CTL_TX_DRQEN, base_addr);
1485 		spi_disable_irq(SPI_INTEN_TC|SPI_INTEN_ERR, base_addr);
1486 		return -EINVAL;
1487 	}
1488 	sunxi_spi_config_dma(sspi, SPI_DMA_WDEV, t);
1489 	sunxi_spi_start_dma(&sspi->dma_tx);
1490 
1491 	return ret;
1492 }
1493 static int sunxi_spi_dma_transfer(struct spi_device *spi, struct spi_transfer *t)
1494 {
1495 	struct sunxi_spi *sspi = spi_master_get_devdata(spi->master);
1496 	void __iomem *base_addr = sspi->base_addr;
1497 	unsigned tx_len = t->len;	/* number of bytes to send */
1498 	unsigned rx_len = t->len;	/* number of bytes to receive */
1499 
1500 	switch (sspi->mode_type) {
1501 	case SINGLE_HALF_DUPLEX_RX:
1502 	case DUAL_HALF_DUPLEX_RX:
1503 	case QUAD_HALF_DUPLEX_RX:
1504 	{
1505 		/* len > 64: use DMA transfer, otherwise use cpu */
1506 		if (t->len > BULK_DATA_BOUNDARY) {
1507 			dprintk(DEBUG_INFO, "[spi%d] rx -> by dma\n", sspi->master->bus_num);
1508 			/* For Rx mode, the DMA end(not TC flag) is real end. */
1509 			spi_disable_irq(SPI_INTEN_TC, base_addr);
1510 			sunxi_spi_dma_rx_config(spi, t);
1511 			if (!sspi->dbi_enabled)
1512 				spi_start_xfer(base_addr);
1513 		} else {
1514 			dprintk(DEBUG_INFO, "[spi%d] rx -> by ahb\n", sspi->master->bus_num);
1515 			/* SMC=1,XCH trigger the transfer */
1516 			if (!sspi->dbi_enabled)
1517 				spi_start_xfer(base_addr);
1518 			sunxi_spi_cpu_readl(spi, t);
1519 		}
1520 		break;
1521 	}
1522 	case SINGLE_HALF_DUPLEX_TX:
1523 	case DUAL_HALF_DUPLEX_TX:
1524 	case QUAD_HALF_DUPLEX_TX:
1525 	{
1526 		/* len > 64: use DMA transfer, otherwise use cpu */
1527 		if (t->len > BULK_DATA_BOUNDARY) {
1528 			dprintk(DEBUG_INFO, "[spi%d] tx -> by dma\n", sspi->master->bus_num);
1529 			if (!sspi->dbi_enabled)
1530 				spi_start_xfer(base_addr);
1531 			/* txFIFO empty dma request enable */
1532 			sunxi_spi_dma_tx_config(spi, t);
1533 		} else {
1534 			dprintk(DEBUG_INFO, "[spi%d] tx -> by ahb\n", sspi->master->bus_num);
1535 			if (!sspi->dbi_enabled)
1536 				spi_start_xfer(base_addr);
1537 			sunxi_spi_cpu_writel(spi, t);
1538 		}
1539 		break;
1540 	}
1541 	case SINGLE_FULL_DUPLEX_RX_TX:
1542 	{
1543 		/* len > 64: use DMA transfer, otherwise use cpu */
1544 		if (t->len > BULK_DATA_BOUNDARY) {
1545 			dprintk(DEBUG_INFO, "[spi%d] rx and tx -> by dma\n", sspi->master->bus_num);
1546 			/* For Rx mode, the DMA end(not TC flag) is real end. */
1547 			spi_disable_irq(SPI_INTEN_TC, base_addr);
1548 			sunxi_spi_dma_rx_config(spi, t);
1549 			if (!sspi->dbi_enabled)
1550 				spi_start_xfer(base_addr);
1551 			sunxi_spi_dma_tx_config(spi, t);
1552 		} else {
1553 			dprintk(DEBUG_INFO, "[spi%d] rx and tx -> by ahb\n", sspi->master->bus_num);
1554 			if ((rx_len == 0) || (tx_len == 0))
1555 				return -EINVAL;
1556 
1557 			if (!sspi->dbi_enabled)
1558 				spi_start_xfer(base_addr);
1559 			sunxi_spi_cpu_writel(spi, t);
1560 			sunxi_spi_cpu_readl(spi, t);
1561 		}
1562 		break;
1563 	}
1564 	default:
1565 		return -1;
1566 	}
1567 
1568 	return 0;
1569 }
1570 #else
1571 static int sunxi_spi_cpu_transfer(struct spi_device *spi, struct spi_transfer *t)
1572 {
1573 	struct sunxi_spi *sspi = spi_master_get_devdata(spi->master);
1574 	void __iomem *base_addr = sspi->base_addr;
1575 	unsigned tx_len = t->len;	/* number of bytes to send */
1576 	unsigned rx_len = t->len;	/* number of bytes to receive */
1577 
1578 	switch (sspi->mode_type) {
1579 	case SINGLE_HALF_DUPLEX_RX:
1580 	case DUAL_HALF_DUPLEX_RX:
1581 	case QUAD_HALF_DUPLEX_RX:
1582 	{
1583 		dprintk(DEBUG_INFO, "[spi%d] rx -> by ahb\n", sspi->master->bus_num);
1584 		/* SMC=1,XCH trigger the transfer */
1585 		if (!sspi->dbi_enabled)
1586 			spi_start_xfer(base_addr);
1587 		sunxi_spi_cpu_readl(spi, t);
1588 		break;
1589 	}
1590 	case SINGLE_HALF_DUPLEX_TX:
1591 	case DUAL_HALF_DUPLEX_TX:
1592 	case QUAD_HALF_DUPLEX_TX:
1593 	{
1594 		dprintk(DEBUG_INFO, "[spi%d] tx -> by ahb\n", sspi->master->bus_num);
1595 		if (!sspi->dbi_enabled)
1596 			spi_start_xfer(base_addr);
1597 		sunxi_spi_cpu_writel(spi, t);
1598 		break;
1599 	}
1600 	case SINGLE_FULL_DUPLEX_RX_TX:
1601 	{
1602 		dprintk(DEBUG_INFO, "[spi%d] rx and tx -> by ahb\n", sspi->master->bus_num);
1603 		if ((rx_len == 0) || (tx_len == 0))
1604 			return -EINVAL;
1605 
1606 		if (!sspi->dbi_enabled)
1607 			spi_start_xfer(base_addr);
1608 		sunxi_spi_cpu_writel(spi, t);
1609 		sunxi_spi_cpu_readl(spi, t);
1610 		break;
1611 	}
1612 	default:
1613 		return -1;
1614 	}
1615 
1616 	return 0;
1617 }
1618 #endif
1619 
1620 /*
1621  * <= 64 bytes : cpu mode transfer
1622  * > 64 bytes  : dma mode transfer
1623  * wait for the done completion in this function, woken up in the irq handler
1624  * transfers one message->transfer to the slave device
1625  */
1626 static int sunxi_spi_transfer_one(struct spi_controller *master,
1627 					struct spi_device *spi,
1628 					struct spi_transfer *t)
1629 {
1630 	struct sunxi_spi *sspi = spi_master_get_devdata(spi->master);
1631 	void __iomem *base_addr = sspi->base_addr;
1632 	unsigned char *tx_buf = (unsigned char *)t->tx_buf;
1633 	unsigned char *rx_buf = (unsigned char *)t->rx_buf;
1634 	unsigned long timeout = 0;
1635 	int ret = 0;
1636 
1637 	dprintk(DEBUG_INFO, "[spi%d] begin transfer, txbuf %p, rxbuf %p, len %d\n",
1638 		spi->master->bus_num, tx_buf, rx_buf, t->len);
1639 	if ((!t->tx_buf && !t->rx_buf) || !t->len)
1640 		return -EINVAL;
1641 
1642 	if (sunxi_spi_xfer_setup(spi, t) < 0)
1643 		return -EINVAL;
1644 
1645 	/* write 1 to clear pending bits */
1646 	spi_clr_irq_pending(SPI_INT_STA_MASK, base_addr);
1647 #if IS_ENABLED(CONFIG_DMA_ENGINE)
1648 	/* disable all DRQ */
1649 	spi_disable_dma_irq(SPI_FIFO_CTL_DRQEN_MASK, base_addr);
1650 #endif
1651 	/* reset tx/rx fifo */
1652 	//spi_reset_fifo(base_addr);
1653 
1654 	if (sunxi_spi_mode_check(sspi, spi, t))
1655 		return -EINVAL;
1656 
1657 	if (sspi->dbi_enabled) {
1658 		spi_config_dbi(sspi, spi);
1659 #if IS_ENABLED(CONFIG_DMA_ENGINE)
1660 		spi_enable_dbi_dma(base_addr);
1661 #endif
1662 	} else {
1663 		/* reset tx/rx fifo */
1664 		spi_reset_fifo(base_addr);
1665 		/* 1. Tx/Rx error irq, processed in the IRQ handler;
1666 		 * 2. Transfer Complete interrupt enable
1667 		 */
1668 		spi_enable_irq(SPI_INTEN_TC|SPI_INTEN_ERR, base_addr);
1669 	}
1670 
1671 	if ((debug_mask & DEBUG_INIT) && (debug_mask & DEBUG_DATA)) {
1672 		dprintk(DEBUG_DATA, "[spi%d] dump reg:\n", sspi->master->bus_num);
1673 		spi_dump_reg(sspi, 0, 0x40);
1674 	}
1675 
1676 #if IS_ENABLED(CONFIG_DMA_ENGINE)
1677 	sunxi_spi_dma_transfer(spi, t);
1678 #else
1679 	sunxi_spi_cpu_transfer(spi, t);
1680 #endif
1681 
1682 	if ((debug_mask & DEBUG_INIT) && (debug_mask & DEBUG_DATA)) {
1683 		dprintk(DEBUG_DATA, "[spi%d] dump reg:\n", sspi->master->bus_num);
1684 		spi_dump_reg(sspi, 0, 0x40);
1685 	}
1686 
1687 	/* wait for xfer complete in the isr. */
1688 	timeout = wait_for_completion_timeout(
1689 				&sspi->done,
1690 				msecs_to_jiffies(XFER_TIMEOUT));
1691 	if (timeout == 0) {
1692 		SPI_ERR("[spi%d] xfer timeout\n", spi->master->bus_num);
1693 		ret = -1;
1694 	} else if (sspi->result < 0) {
1695 		SPI_ERR("[spi%d] xfer failed...\n", spi->master->bus_num);
1696 		ret = -1;
1697 	}
1698 
1699 #if IS_ENABLED(CONFIG_DMA_ENGINE)
1700 	/* release dma resource if necessary */
1701 	sunxi_spi_release_dma(sspi, t);
1702 
1703 	if (sspi->dbi_enabled)
1704 		spi_disable_dbi_dma(base_addr);
1705 #endif
1706 
1707 	if (sspi->mode_type != MODE_TYPE_NULL)
1708 		sspi->mode_type = MODE_TYPE_NULL;
1709 
1710 	return ret;
1711 }
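
/*
 * Illustration only, not part of the driver: a client reaches
 * transfer_one() through the generic SPI core. A minimal sketch,
 * assuming a spi_device 'spi' already bound to this controller and
 * DMA-safe buffers tx/rx of len bytes:
 *
 *	struct spi_transfer t = {
 *		.tx_buf	= tx,	(NULL for a half-duplex read)
 *		.rx_buf	= rx,	(NULL for a half-duplex write)
 *		.len	= len,	(> BULK_DATA_BOUNDARY selects the DMA path)
 *	};
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	ret = spi_sync(spi, &m);	(the core ends up in transfer_one())
 */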
1712 
1713 /* wake up the sleeping thread and report the result code */
1714 static irqreturn_t sunxi_spi_handler(int irq, void *dev_id)
1715 {
1716 	struct sunxi_spi *sspi = (struct sunxi_spi *)dev_id;
1717 	void __iomem *base_addr = sspi->base_addr;
1718 	unsigned int status = 0, enable = 0;
1719 	unsigned long flags = 0;
1720 
1721 	spin_lock_irqsave(&sspi->lock, flags);
1722 
1723 	enable = spi_qry_irq_enable(base_addr);
1724 	status = spi_qry_irq_pending(base_addr);
1725 	spi_clr_irq_pending(status, base_addr);
1726 	dprintk(DEBUG_INFO, "[spi%d] irq status = %x\n", sspi->master->bus_num, status);
1727 
1728 	sspi->result = 0; /* assume success */
1729 
1730 	if (sspi->mode) {
1731 		if ((enable & SPI_INTEN_RX_RDY) && (status & SPI_INT_STA_RX_RDY)) {
1732 			dprintk(DEBUG_INFO, "[spi%d] spi data is ready\n", sspi->master->bus_num);
1733 			spi_disable_irq(SPI_INT_STA_RX_RDY, base_addr);
1734 			wake_up_process(sspi->task);
1735 			spin_unlock_irqrestore(&sspi->lock, flags);
1736 			return IRQ_HANDLED;
1737 		}
1738 	}
1739 
1740 	/* master mode, Transfer Complete Interrupt */
1741 	if (status & SPI_INT_STA_TC) {
1742 		dprintk(DEBUG_INFO, "[spi%d] SPI TC comes\n", sspi->master->bus_num);
1743 		spi_disable_irq(SPI_INT_STA_TC | SPI_INT_STA_ERR, base_addr);
1744 
1745 		/* wake up the upper layer via the completion */
1746 		complete(&sspi->done);
1747 		spin_unlock_irqrestore(&sspi->lock, flags);
1748 		return IRQ_HANDLED;
1749 	} else if (status & SPI_INT_STA_ERR) { /* master mode: error */
1750 		SPI_ERR("[spi%d] SPI ERR %#x comes\n", sspi->master->bus_num, status);
1751 		/* error handling; DMA is released in the workqueue, we should not get here */
1752 		spi_disable_irq(SPI_INT_STA_TC | SPI_INT_STA_ERR, base_addr);
1753 		spi_soft_reset(base_addr);
1754 		sspi->result = -1;
1755 		complete(&sspi->done);
1756 		spin_unlock_irqrestore(&sspi->lock, flags);
1757 		return IRQ_HANDLED;
1758 	}
1759 	if (sspi->dbi_enabled) {
1760 		status = dbi_qry_irq_pending(base_addr);
1761 		dbi_clr_irq_pending(status, base_addr);
1762 		dprintk(DEBUG_INFO, "[dbi%d] irq status = %x\n",
1763 				sspi->master->bus_num, status);
1764 		if ((status & DBI_INT_FIFO_EMPTY) && !(sspi->dbi_config->dbi_mode
1765 					& SPI_DBI_TRANSMIT_VIDEO_)) {
1766 			dprintk(DEBUG_INFO, "[spi%d] DBI Frame TC comes\n",
1767 					sspi->master->bus_num);
1768 			dbi_disable_irq(DBI_FIFO_EMPTY_INT_EN, base_addr);
1769 			/* wake up the upper layer via the completion */
1770 			complete(&sspi->done);
1771 			spin_unlock_irqrestore(&sspi->lock, flags);
1772 			return IRQ_HANDLED;
1773 		} else if (((status & DBI_INT_TE_INT) ||
1774 			    (status & DBI_INT_STA_FRAME)) &&
1775 			   (sspi->dbi_config->dbi_mode & SPI_DBI_TRANSMIT_VIDEO_)) {
1776 			if (sspi->dbi_config->dbi_vsync_handle &&
1777 			    (status & DBI_INT_TE_INT))
1778 				sspi->dbi_config->dbi_vsync_handle(
1779 				    (unsigned long)sspi->spi);
1780 			else
1781 				dbi_disable_irq(DBI_FRAM_DONE_INT_EN, base_addr);
1782 			complete(&sspi->done);
1783 			spin_unlock_irqrestore(&sspi->lock, flags);
1784 			return IRQ_HANDLED;
1785 		} else {
1786 			//TODO: Adapt to other states
1787 			spin_unlock_irqrestore(&sspi->lock, flags);
1788 			return IRQ_HANDLED;
1789 		}
1790 	}
1791 	dprintk(DEBUG_INFO, "[spi%d] SPI NONE comes\n", sspi->master->bus_num);
1792 	spin_unlock_irqrestore(&sspi->lock, flags);
1793 	return IRQ_NONE;
1794 }
1795 
1796 static int sunxi_spi_setup(struct spi_device *spi)
1797 {
1798 	struct sunxi_spi *sspi = spi_master_get_devdata(spi->master);
1799 
1800 	sspi->spi = spi;
1801 
1802 	return 0;
1803 }
1804 
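/*
 * Called by the SPI core when queueing a transfer: lengths above
 * BULK_DATA_BOUNDARY are worth the DMA setup cost, everything shorter
 * is serviced through the CPU FIFO path.
 */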
1805 static bool sunxi_spi_can_dma(struct spi_master *master, struct spi_device *spi,
1806 				 struct spi_transfer *xfer)
1807 {
1808 	return (xfer->len > BULK_DATA_BOUNDARY);
1809 }
1810 
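/*
 * Default tx-data hook for slave mode: builds a head->len byte test
 * payload (byte i holds i + SAMPLE_NUMBER) that is returned to the
 * master on a SUNXI_OP_READ request; returns NULL on allocation
 * failure.
 */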
1811 static struct device_data *sunxi_spi_slave_set_txdata(struct sunxi_spi_slave_head *head)
1812 {
1813 	u8 *buf, i;
1814 	struct device_data *data;
1815 
1816 	buf = kzalloc(head->len, GFP_KERNEL);
1817 	if (IS_ERR_OR_NULL(buf)) {
1818 		SPI_ERR("failed to alloc mem\n");
1819 		goto err0;
1820 	}
1821 
1822 	data = kzalloc(sizeof(*data), GFP_KERNEL);
1823 	if (IS_ERR_OR_NULL(data)) {
1824 		SPI_ERR("failed to alloc mem\n");
1825 		goto err1;
1826 	}
1827 
1828 	for (i = 0; i < head->len; i++) {
1829 		buf[i] = i + SAMPLE_NUMBER;
1830 	}
1831 	udelay(100);
1832 
1833 	dprintk(DEBUG_DATA, "[debugging only] send data:\n");
1834 	if (debug_mask & DEBUG_DATA)
1835 		spi_dump_data(buf, head->len);
1836 
1837 	data->tx_buf = buf;
1838 	data->len = head->len;
1839 
1840 	return data;
1841 
1842 err1:
1843 	kfree(buf);
1844 err0:
1845 	return NULL;
1846 }
1847 
1848 static int sunxi_spi_slave_cpu_tx_config(struct sunxi_spi *sspi)
1849 {
1850 	int ret = 0, i;
1851 	u32 poll_time = 0x7ffffff;
1852 	unsigned long timeout = 0;
1853 	unsigned long flags = 0;
1854 
1855 	dprintk(DEBUG_INFO, "[spi%d] receive pkt head ok\n", sspi->master->bus_num);
1856 	if (sspi->slave->set_up_txdata) {
1857 		sspi->slave->data = sspi->slave->set_up_txdata(sspi->slave->head);
1858 		if (IS_ERR_OR_NULL(sspi->slave->data)) {
1859 			SPI_ERR("[spi%d] null data\n", sspi->master->bus_num);
1860 			ret = -1;
1861 			goto err;
1862 		}
1863 	} else {
1864 		SPI_ERR("[spi%d] set_up_txdata is not defined\n", sspi->master->bus_num);
1865 		ret = -1;
1866 		goto err;
1867 	}
1868 
1869 	reinit_completion(&sspi->done);
1870 	spi_clr_irq_pending(SPI_INT_STA_MASK, sspi->base_addr);
1871 	spi_disable_irq(SPI_INTEN_RX_RDY, sspi->base_addr);
1872 	spi_reset_fifo(sspi->base_addr);
1873 	spi_set_bc_tc_stc(0, 0, 0, 0, sspi->base_addr);
1874 	spi_enable_irq(SPI_INTEN_TC|SPI_INTEN_ERR, sspi->base_addr);
1875 
1876 	dprintk(DEBUG_INFO, "[spi%d] data to send is initialized\n", sspi->master->bus_num);
1877 	spin_lock_irqsave(&sspi->lock, flags);
1878 	for (i = 0; i < sspi->slave->head->len; i++) {
1879 		while ((spi_query_txfifo(sspi->base_addr) >= MAX_FIFU) && (--poll_time))
1880 			;
1881 		if (poll_time == 0) {
1882 			dprintk(DEBUG_INFO, "[spi%d] cpu send data timeout\n", sspi->master->bus_num);
			spin_unlock_irqrestore(&sspi->lock, flags);	/* drop the lock before bailing out */
1883 			goto err;
1884 		}
1885 
1886 		writeb(sspi->slave->data->tx_buf[i], sspi->base_addr + SPI_TXDATA_REG);
1887 	}
1888 	spin_unlock_irqrestore(&sspi->lock, flags);
1889 
1890 	dprintk(DEBUG_INFO, "[spi%d] already send data to fifo\n", sspi->master->bus_num);
1891 
1892 	/* wait for xfer complete in the isr. */
1893 	timeout = wait_for_completion_timeout(
1894 				&sspi->done,
1895 				msecs_to_jiffies(XFER_TIMEOUT));
1896 	if (timeout == 0) {
1897 		SPI_ERR("[spi%d] xfer timeout\n", sspi->master->bus_num);
1898 		ret = -1;
1899 		goto err;
1900 	} else if (sspi->result < 0) {
1901 		SPI_ERR("[spi%d] xfer failed...\n", sspi->master->bus_num);
1902 		ret = -1;
1903 		goto err;
1904 	}
1905 
1906 err:
1907 	spi_clr_irq_pending(SPI_INT_STA_MASK, sspi->base_addr);
1908 	spi_disable_irq(SPI_INTEN_TC|SPI_INTEN_ERR, sspi->base_addr);
1909 #if IS_ENABLED(CONFIG_DMA_ENGINE)
1910 	spi_disable_dma_irq(SPI_FIFO_CTL_DRQEN_MASK, sspi->base_addr);
1911 #endif
1912 	spi_reset_fifo(sspi->base_addr);
	if (!IS_ERR_OR_NULL(sspi->slave->data)) {	/* data may be NULL on the early error paths */
1913 		kfree(sspi->slave->data->tx_buf);
1914 		kfree(sspi->slave->data);
	}
1915 
1916 	return ret;
1917 }
1918 
1919 static int sunxi_spi_slave_cpu_rx_config(struct sunxi_spi *sspi)
1920 {
1921 	int ret = 0, i;
1922 	u32 poll_time = 0x7ffffff;
1923 
1924 	dprintk(DEBUG_INFO, "[spi%d] receive pkt head ok\n", sspi->master->bus_num);
1925 	sspi->slave->data = kzalloc(sizeof(struct device_data), GFP_KERNEL);
1926 	if (IS_ERR_OR_NULL(sspi->slave->data)) {
1927 		SPI_ERR("failed to alloc mem\n");
1928 		ret = -ENOMEM;
1929 		goto err0;
1930 	}
1931 
1932 	sspi->slave->data->len = sspi->slave->head->len;
1933 	sspi->slave->data->rx_buf = kzalloc(sspi->slave->data->len, GFP_KERNEL);
1934 	if (IS_ERR_OR_NULL(sspi->slave->data->rx_buf)) {
1935 		SPI_ERR("failed to alloc mem\n");
1936 		ret = -ENOMEM;
1937 		goto err1;
1938 	}
1939 
1940 	reinit_completion(&sspi->done);
1941 
1942 	spi_set_rx_trig(sspi->slave->data->len/2, sspi->base_addr);
1943 	spi_enable_irq(SPI_INTEN_ERR|SPI_INTEN_RX_RDY, sspi->base_addr);
1944 	spi_set_bc_tc_stc(0, 0, 0, 0, sspi->base_addr);
1945 
1946 	dprintk(DEBUG_INFO, "[spi%d] to be receive data init ok\n", sspi->master->bus_num);
1947 	for (i = 0; i < sspi->slave->data->len; i++) {
1948 		while (!spi_query_rxfifo(sspi->base_addr) && (--poll_time > 0))
1949 			;
1950 		sspi->slave->data->rx_buf[i] =  readb(sspi->base_addr + SPI_RXDATA_REG);
1951 	}
1952 
1953 
1954 	if (poll_time == 0) {
1955 		SPI_ERR("[spi%d] cpu receive pkt head time out!\n", sspi->master->bus_num);
1956 		spi_reset_fifo(sspi->base_addr);
1957 		ret = -1;
1958 		goto err2;
1959 	} else if (sspi->result < 0) {
1960 		SPI_ERR("[spi%d] xfer failed...\n", sspi->master->bus_num);
1961 		spi_reset_fifo(sspi->base_addr);
1962 		ret = -1;
1963 		goto err2;
1964 	}
1965 
1966 	dprintk(DEBUG_DATA, "[debugging only] receive data:\n");
1967 	if (debug_mask & DEBUG_DATA)
1968 		spi_dump_data(sspi->slave->data->rx_buf, sspi->slave->data->len);
1969 
1970 err2:
1971 	spi_clr_irq_pending(SPI_INT_STA_MASK, sspi->base_addr);
1972 	spi_disable_irq(SPI_INTEN_TC|SPI_INTEN_ERR, sspi->base_addr);
1973 #if IS_ENABLED(CONFIG_DMA_ENGINE)
1974 	spi_disable_dma_irq(SPI_FIFO_CTL_DRQEN_MASK, sspi->base_addr);
1975 #endif
1976 	spi_reset_fifo(sspi->base_addr);
1977 	kfree(sspi->slave->data->rx_buf);
1978 err1:
1979 	kfree(sspi->slave->data);
1980 err0:
1981 	return ret;
1982 }
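
/*
 * Parse the protocol packet head received from the master. Judging by
 * the decoding below, the head carries an opcode byte, a 24-bit
 * big-endian address and a one-byte payload length; the exact byte
 * offsets are the OP/ADDR/LENGTH masks from spi-slave-protocol.h.
 */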
1983 static int sunxi_spi_slave_handle_head(struct sunxi_spi *sspi, u8 *buf)
1984 {
1985 	struct sunxi_spi_slave_head *head;
1986 	int ret = 0;
1987 
1988 	head = kzalloc(sizeof(*head), GFP_KERNEL);
1989 	if (IS_ERR_OR_NULL(head)) {
1990 		SPI_ERR("failed to alloc mem\n");
1991 		ret = -ENOMEM;
1992 		goto err0;
1993 	}
1994 
1995 	head->op_code = buf[OP_MASK];
1996 	head->addr = (buf[ADDR_MASK_0] << 16) | (buf[ADDR_MASK_1] << 8) | buf[ADDR_MASK_2];
1997 	head->len = buf[LENGTH_MASK];
1998 
1999 	dprintk(DEBUG_INFO, "[spi%d] op=0x%x addr=0x%x len=0x%x\n",
2000 			sspi->master->bus_num, head->op_code, head->addr, head->len);
2001 
2002 	sspi->slave->head = head;
2003 
2004 	if (head->len > 64) {
2005 		dprintk(DEBUG_INFO, "[spi%d] length must not exceed 64 bytes\n", sspi->master->bus_num);
2006 		ret = -1;
2007 		goto err1;
2008 	}
2009 
2010 	if (head->op_code == SUNXI_OP_WRITE) {
2011 		sunxi_spi_slave_cpu_rx_config(sspi);
2012 	} else if (head->op_code == SUNXI_OP_READ) {
2013 		sunxi_spi_slave_cpu_tx_config(sspi);
2014 	} else {
2015 		dprintk(DEBUG_INFO, "[spi%d] pkt head opcode err\n", sspi->master->bus_num);
2016 		ret = -1;
2017 		goto err1;
2018 	}
2019 err1:
2020 	kfree(head);
2021 err0:
2022 	spi_clr_irq_pending(SPI_INT_STA_MASK, sspi->base_addr);
2023 	spi_disable_irq(SPI_INTEN_RX_RDY, sspi->base_addr);
2024 	spi_disable_irq(SPI_INTEN_TC|SPI_INTEN_ERR, sspi->base_addr);
2025 	spi_reset_fifo(sspi->base_addr);
2026 
2027 	return ret;
2028 }
2029 
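/*
 * Slave-mode worker thread: arm the RX FIFO trigger at HEAD_LEN bytes,
 * sleep, and wait for sunxi_spi_handler() to call wake_up_process()
 * when SPI_INT_STA_RX_RDY fires. Then drain the packet head from the
 * FIFO and dispatch to the rx/tx config routines above.
 */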
2030 static int sunxi_spi_slave_task(void *data)
2031 {
2032 	u8 *pkt_head, i;
2033 	u32 poll_time = 0x7ffffff;
2034 	unsigned long flags = 0;
2035 	struct sunxi_spi *sspi = (struct sunxi_spi *)data;
2036 
2037 	pkt_head = kzalloc(HEAD_LEN, GFP_KERNEL);
2038 	if (IS_ERR_OR_NULL(pkt_head)) {
2039 		SPI_ERR("[spi%d] failed to alloc mem\n", sspi->master->bus_num);
2040 		return -ENOMEM;
2041 	}
2042 
2043 	allow_signal(SIGKILL);
2044 
2045 	while (!kthread_should_stop()) {
2046 		spi_reset_fifo(sspi->base_addr);
2047 		spi_clr_irq_pending(SPI_INT_STA_MASK, sspi->base_addr);
2048 #if IS_ENABLED(CONFIG_DMA_ENGINE)
2049 		spi_disable_dma_irq(SPI_FIFO_CTL_DRQEN_MASK, sspi->base_addr);
2050 #endif
2051 		spi_enable_irq(SPI_INTEN_ERR|SPI_INTEN_RX_RDY, sspi->base_addr);
2052 		spi_set_rx_trig(HEAD_LEN, sspi->base_addr);
2053 		spi_set_bc_tc_stc(0, 0, 0, 0, sspi->base_addr);
2054 
2055 		dprintk(DEBUG_INFO, "[spi%d] receive pkt head init ok, sleep and wait for data\n", sspi->master->bus_num);
2056 		set_current_state(TASK_INTERRUPTIBLE);
2057 		schedule();
2058 
2059 		dprintk(DEBUG_INFO, "[spi%d] data has arrived, wake up and receive pkt head\n", sspi->master->bus_num);
2060 
		poll_time = 0x7ffffff;	/* refill the poll budget for this packet */
2061 		for (i = 0; i < HEAD_LEN; i++) {
2062 			while (!spi_query_rxfifo(sspi->base_addr) && (--poll_time > 0))
2063 				;
2064 			pkt_head[i] =  readb(sspi->base_addr + SPI_RXDATA_REG);
2065 		}
2066 
2067 		if (poll_time == 0) {
2068 			SPI_ERR("[spi%d] cpu receive pkt head time out!\n", sspi->master->bus_num);
2069 			spi_reset_fifo(sspi->base_addr);
2070 			continue;
2071 		} else if (sspi->result < 0) {
2072 			SPI_ERR("[spi%d] xfer failed...\n", sspi->master->bus_num);
2073 			spi_reset_fifo(sspi->base_addr);
2074 			continue;
2075 		}
2076 
2077 		sunxi_spi_slave_handle_head(sspi, pkt_head);
2078 	}
2079 
2080 	spin_lock_irqsave(&sspi->lock, flags);
2081 	sspi->task_flag = 1;
2082 	spin_unlock_irqrestore(&sspi->lock, flags);
2083 
2084 	kfree(pkt_head);
2085 
2086 	return 0;
2087 }
2088 
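/*
 * Switch the controller pins between the "default" and "sleep" pinctrl
 * states from the device tree; used at probe time and across
 * suspend/resume via sunxi_spi_hw_init()/sunxi_spi_hw_exit().
 */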
2089 static int sunxi_spi_select_gpio_state(struct pinctrl *pctrl, char *name, u32 no)
2090 {
2091 	int ret = 0;
2092 	struct pinctrl_state *pctrl_state = NULL;
2093 
2094 	pctrl_state = pinctrl_lookup_state(pctrl, name);
2095 	if (IS_ERR(pctrl_state)) {
2096 		SPI_ERR("[spi%d] pinctrl_lookup_state(%s) failed! return %p\n", no, name, pctrl_state);
2097 		return -1;
2098 	}
2099 
2100 	ret = pinctrl_select_state(pctrl, pctrl_state);
2101 	if (ret < 0)
2102 		SPI_ERR("[spi%d] pinctrl_select_state(%s) failed! return %d\n", no, name, ret);
2103 
2104 	return ret;
2105 }
2106 
2107 static int sunxi_spi_request_gpio(struct sunxi_spi *sspi)
2108 {
2109 	int bus_no = sspi->pdev->id;
2110 
2111 	sspi->pctrl = devm_pinctrl_get(&sspi->pdev->dev);
2112 	if (IS_ERR(sspi->pctrl)) {
2113 		SPI_ERR("[spi%d] devm_pinctrl_get() failed! return %ld\n",
2114 				sspi->master->bus_num, PTR_ERR(sspi->pctrl));
2115 		return -1;
2116 	}
2117 
2118 	return sunxi_spi_select_gpio_state(sspi->pctrl, PINCTRL_STATE_DEFAULT, bus_no);
2119 }
2120 
2121 /*
2122 static void sunxi_spi_release_gpio(struct sunxi_spi *sspi)
2123 {
2124 	devm_pinctrl_put(sspi->pctrl);
2125 	sspi->pctrl = NULL;
2126 }
2127 */
2128 
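/*
 * The properties read below and in probe map onto a device tree node
 * roughly like this sketch (unit address, alias and property values
 * are illustrative, not taken from a real board dts):
 *
 *	spi0: spi@5010000 {
 *		compatible = "allwinner,sun50i-spi";
 *		clock-names = "pll", "mod", "bus";
 *		clock-frequency = <100000000>;
 *		pinctrl-names = "default", "sleep";
 *		spi0_cs_number = <1>;
 *		spi_slave_mode = <0>;
 *		sample_mode = <0>;
 *		sample_delay = <0>;
 *	};
 */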
2129 static int sunxi_spi_resource_get(struct sunxi_spi *sspi)
2130 {
2131 	int ret;
2132 	struct device_node *np = sspi->pdev->dev.of_node;
2133 	struct sunxi_spi_platform_data *pdata = sspi->pdev->dev.platform_data;
2134 	struct device *dev = &(sspi->pdev->dev);
2135 
2136 	ret = spi_regulator_request(pdata, dev);
2137 	if (ret < 0) {
2138 		SPI_ERR("[spi%d] request regulator failed!\n", sspi->master->bus_num);
2139 		return ret;
2140 	}
2141 
2142 	ret = of_property_read_u32(np, "clock-frequency",
2143 			&pdata->sclk_freq_def);
2144 	if (ret) {
2145 		SPI_ERR("[spi%d] Get clock-frequency property failed\n", sspi->master->bus_num);
2146 		return -1;
2147 	}
2148 
2149 	ret = of_property_read_u32(np, "spi_slave_mode", &sspi->mode);
2150 	if (sspi->mode)
2151 		dprintk(DEBUG_INIT, "[spi%d] SPI SLAVE MODE\n", sspi->master->bus_num);
2152 	else
2153 		dprintk(DEBUG_INIT, "[spi%d] SPI MASTER MODE\n", sspi->master->bus_num);
2154 
2155 	if (sunxi_spi_request_gpio(sspi) < 0) {
2156 		SPI_ERR("[spi%d] Request GPIO failed!\n", sspi->master->bus_num);
2157 		return -1;
2158 	}
2159 	sspi->pclk = devm_clk_get(&sspi->pdev->dev, "pll");
2160 	if (IS_ERR_OR_NULL(sspi->pclk)) {
2161 		SPI_ERR("[spi%d] Unable to acquire pll clock '%s', return %ld\n",
2162 			sspi->master->bus_num, sspi->dev_name,
2163 			PTR_ERR_OR_ZERO(sspi->pclk));
2164 		return -ENXIO;
2165 	}
2166 
2167 	sspi->mclk = devm_clk_get(&sspi->pdev->dev, "mod");
2168 	if (IS_ERR_OR_NULL(sspi->mclk)) {
2169 		SPI_ERR("[spi%d] Unable to acquire module clock '%s', return %ld\n",
2170 			sspi->master->bus_num, sspi->dev_name,
2171 			PTR_ERR_OR_ZERO(sspi->mclk));
2172 		return -ENXIO;
2173 	}
2174 
2175 	sspi->bus_clk = devm_clk_get(&sspi->pdev->dev, "bus");
2176 	if (IS_ERR_OR_NULL(sspi->bus_clk)) {
2177 		SPI_ERR("[spi%d] Unable to acquire bus clock '%s', return %ld\n",
2178 			sspi->master->bus_num, sspi->dev_name,
2179 			PTR_ERR_OR_ZERO(sspi->bus_clk));
2180 		return -ENXIO;
2181 	}
2182 
2183 	if (!sspi->reset) {
2184 		sspi->reset = devm_reset_control_get(&sspi->pdev->dev, NULL);
2185 		if (IS_ERR_OR_NULL(sspi->reset)) {
2186 			SPI_ERR("[spi%d] Unable to acquire reset control '%s', return %ld\n",
2187 				sspi->master->bus_num, sspi->dev_name,
2188 				PTR_ERR_OR_ZERO(sspi->reset));
2189 			return -ENXIO;
2190 		}
2191 	}
2192 
2193 	ret = of_property_read_u32(np, "sample_mode", &sspi->sample_mode);
2194 	if (ret) {
2195 		SPI_ERR("Failed to get sample mode\n");
2196 		sspi->sample_mode = SAMP_MODE_DL_DEFAULT;
2197 	}
2198 	ret = of_property_read_u32(np, "sample_delay", &sspi->sample_delay);
2199 	if (ret) {
2200 		SPI_ERR("Failed to get sample delay\n");
2201 		sspi->sample_delay = SAMP_MODE_DL_DEFAULT;
2202 	}
2203 	dprintk(DEBUG_INIT, "sample_mode:%d sample_delay:%d\n",
2204 				sspi->sample_mode, sspi->sample_delay);
2205 
2206 	return 0;
2207 }
2208 
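/*
 * Clock bring-up: pulse the reset line (assert + deassert), enable the
 * PLL source, reparent the module clock to it, round and program the
 * requested rate, then enable the module and bus clocks. Returns the
 * rate actually achieved, which spi_set_clk() later divides down to
 * the wire speed.
 */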
2209 static int sunxi_spi_clk_init(struct sunxi_spi *sspi, u32 mod_clk)
2210 {
2211 	int ret = 0;
2212 	long rate = 0;
2213 
2214 	/* assert and deassert constitute a complete hardware reset operation */
2215 	ret = reset_control_assert(sspi->reset);
2216 	if (ret != 0) {
2217 		SPI_ERR("[spi%d] Unable to assert reset control '%s', return %d\n",
2218 			sspi->master->bus_num, sspi->dev_name,
2219 			ret);
2220 		return -ENXIO;
2221 	}
2222 	ret = reset_control_deassert(sspi->reset);
2223 	if (ret != 0) {
2224 		SPI_ERR("[spi%d] Unable to deassert reset control '%s', return %d\n",
2225 			sspi->master->bus_num, sspi->dev_name,
2226 			ret);
2227 		return -ENXIO;
2228 	}
2229 
2230 	if (clk_prepare_enable(sspi->pclk)) {
2231 		SPI_ERR("[spi%d] Couldn't enable pll clock 'spi'\n", sspi->master->bus_num);
2232 		return -1;	/* pclk never got enabled, nothing to undo */
2233 	}
2234 
2235 	ret = clk_set_parent(sspi->mclk, sspi->pclk);
2236 	if (ret != 0) {
2237 		SPI_ERR("[spi%d] clk_set_parent() failed! return %d\n",
2238 			sspi->master->bus_num, ret);
2239 		goto err1;
2240 	}
2241 
2242 	rate = clk_round_rate(sspi->mclk, mod_clk);
2243 	if (clk_set_rate(sspi->mclk, rate)) {
2244 		SPI_ERR("[spi%d] spi clk_set_rate failed\n", sspi->master->bus_num);
2245 		goto err1;
2246 	}
2247 
2248 	dprintk(DEBUG_INIT, "[spi%d] mclk %u\n", sspi->master->bus_num, (unsigned)clk_get_rate(sspi->mclk));
2249 
2250 	if (clk_prepare_enable(sspi->mclk)) {
2251 		SPI_ERR("[spi%d] Couldn't enable module clock 'spi'\n", sspi->master->bus_num);
2252 		goto err1;
2253 	}
2254 
2255 	if (clk_prepare_enable(sspi->bus_clk)) {
2256 		SPI_ERR("[spi%d] Couldn't enable bus clock 'spi'\n", sspi->master->bus_num);
2257 		goto err2;
2258 	}
2259 
2260 	return clk_get_rate(sspi->mclk);
2261 
2262 err2:
2263 	clk_disable_unprepare(sspi->mclk);
2264 err1:
2265 	clk_disable_unprepare(sspi->pclk);
2268 
2269 	return -1;
2270 }
2271 
2272 static int sunxi_spi_clk_exit(struct sunxi_spi *sspi)
2273 {
2274 	if (IS_ERR_OR_NULL(sspi->mclk)) {
2275 		SPI_ERR("[spi%d] SPI mclk handle is invalid!\n", sspi->master->bus_num);
2276 		return -1;
2277 	}
2278 
2279 	clk_disable_unprepare(sspi->bus_clk);
2280 	clk_disable_unprepare(sspi->mclk);
2281 	return 0;
2282 }
2283 
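/*
 * One-time controller setup: enable the regulator and clocks, then
 * program the role. Master mode defaults to 10 MHz, SPI_MODE_0 and
 * manual chip-select control; slave mode additionally applies the
 * sample mode/delay read from the device tree.
 */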
2284 static int sunxi_spi_hw_init(struct sunxi_spi *sspi,
2285 		struct sunxi_spi_platform_data *pdata, struct device *dev)
2286 {
2287 	void __iomem *base_addr = sspi->base_addr;
2288 	u32 sclk_freq_def = pdata->sclk_freq_def;	/* DT "clock-frequency" */
2289 	int sclk_freq = 0;
2290 	int bus_no = sspi->pdev->id;
2291 
2292 	spi_regulator_enable(pdata);
2293 
2294 	sclk_freq = sunxi_spi_clk_init(sspi, pdata->sclk_freq_def);
2295 	if (sclk_freq < 0) {
2296 		SPI_ERR("[spi%d] sunxi_spi_clk_init(%s) failed!\n", sspi->master->bus_num, sspi->dev_name);
2297 		return -1;
2298 	}
2299 
2300 	if (!sspi->dbi_enabled) {
2301 		/* enable the spi module */
2302 		spi_enable_bus(base_addr);
2303 	}
2304 
2305 	sunxi_spi_select_gpio_state(sspi->pctrl, PINCTRL_STATE_DEFAULT, bus_no);
2306 
2307 	if (sspi->dbi_enabled) {
2308 		spi_set_slave(base_addr);
2309 		spi_set_dbi(base_addr);
2310 		spi_enable_dbi(base_addr);
2311 		spi_set_clk(10000000, sclk_freq, sspi);
2312 		spi_enable_tp(base_addr);
2313 	}
2314 	if (!sspi->mode) {
2315 		/* master: set spi module clock;
2316 		 * set the default frequency	10MHz
2317 		 */
2318 		spi_set_master(base_addr);
2319 		spi_set_clk(10000000, sclk_freq, sspi);
2320 		/* master : set POL,PHA,SSOPL,LMTF,DDB,DHB; default: SSCTL=0,SMC=1,TBW=0. */
2321 		spi_config_tc(1, SPI_MODE_0, base_addr);
2322 		spi_enable_tp(base_addr);
2323 		/* manual control the chip select */
2324 		sunxi_spi_ss_ctrl(base_addr, true);
2325 	} else {
2326 		/* slave mode */
2327 		spi_set_slave(base_addr);
2328 		/* slave: set POL,PHA,SSOPL,LMTF,DDB,DHB; default: SSCTL=0,SMC=1,TBW=0. */
2329 		spi_config_tc(1, SPI_MODE_0, base_addr);
2330 		spi_set_clk(sclk_freq_def, sclk_freq, sspi);
2331 
2332 		if (sspi->sample_delay == SAMP_MODE_DL_DEFAULT) {
2333 			if (sclk_freq_def >= SPI_HIGH_FREQUENCY)
2334 				spi_sample_delay(0, 1, 0, base_addr);
2335 			else if (sclk_freq_def <= SPI_LOW_FREQUENCY)
2336 				spi_sample_delay(1, 0, 0, base_addr);
2337 			else
2338 				spi_sample_delay(0, 0, 0, base_addr);
2339 		} else {
2340 			spi_samp_mode_enable(1, base_addr);
2341 			spi_samp_dl_sw_status(1, base_addr);
2342 			spi_set_sample_mode(sspi->sample_mode, base_addr);
2343 			spi_set_sample_delay(sspi->sample_delay, base_addr);
2344 		}
2345 	}
2346 
2347 	/* reset fifo */
2348 	spi_reset_fifo(base_addr);
2349 
2350 	return 0;
2351 }
2352 
2353 static int sunxi_spi_hw_exit(struct sunxi_spi *sspi, struct sunxi_spi_platform_data *pdata)
2354 {
2355 	struct spi_master *master = sspi->master;
2356 
2357 	/* release the gpio */
2358 	//sunxi_spi_release_gpio(sspi);
2359 	sunxi_spi_select_gpio_state(sspi->pctrl, PINCTRL_STATE_SLEEP, master->bus_num);
2360 
2361 	/* disable the spi controller */
2362 	spi_disable_bus(sspi->base_addr);
2363 
2364 	/* disable module clock */
2365 	sunxi_spi_clk_exit(sspi);
2366 
2367 	/* disable regulator */
2368 	spi_regulator_disable(pdata);
2369 
2370 	return 0;
2371 }
2372 
2373 static ssize_t sunxi_spi_info_show(struct device *dev,
2374 		struct device_attribute *attr, char *buf)
2375 {
2376 	struct platform_device *pdev = to_platform_device(dev);
2377 	struct sunxi_spi_platform_data *pdata = dev->platform_data;
2378 
2379 	return snprintf(buf, PAGE_SIZE,
2380 		"pdev->id   = %d\n"
2381 		"pdev->name = %s\n"
2382 		"pdev->num_resources = %u\n"
2383 		"pdev->resource.mem = [%pa, %pa]\n"
2384 		"pdev->resource.irq = %pa\n"
2385 		"pdev->dev.platform_data.cs_num    = %d\n"
2386 		"pdev->dev.platform_data.regulator = 0x%p\n"
2387 		"pdev->dev.platform_data.regulator_id = %s\n",
2388 		pdev->id, pdev->name, pdev->num_resources,
2389 		&pdev->resource[0].start, &pdev->resource[0].end,
2390 		&pdev->resource[1].start, pdata->cs_num, pdata->regulator,
2391 		pdata->regulator_id);
2392 }
2393 static struct device_attribute sunxi_spi_info_attr =
2394 	__ATTR(info, S_IRUGO, sunxi_spi_info_show, NULL);
2395 
2396 static ssize_t sunxi_spi_status_show(struct device *dev,
2397 		struct device_attribute *attr, char *buf)
2398 {
2399 	struct spi_master *master = dev_get_drvdata(dev);
2400 	struct sunxi_spi *sspi;
2401 	char const *spi_mode[] = {"Single mode, half duplex read",
2402 				  "Single mode, half duplex write",
2403 				  "Single mode, full duplex read and write",
2404 				  "Dual mode, half duplex read",
2405 				  "Dual mode, half duplex write",
				  "Quad mode, half duplex read",
				  "Quad mode, half duplex write",
2406 				  "Null"};
2407 	char const *busy_state[] = {"Unknown", "Free", "Suspend", "Busy"};
2408 	char const *result_str[] = {"Success", "Fail"};
2409 #if IS_ENABLED(CONFIG_DMA_ENGINE)
2410 	char const *dma_dir[] = {"DMA NULL", "DMA read", "DMA write"};
2411 #endif
2412 
2413 	if (master == NULL)
2414 		return snprintf(buf, PAGE_SIZE, "%s\n", "spi_master is NULL!");

	sspi = spi_master_get_devdata(master);
2415 
2416 	return snprintf(buf, PAGE_SIZE,
2417 			"master->bus_num = %d\n"
2418 			"master->num_chipselect = %d\n"
2419 			"master->dma_alignment  = %d\n"
2420 			"master->mode_bits = %d\n"
2421 			"master->flags = 0x%x, ->bus_lock_flag = 0x%x\n"
2422 			"master->busy = %d, ->running = %d, ->rt = %d\n"
2423 			"sspi->mode_type = %d [%s]\n"
2424 			"sspi->irq = %d [%s]\n"
2425 #if IS_ENABLED(CONFIG_DMA_ENGINE)
2426 			"sspi->dma_tx.dir = %d [%s]\n"
2427 			"sspi->dma_rx.dir = %d [%s]\n"
2428 #endif
2429 			"sspi->busy = %d [%s]\n"
2430 			"sspi->result = %d [%s]\n"
2431 			"sspi->base_addr = 0x%p, the SPI control register:\n"
2432 			"[VER] 0x%02x = 0x%08x, [GCR] 0x%02x = 0x%08x, [TCR] 0x%02x = 0x%08x\n"
2433 			"[ICR] 0x%02x = 0x%08x, [ISR] 0x%02x = 0x%08x, [FCR] 0x%02x = 0x%08x\n"
2434 			"[FSR] 0x%02x = 0x%08x, [WCR] 0x%02x = 0x%08x, [CCR] 0x%02x = 0x%08x\n"
2435 			"[BCR] 0x%02x = 0x%08x, [TCR] 0x%02x = 0x%08x, [BCC] 0x%02x = 0x%08x\n"
2436 			"[DMA] 0x%02x = 0x%08x, [TXR] 0x%02x = 0x%08x, [RXD] 0x%02x = 0x%08x\n",
2437 			master->bus_num, master->num_chipselect, master->dma_alignment,
2438 			master->mode_bits, master->flags, master->bus_lock_flag,
2439 			master->busy, master->running, master->rt,
2440 			sspi->mode_type, spi_mode[sspi->mode_type],
2441 			sspi->irq, sspi->dev_name,
2442 #if IS_ENABLED(CONFIG_DMA_ENGINE)
2443 			sspi->dma_tx.dir, dma_dir[sspi->dma_tx.dir],
2444 			sspi->dma_rx.dir, dma_dir[sspi->dma_rx.dir],
2445 #endif
2446 			sspi->busy, busy_state[sspi->busy],
2447 			sspi->result, result_str[sspi->result],
2448 			sspi->base_addr,
2449 			SPI_VER_REG, readl(sspi->base_addr + SPI_VER_REG),
2450 			SPI_GC_REG, readl(sspi->base_addr + SPI_GC_REG),
2451 			SPI_TC_REG, readl(sspi->base_addr + SPI_TC_REG),
2452 			SPI_INT_CTL_REG, readl(sspi->base_addr + SPI_INT_CTL_REG),
2453 			SPI_INT_STA_REG, readl(sspi->base_addr + SPI_INT_STA_REG),
2454 
2455 			SPI_FIFO_CTL_REG, readl(sspi->base_addr + SPI_FIFO_CTL_REG),
2456 			SPI_FIFO_STA_REG, readl(sspi->base_addr + SPI_FIFO_STA_REG),
2457 			SPI_WAIT_CNT_REG, readl(sspi->base_addr + SPI_WAIT_CNT_REG),
2458 			SPI_CLK_CTL_REG, readl(sspi->base_addr + SPI_CLK_CTL_REG),
2459 			SPI_BURST_CNT_REG, readl(sspi->base_addr + SPI_BURST_CNT_REG),
2460 
2461 			SPI_TRANSMIT_CNT_REG, readl(sspi->base_addr + SPI_TRANSMIT_CNT_REG),
2462 			SPI_BCC_REG, readl(sspi->base_addr + SPI_BCC_REG),
2463 			SPI_DMA_CTL_REG, readl(sspi->base_addr + SPI_DMA_CTL_REG),
2464 			SPI_TXDATA_REG, readl(sspi->base_addr + SPI_TXDATA_REG),
2465 			SPI_RXDATA_REG, readl(sspi->base_addr + SPI_RXDATA_REG));
2466 }
2467 static struct device_attribute sunxi_spi_status_attr =
2468 	__ATTR(status, S_IRUGO, sunxi_spi_status_show, NULL);
2469 
2470 static void sunxi_spi_create_sysfs(struct platform_device *_pdev)
2471 {
2472 	device_create_file(&_pdev->dev, &sunxi_spi_info_attr);
2473 	device_create_file(&_pdev->dev, &sunxi_spi_status_attr);
2474 }
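
/*
 * With the two attributes registered above, userspace can inspect the
 * controller at runtime, e.g. "cat .../spi0/info" for the platform
 * data and ".../spi0/status" for a live register snapshot (path
 * abbreviated; it depends on where the platform device is attached).
 */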
2475 
2476 static void sunxi_spi_remove_sysfs(struct platform_device *_pdev)
2477 {
2478 	device_remove_file(&_pdev->dev, &sunxi_spi_info_attr);
2479 	device_remove_file(&_pdev->dev, &sunxi_spi_status_attr);
2480 }
2481 
2482 static int sunxi_spi_probe(struct platform_device *pdev)
2483 {
2484 	struct device_node *np = pdev->dev.of_node;
2485 	struct resource	*mem_res;
2486 	struct sunxi_spi *sspi;
2487 	struct sunxi_spi_platform_data *pdata;
2488 	struct spi_master *master;
2489 	struct sunxi_slave *slave = NULL;
2490 	char spi_para[16] = {0};
2491 	int ret = 0, err = 0, irq;
2492 
2493 	if (np == NULL) {
2494 		SPI_ERR("SPI failed to get of_node\n");
2495 		return -ENODEV;
2496 	}
2497 
2498 	pdev->id = of_alias_get_id(np, "spi");
2499 	if (pdev->id < 0) {
2500 		SPI_ERR("SPI failed to get alias id\n");
2501 		return -EINVAL;
2502 	}
2503 
2504 #if IS_ENABLED(CONFIG_DMA_ENGINE)
2505 	pdev->dev.dma_mask = &sunxi_spi_dma_mask;
2506 	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
2507 #endif
2508 
2509 	pdata = kzalloc(sizeof(struct sunxi_spi_platform_data), GFP_KERNEL);
2510 	if (pdata == NULL) {
2511 		SPI_ERR("SPI failed to alloc mem\n");
2512 		return -ENOMEM;
2513 	}
2514 	pdev->dev.platform_data = pdata;
2515 
2516 	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2517 	if (mem_res == NULL) {
2518 		SPI_ERR("Unable to get spi MEM resource\n");
2519 		ret = -ENXIO;
2520 		goto err0;
2521 	}
2522 
2523 	irq = platform_get_irq(pdev, 0);
2524 	if (irq < 0) {
2525 		SPI_ERR("No spi IRQ specified\n");
2526 		ret = -ENXIO;
2527 		goto err0;
2528 	}
2529 
2530 	snprintf(spi_para, sizeof(spi_para), "spi%d_cs_number", pdev->id);
2531 	ret = of_property_read_u32(np, spi_para, &pdata->cs_num);
2532 	if (ret) {
2533 		SPI_ERR("Failed to get cs_number property\n");
2534 		ret = -EINVAL;
2535 		goto err0;
2536 	}
2537 
2538 	/* create spi master */
2539 	master = spi_alloc_master(&pdev->dev, sizeof(struct sunxi_spi));
2540 	if (master == NULL) {
2541 		SPI_ERR("Unable to allocate SPI Master\n");
2542 		ret = -ENOMEM;
2543 		goto err0;
2544 	}
2545 
2546 	platform_set_drvdata(pdev, master);
2547 	sspi = spi_master_get_devdata(master);
2548 	memset(sspi, 0, sizeof(struct sunxi_spi));
2549 
2550 	sspi->master        = master;
2551 #if IS_ENABLED(CONFIG_DMA_ENGINE)
2552 	sspi->dma_rx.dir		= SPI_DMA_RWNULL;
2553 	sspi->dma_tx.dir		= SPI_DMA_RWNULL;
2554 #endif
2555 	sspi->busy			= SPI_FREE;
2556 	sspi->mode_type			= MODE_TYPE_NULL;
2557 	sspi->irq			= irq;
2558 
2559 	master->max_speed_hz		= SPI_MAX_FREQUENCY;
2560 	master->dev.of_node		= pdev->dev.of_node;
2561 	master->bus_num			= pdev->id;
2562 	master->setup			= sunxi_spi_setup;
2563 	master->can_dma			= sunxi_spi_can_dma;
2564 	master->transfer_one		= sunxi_spi_transfer_one;
2565 	master->use_gpio_descriptors	= true;
2566 	master->set_cs			= sunxi_spi_set_cs;
2567 	master->num_chipselect		= pdata->cs_num;
2568 	master->bits_per_word_mask	= SPI_BPW_MASK(8);
2569 	/* the spi->mode bits understood by this driver: */
2570 	master->mode_bits	= SPI_CPOL | SPI_CPHA | SPI_CS_HIGH
2571 				| SPI_LSB_FIRST | SPI_TX_DUAL | SPI_TX_QUAD
2572 				| SPI_RX_DUAL | SPI_RX_QUAD;
2573 
2574 	ret = of_property_read_u32(np, "spi_dbi_enable", &sspi->dbi_enabled);
2575 	if (ret)
2576 		sspi->dbi_enabled = 0;
2577 	else
2578 		dprintk(DEBUG_INIT, "[spi%d] SPI DBI INTERFACE\n", sspi->master->bus_num);
2579 
2580 	if (sspi->dbi_enabled)
2581 		sspi->dbi_config = kzalloc(sizeof(struct spi_dbi_config), GFP_KERNEL);
2582 
2583 	snprintf(sspi->dev_name, sizeof(sspi->dev_name), SUNXI_SPI_DEV_NAME"%d", pdev->id);
2584 
2585 	err = devm_request_irq(&pdev->dev, irq, sunxi_spi_handler, 0,
2586 			sspi->dev_name, sspi);
2587 	if (err) {
2588 		SPI_ERR("[spi%d] Cannot request IRQ\n", sspi->master->bus_num);
2589 		ret = -EINVAL;
2590 		goto err1;
2591 	}
2592 
2593 	if (request_mem_region(mem_res->start,
2594 			resource_size(mem_res), pdev->name) == NULL) {
2595 		SPI_ERR("[spi%d] Req mem region failed\n", sspi->master->bus_num);
2596 		ret = -ENXIO;
2597 		goto err2;
2598 	}
2599 
2600 	sspi->base_addr = ioremap(mem_res->start, resource_size(mem_res));
2601 	if (sspi->base_addr == NULL) {
2602 		SPI_ERR("[spi%d] Unable to remap IO\n", sspi->master->bus_num);
2603 		ret = -ENXIO;
2604 		goto err3;
2605 	}
2606 
2607 	sspi->base_addr_phy = mem_res->start;
2608 	sspi->pdev = pdev;
2609 	pdev->dev.init_name = sspi->dev_name;
2610 
2611 	err = sunxi_spi_resource_get(sspi);
2612 	if (err) {
2613 		SPI_ERR("[spi%d] resource get error\n", sspi->master->bus_num);
2614 		ret = -EINVAL;
2615 		goto err4;	/* the MMIO region is already mapped at this point */
2616 	}
2617 
2618 	/* set up default mode */
2619 	ret = sunxi_spi_hw_init(sspi, pdata, &pdev->dev);
2620 	if (ret != 0) {
2621 		SPI_ERR("[spi%d] spi hw init failed!\n", sspi->master->bus_num);
2622 		ret = -EINVAL;
2623 		goto err4;
2624 	}
2625 
2626 	spin_lock_init(&sspi->lock);
2627 	init_completion(&sspi->done);
2628 
2629 	if (sspi->mode) {
2630 		slave = kzalloc(sizeof(*slave), GFP_KERNEL);
2631 		if (IS_ERR_OR_NULL(slave)) {
2632 			SPI_ERR("[spi%d] failed to alloc mem\n", sspi->master->bus_num);
2633 			ret = -ENOMEM;
2634 			goto err5;
2635 		}
2636 		sspi->slave = slave;
2637 		sspi->slave->set_up_txdata = sunxi_spi_slave_set_txdata;
2638 		sspi->task = kthread_create(sunxi_spi_slave_task, sspi, "spi_slave");
2639 		if (IS_ERR(sspi->task)) {
2640 			SPI_ERR("[spi%d] unable to start kernel thread.\n", sspi->master->bus_num);
2641 			ret = PTR_ERR(sspi->task);
2642 			sspi->task = NULL;
2643 			goto err6;
2645 		}
2646 
2647 		wake_up_process(sspi->task);
2648 	} else {
2649 		if (spi_register_master(master)) {
2650 			SPI_ERR("[spi%d] cannot register SPI master\n", sspi->master->bus_num);
2651 			ret = -EBUSY;
2652 			goto err6;
2653 		}
2654 	}
2655 
2656 	sunxi_spi_create_sysfs(pdev);
2657 
2658 	dprintk(DEBUG_INFO, "[spi%d] loaded for Bus with %d Slaves at most\n",
2659 		master->bus_num, master->num_chipselect);
2660 	dprintk(DEBUG_INIT, "[spi%d]: driver probe succeed, base %px, irq %d\n",
2661 		master->bus_num, sspi->base_addr, sspi->irq);
2662 	return 0;
2663 
2664 err6:
2665 	if (sspi->mode)
2666 		if (!IS_ERR_OR_NULL(slave))
2667 			kfree(slave);
2668 err5:
2669 	sunxi_spi_hw_exit(sspi, pdev->dev.platform_data);
2670 
2671 err4:
2672 	iounmap(sspi->base_addr);
2673 err3:
2674 	release_mem_region(mem_res->start, resource_size(mem_res));
2675 err2:
2676 	free_irq(sspi->irq, sspi);
2677 err1:
2678 	if (sspi->dbi_enabled)
2679 		kfree(sspi->dbi_config);
2680 	platform_set_drvdata(pdev, NULL);
2681 	spi_master_put(master);
2682 err0:
2683 	kfree(pdev->dev.platform_data);
2684 
2685 	return ret;
2686 }
2687 
2688 static int sunxi_spi_remove(struct platform_device *pdev)
2689 {
2690 	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
2691 	struct sunxi_spi *sspi = spi_master_get_devdata(master);
2692 	struct resource	*mem_res;
2693 	unsigned long flags;
2694 
2695 	spin_lock_irqsave(&sspi->lock, flags);
2696 	sspi->busy |= SPI_FREE;
2697 	spin_unlock_irqrestore(&sspi->lock, flags);
2698 
2699 	while (sspi->busy & SPI_BUSY)
2700 		msleep(10);
2701 
2702 	sunxi_spi_remove_sysfs(pdev);
2703 	spi_unregister_master(master);
2704 
2705 	if (sspi->mode)
2706 		if (!sspi->task_flag)
2707 			if (!IS_ERR(sspi->task))
2708 				kthread_stop(sspi->task);
2709 
2710 	sunxi_spi_hw_exit(sspi, pdev->dev.platform_data);
2711 	iounmap(sspi->base_addr);
2712 
2713 	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2714 	if (mem_res != NULL)
2715 		release_mem_region(mem_res->start, resource_size(mem_res));
2716 	free_irq(sspi->irq, sspi);
2717 
2718 	if (sspi->dbi_enabled)
2719 		kfree(sspi->dbi_config);
2720 
2721 	platform_set_drvdata(pdev, NULL);
2722 	spi_master_put(master);
2723 	kfree(pdev->dev.platform_data);
2724 
2725 	return 0;
2726 }
2727 
2728 #if IS_ENABLED(CONFIG_PM)
2729 static int sunxi_spi_suspend(struct device *dev)
2730 {
2731 	struct platform_device *pdev = to_platform_device(dev);
2732 	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
2733 	struct sunxi_spi *sspi = spi_master_get_devdata(master);
2734 	unsigned long flags;
2735 
2736 	spin_lock_irqsave(&sspi->lock, flags);
2737 	sspi->busy |= SPI_SUSPND;
2738 	spin_unlock_irqrestore(&sspi->lock, flags);
2739 
2740 	while (sspi->busy & SPI_BUSY)
2741 		msleep(10);
2742 
2743 	sunxi_spi_hw_exit(sspi, pdev->dev.platform_data);
2744 
2745 	dprintk(DEBUG_SUSPEND, "[spi%d] suspend finish\n", master->bus_num);
2746 
2747 	return 0;
2748 }
2749 
2750 static int sunxi_spi_resume(struct device *dev)
2751 {
2752 	struct platform_device *pdev = to_platform_device(dev);
2753 	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
2754 	struct sunxi_spi *sspi = spi_master_get_devdata(master);
2755 	unsigned long flags;
2756 
2757 	sunxi_spi_hw_init(sspi, pdev->dev.platform_data, dev);
2758 
2759 	spin_lock_irqsave(&sspi->lock, flags);
2760 	sspi->busy = SPI_FREE;
2761 	spin_unlock_irqrestore(&sspi->lock, flags);
2762 	dprintk(DEBUG_SUSPEND, "[spi%d] resume finish\n", master->bus_num);
2763 
2764 	return 0;
2765 }
2766 
2767 static const struct dev_pm_ops sunxi_spi_dev_pm_ops = {
2768 	.suspend = sunxi_spi_suspend,
2769 	.resume  = sunxi_spi_resume,
2770 };
2771 
2772 #define SUNXI_SPI_DEV_PM_OPS (&sunxi_spi_dev_pm_ops)
2773 #else
2774 #define SUNXI_SPI_DEV_PM_OPS NULL
2775 #endif /* CONFIG_PM */
2776 
2777 static const struct of_device_id sunxi_spi_match[] = {
2778 	{ .compatible = "allwinner,sun8i-spi", },
2779 	{ .compatible = "allwinner,sun20i-spi", },
2780 	{ .compatible = "allwinner,sun50i-spi", },
2781 	{},
2782 };
2783 MODULE_DEVICE_TABLE(of, sunxi_spi_match);
2784 
2785 
2786 static struct platform_driver sunxi_spi_driver = {
2787 	.probe   = sunxi_spi_probe,
2788 	.remove  = sunxi_spi_remove,
2789 	.driver = {
2790 		.name	= SUNXI_SPI_DEV_NAME,
2791 		.owner	= THIS_MODULE,
2792 		.pm		= SUNXI_SPI_DEV_PM_OPS,
2793 		.of_match_table = sunxi_spi_match,
2794 	},
2795 };
2796 
2797 static int __init sunxi_spi_init(void)
2798 {
2799 	return platform_driver_register(&sunxi_spi_driver);
2800 }
2801 
2802 static void __exit sunxi_spi_exit(void)
2803 {
2804 	platform_driver_unregister(&sunxi_spi_driver);
2805 }
2806 
2807 fs_initcall_sync(sunxi_spi_init);
2808 module_exit(sunxi_spi_exit);
2809 module_param_named(debug, debug_mask, int, 0664);
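/*
 * debug_mask can be changed at runtime through
 * /sys/module/<module>/parameters/debug (mode 0664 above); setting more
 * bits enables the corresponding dprintk() levels used in this file.
 */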
2810 
2811 MODULE_AUTHOR("pannan");
2812 MODULE_DESCRIPTION("SUNXI SPI BUS Driver");
2813 MODULE_ALIAS("platform:"SUNXI_SPI_DEV_NAME);
2814 MODULE_LICENSE("GPL");
2815