/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/pxa-dma.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_data/mtd-nand-pxa3xx.h>
#define CHIP_DELAY_TIMEOUT	msecs_to_jiffies(200)
#define NAND_STOP_DELAY		msecs_to_jiffies(40)
#define PAGE_CHUNK_SIZE		(2048)

/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM.
 * The ONFI param page is 256 bytes, and there are three redundant copies
 * to be read. The JEDEC param page is 512 bytes, and there are also three
 * redundant copies to be read.
 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
 */
#define INIT_BUFFER_SIZE	2048
/* registers and bit definitions */
#define NDCR		(0x00) /* Control register */
#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR		(0x14) /* Status Register */
#define NDPCR		(0x18) /* Page Count Register */
#define NDBDR0		(0x1C) /* Bad Block Register 0 */
#define NDBDR1		(0x20) /* Bad Block Register 1 */
#define NDECCCTRL	(0x28) /* ECC control */
#define NDDB		(0x40) /* Data Buffer */
#define NDCB0		(0x48) /* Command Buffer0 */
#define NDCB1		(0x4C) /* Command Buffer1 */
#define NDCB2		(0x50) /* Command Buffer2 */

#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NFCV1_NDCR_ARB_CNTL	(0x1 << 19)
#define NFCV2_NDCR_STOP_ON_UNCOR	(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)
#define NDCR_INT_MASK		(0xFFF)

#define NDSR_MASK		(0xfff)
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK	(0x1f)
#define NDSR_ERR_CNT(sr)	(((sr) >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
#define NDSR_RDY		(0x1 << 12)
#define NDSR_FLASH_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_UNCORERR		(0x1 << 4)
#define NDSR_CORERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

#define NDCB0_LEN_OVRD		(0x1 << 28)
#define NDCB0_ST_ROW_EN		(0x1 << 26)
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)

#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
#define EXT_CMD_TYPE_READ	4 /* Read */
#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
#define EXT_CMD_TYPE_FINAL	3 /* Final command */
#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */

/*
 * This should be large enough to read 'ONFI' and 'JEDEC'.
 * Let's use 7 bytes, which is the maximum ID count supported
 * by the controller (see NDCR_RD_ID_CNT_MASK).
 */
#define READ_ID_BYTES		7

/* macros for registers read/write */
#define nand_writel(info, off, val)					\
	do {								\
		dev_vdbg(&info->pdev->dev,				\
			 "%s():%d nand_writel(0x%x, 0x%04x)\n",		\
			 __func__, __LINE__, (val), (off));		\
		writel_relaxed((val), (info)->mmio_base + (off));	\
	} while (0)

#define nand_readl(info, off)						\
	({								\
		unsigned int _v;					\
		_v = readl_relaxed((info)->mmio_base + (off));		\
		dev_vdbg(&info->pdev->dev,				\
			 "%s():%d nand_readl(0x%04x) = 0x%x\n",		\
			 __func__, __LINE__, (off), _v);		\
		_v;							\
	})

/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,
	ERR_SENDCMD	= -2,
	ERR_UNCORERR	= -3,
	ERR_BBERR	= -4,
	ERR_CORERR	= -5,
};

enum {
	STATE_IDLE = 0,
	STATE_PREPARED,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};

enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};

struct pxa3xx_nand_host {
	struct nand_chip	chip;
	void			*info_data;

	int			use_ecc;
	int			cs;

	/* calculated from pxa3xx_nand_flash data */
	unsigned int		col_addr_cycles;
	unsigned int		row_addr_cycles;
};

struct pxa3xx_nand_info {
	struct nand_hw_control	controller;
	struct platform_device	*pdev;

	struct clk		*clk;
	void __iomem		*mmio_base;
	unsigned long		mmio_phys;
	struct completion	cmd_complete, dev_ready;

	unsigned int		buf_start;
	unsigned int		buf_count;
	unsigned int		buf_size;
	unsigned int		data_buff_pos;
	unsigned int		oob_buff_pos;

	/* DMA information */
	struct scatterlist	sg;
	enum dma_data_direction	dma_dir;
	struct dma_chan		*dma_chan;
	dma_cookie_t		dma_cookie;
	int			drcmr_dat;

	unsigned char		*data_buff;
	unsigned char		*oob_buff;
	dma_addr_t		data_buff_phys;
	int			data_dma_ch;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int		state;

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int			cs;
	int			use_ecc;	/* use HW ECC ? */
	int			ecc_bch;	/* using BCH ECC? */
	int			use_dma;	/* use DMA ? */
	int			use_spare;	/* use spare ? */
	int			need_wait;

	/* Amount of real data per full chunk */
	unsigned int		chunk_size;

	/* Amount of spare data per full chunk */
	unsigned int		spare_size;

	/* Number of full chunks (i.e. of chunk_size + spare_size) */
	unsigned int		nfullchunks;

	/*
	 * Total number of chunks. If equal to nfullchunks, then there
	 * are only full chunks. Otherwise, there is one last chunk of
	 * size (last_chunk_size + last_spare_size).
	 */
	unsigned int		ntotalchunks;

	/* Amount of real data in the last chunk */
	unsigned int		last_chunk_size;

	/* Amount of spare data in the last chunk */
	unsigned int		last_spare_size;

	unsigned int		ecc_size;
	unsigned int		ecc_err_cnt;
	unsigned int		max_bitflips;
	int			retcode;

	/*
	 * Variables only valid during command
	 * execution. step_chunk_size and step_spare_size are the
	 * amounts of real data and spare data in the current
	 * chunk. cur_chunk is the current chunk being
	 * read/programmed.
	 */
	unsigned int		step_chunk_size;
	unsigned int		step_spare_size;
	unsigned int		cur_chunk;

	/* cached register value */
	uint32_t		reg_ndcr;
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;
	uint32_t		ndcb3;
};

static bool use_dma = 1;
module_param(use_dma, bool, 0444);
MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
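/*
 * Note: with permissions 0444 the parameter is read-only via sysfs, so it
 * can only be changed at load time. As an illustrative (hypothetical)
 * invocation: "modprobe pxa3xx_nand use_dma=0", or "pxa3xx_nand.use_dma=0"
 * on the kernel command line when the driver is built in.
 */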

struct pxa3xx_nand_timing {
	unsigned int	tCH;  /* Enable signal hold time */
	unsigned int	tCS;  /* Enable signal setup time */
	unsigned int	tWH;  /* ND_nWE high duration */
	unsigned int	tWP;  /* ND_nWE pulse time */
	unsigned int	tRH;  /* ND_nRE high duration */
	unsigned int	tRP;  /* ND_nRE pulse width */
	unsigned int	tR;   /* ND_nWE high to ND_nRE low for read */
	unsigned int	tWHR; /* ND_nWE high to ND_nRE low for status read */
	unsigned int	tAR;  /* ND_ALE low to ND_nRE low delay */
};

struct pxa3xx_nand_flash {
	uint32_t	chip_id;
	unsigned int	flash_width;	/* Width of Flash memory (DWIDTH_M) */
	unsigned int	dfc_width;	/* Width of flash controller (DWIDTH_C) */
	struct pxa3xx_nand_timing *timing;	/* NAND Flash timing */
};

static struct pxa3xx_nand_timing timing[] = {
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
};

static struct pxa3xx_nand_flash builtin_flash_types[] = {
	{ 0x46ec, 16, 16, &timing[1] },
	{ 0xdaec,  8,  8, &timing[1] },
	{ 0xd7ec,  8,  8, &timing[1] },
	{ 0xa12c,  8,  8, &timing[2] },
	{ 0xb12c, 16, 16, &timing[2] },
	{ 0xdc2c,  8,  8, &timing[2] },
	{ 0xcc2c, 16, 16, &timing[2] },
	{ 0xba20, 16, 16, &timing[3] },
};

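/*
 * OOB layout as seen by the MTD layer: each chunk contributes spare_size
 * free bytes followed by ecc_size ECC bytes, so the ECC region of chunk N
 * starts at N * (ecc_size + spare_size) + spare_size.
 */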
static int pxa3xx_ooblayout_ecc(struct mtd_info *mtd, int section,
				struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int nchunks = mtd->writesize / info->chunk_size;

	if (section >= nchunks)
		return -ERANGE;

	oobregion->offset = ((info->ecc_size + info->spare_size) * section) +
			    info->spare_size;
	oobregion->length = info->ecc_size;

	return 0;
}

static int pxa3xx_ooblayout_free(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int nchunks = mtd->writesize / info->chunk_size;

	if (section >= nchunks)
		return -ERANGE;

	if (!info->spare_size)
		return 0;

	oobregion->offset = section * (info->ecc_size + info->spare_size);
	oobregion->length = info->spare_size;
	if (!section) {
		/*
		 * Bootrom looks in bytes 0 & 5 for bad blocks for the
		 * 4KB page / 4bit BCH combination.
		 */
		if (mtd->writesize == 4096 && info->chunk_size == 2048) {
			oobregion->offset += 6;
			oobregion->length -= 6;
		} else {
			oobregion->offset += 2;
			oobregion->length -= 2;
		}
	}

	return 0;
}

static const struct mtd_ooblayout_ops pxa3xx_ooblayout_ops = {
	.ecc = pxa3xx_ooblayout_ecc,
	.free = pxa3xx_ooblayout_free,
};

static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};

#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

/* convert nanoseconds to NAND flash controller clock cycles */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
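/*
 * Worked example (hypothetical 156 MHz controller clock): for tCH = 40 ns,
 * ns2cycle(40, 156000000) = (int)(40 * 156 / 1000) = 6 cycles. The integer
 * cast truncates; the NDTR0_*/NDTR1_* macros above then clamp the result
 * to the register field width.
 */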

static const struct of_device_id pxa3xx_nand_dt_ids[] = {
	{
		.compatible = "marvell,pxa3xx-nand",
		.data       = (void *)PXA3XX_NAND_VARIANT_PXA,
	},
	{
		.compatible = "marvell,armada370-nand",
		.data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
	},
	{}
};
MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);

static enum pxa3xx_nand_variant
pxa3xx_nand_get_variant(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
	if (!of_id)
		return PXA3XX_NAND_VARIANT_PXA;
	return (enum pxa3xx_nand_variant)of_id->data;
}

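/*
 * Convert a pxa3xx_nand_timing entry (in nanoseconds) into NDTR0CS0 and
 * NDTR1CS0 register values, cache them for later chip-select switches and
 * program them into the controller.
 */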
static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
				   const struct pxa3xx_nand_timing *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
				       const struct nand_sdr_timings *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct nand_chip *chip = &host->chip;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
	u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
	u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
	u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
	u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
	u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
	u32 tR = chip->chip_delay * 1000;
	u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
	u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);

	/* fallback to a default value if tR = 0 */
	if (!tR)
		tR = 20000;

	ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
		NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
		NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
		NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
		NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
		NDTR0_tRP(ns2cycle(tRP_min, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
		NDTR1_tAR(ns2cycle(tAR_min, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host *host,
					   unsigned int *flash_width,
					   unsigned int *dfc_width)
{
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	const struct pxa3xx_nand_flash *f = NULL;
	struct mtd_info *mtd = nand_to_mtd(&host->chip);
	int i, id, ntypes;

	ntypes = ARRAY_SIZE(builtin_flash_types);

	chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);

	id = chip->read_byte(mtd);
	id |= chip->read_byte(mtd) << 0x8;

	for (i = 0; i < ntypes; i++) {
		f = &builtin_flash_types[i];

		if (f->chip_id == id)
			break;
	}

	if (i == ntypes) {
		dev_err(&info->pdev->dev, "Error: timings not found\n");
		return -EINVAL;
	}

	pxa3xx_nand_set_timing(host, f->timing);

	*flash_width = f->flash_width;
	*dfc_width = f->dfc_width;

	return 0;
}

static int pxa3xx_nand_init_timings_onfi(struct pxa3xx_nand_host *host,
					 int mode)
{
	const struct nand_sdr_timings *timings;

	mode = fls(mode) - 1;
	if (mode < 0)
		mode = 0;

	timings = onfi_async_timing_mode_to_sdr_timings(mode);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);

	return 0;
}

static int pxa3xx_nand_init(struct pxa3xx_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned int flash_width = 0, dfc_width = 0;
	int mode, err;

	mode = onfi_get_async_timing_mode(chip);
	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
		err = pxa3xx_nand_init_timings_compat(host, &flash_width,
						      &dfc_width);
		if (err)
			return err;

		if (flash_width == 16) {
			info->reg_ndcr |= NDCR_DWIDTH_M;
			chip->options |= NAND_BUSWIDTH_16;
		}

		info->reg_ndcr |= (dfc_width == 16) ? NDCR_DWIDTH_C : 0;
	} else {
		err = pxa3xx_nand_init_timings_onfi(host, mode);
		if (err)
			return err;
	}

	return 0;
}

/*
 * NOTE: ND_RUN must be set first and the command buffer written
 * afterwards; otherwise the controller does not work.
 * We enable all interrupts at the same time and let
 * pxa3xx_nand_irq() handle the logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	ndcr = info->reg_ndcr;

	if (info->use_ecc) {
		ndcr |= NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x1);
	} else {
		ndcr &= ~NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x0);
	}

	if (info->use_dma)
		ndcr |= NDCR_DMA_EN;
	else
		ndcr &= ~NDCR_DMA_EN;

	if (info->use_spare)
		ndcr |= NDCR_SPARE_EN;
	else
		ndcr &= ~NDCR_SPARE_EN;

	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDSR, NDSR_MASK);
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDCR, ndcr);
}

static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;
	int timeout = NAND_STOP_DELAY;

	/* wait for the RUN bit in NDCR to become 0 */
	ndcr = nand_readl(info, NDCR);
	while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
		ndcr = nand_readl(info, NDCR);
		udelay(1);
	}

	if (timeout <= 0) {
		ndcr &= ~NDCR_ND_RUN;
		nand_writel(info, NDCR, ndcr);
	}
	if (info->dma_chan)
		dmaengine_terminate_all(info->dma_chan);

	/* clear status bits */
	nand_writel(info, NDSR, NDSR_MASK);
}

static void __maybe_unused
enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~int_mask);
}

static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}

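/*
 * Read 'len' 32-bit words from the data FIFO (NDDB) into 'data',
 * honouring the BCH read constraint described in the comment below.
 */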
static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch) {
		u32 val;
		int ret;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32-byte read we
		 * have to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO eight 32-bit reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			ioread32_rep(info->mmio_base + NDDB, data, 8);

			ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
							 val & NDSR_RDDREQ, 1000, 5000);
			if (ret) {
				dev_err(&info->pdev->dev,
					"Timeout on RDDREQ while draining the FIFO\n");
				return;
			}

			data += 32;
			len -= 8;
		}
	}

	ioread32_rep(info->mmio_base + NDDB, data, len);
}

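/*
 * PIO transfer of the current chunk: push (or drain) the real data and
 * then the spare area for the current step, then advance the buffer
 * positions for a possible next chunk.
 */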
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	switch (info->state) {
	case STATE_PIO_WRITING:
		if (info->step_chunk_size)
			writesl(info->mmio_base + NDDB,
				info->data_buff + info->data_buff_pos,
				DIV_ROUND_UP(info->step_chunk_size, 4));

		if (info->step_spare_size)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->step_spare_size, 4));
		break;
	case STATE_PIO_READING:
		if (info->step_chunk_size)
			drain_fifo(info,
				   info->data_buff + info->data_buff_pos,
				   DIV_ROUND_UP(info->step_chunk_size, 4));

		if (info->step_spare_size)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->step_spare_size, 4));
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += info->step_chunk_size;
	info->oob_buff_pos += info->step_spare_size;
}

static void pxa3xx_nand_data_dma_irq(void *data)
{
	struct pxa3xx_nand_info *info = data;
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
	if (likely(status == DMA_COMPLETE)) {
		info->state = STATE_DMA_DONE;
	} else {
		dev_err(&info->pdev->dev, "DMA error on data channel\n");
		info->retcode = ERR_DMABUSERR;
	}
	dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);

	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
	enable_int(info, NDCR_INT_MASK);
}

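/*
 * Map the pre-allocated buffer and queue a single-entry scatterlist
 * transfer covering the chunk (plus spare and ECC when the spare area is
 * used); pxa3xx_nand_data_dma_irq() runs on completion.
 */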
static void start_data_dma(struct pxa3xx_nand_info *info)
{
	enum dma_transfer_direction direction;
	struct dma_async_tx_descriptor *tx;

	switch (info->state) {
	case STATE_DMA_WRITING:
		info->dma_dir = DMA_TO_DEVICE;
		direction = DMA_MEM_TO_DEV;
		break;
	case STATE_DMA_READING:
		info->dma_dir = DMA_FROM_DEVICE;
		direction = DMA_DEV_TO_MEM;
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}
	info->sg.length = info->chunk_size;
	if (info->use_spare)
		info->sg.length += info->spare_size + info->ecc_size;
	dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);

	tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
		return;
	}
	tx->callback = pxa3xx_nand_data_dma_irq;
	tx->callback_param = info;
	info->dma_cookie = dmaengine_submit(tx);
	dma_async_issue_pending(info->dma_chan);
	dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
		__func__, direction, info->dma_cookie, info->sg.length);
}

static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
{
	struct pxa3xx_nand_info *info = data;

	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);

	return IRQ_HANDLED;
}

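/*
 * Top-half interrupt handler: record ECC results, dispatch data transfer
 * requests either to DMA or to the PIO thread (IRQ_WAKE_THREAD), load the
 * command buffers when the controller requests them, and complete the
 * cmd_complete/dev_ready waiters.
 */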
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	if (info->cs == 0) {
		ready           = NDSR_FLASH_RDY;
		cmd_done        = NDSR_CS0_CMDD;
	} else {
		ready           = NDSR_RDY;
		cmd_done        = NDSR_CS1_CMDD;
	}

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store the maximum number of corrected
		 * bitflips to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		/* whether to use DMA for the data transfer */
		if (info->use_dma) {
			disable_int(info, NDCR_INT_MASK);
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_DMA_READING : STATE_DMA_WRITING;
			start_data_dma(info);
			goto NORMAL_IRQ_EXIT;
		} else {
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_PIO_READING : STATE_PIO_WRITING;
			ret = IRQ_WAKE_THREAD;
			goto NORMAL_IRQ_EXIT;
		}
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	/*
	 * Clear all status bits before issuing the next command, which
	 * can and will alter the status bits and will deserve a new
	 * interrupt on its own. This lets the controller exit the IRQ.
	 */
	nand_writel(info, NDSR, status);

	if (status & NDSR_WRCMDREQ) {
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing either 12 or 16 bytes directly
		 * to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored,
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	if (is_completed)
		complete(&info->cmd_complete);
	if (is_ready)
		complete(&info->dev_ready);
NORMAL_IRQ_EXIT:
	return ret;
}

static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}

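/*
 * Fill NDCB1/NDCB2 with the column and page address: small pages pack one
 * column byte plus the row address into NDCB1, while large pages use two
 * column bytes and split the row address across NDCB1 and NDCB2.
 */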
static void set_command_address(struct pxa3xx_nand_info *info,
		unsigned int page_size, uint16_t column, int page_addr)
{
	/* small page addr setting */
	if (page_size < PAGE_CHUNK_SIZE) {
		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
				| (column & 0xFF);

		info->ndcb2 = 0;
	} else {
		info->ndcb1 = ((page_addr & 0xFFFF) << 16)
				| (column & 0xFFFF);

		if (page_addr & 0xFF0000)
			info->ndcb2 = (page_addr & 0xFF0000) >> 16;
		else
			info->ndcb2 = 0;
	}
}

static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = nand_to_mtd(&host->chip);

	/* reset the data and OOB column pointers before handling data */
	info->buf_start		= 0;
	info->buf_count		= 0;
	info->data_buff_pos	= 0;
	info->oob_buff_pos	= 0;
	info->step_chunk_size	= 0;
	info->step_spare_size	= 0;
	info->cur_chunk		= 0;
	info->use_ecc		= 0;
	info->use_spare		= 1;
	info->retcode		= ERR_NONE;
	info->ecc_err_cnt	= 0;
	info->ndcb3		= 0;
	info->need_wait		= 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
		break;
	case NAND_CMD_PARAM:
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {
		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}
}

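/*
 * Translate an MTD command into NDCB0..NDCB3 values. Returns 1 if the
 * command must actually be started on the controller, 0 if it is a no-op
 * at this stage (e.g. SEQIN on small pages, ERASE2, or programming a
 * blank page).
 */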
static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = nand_to_mtd(&host->chip);
	addr_cycle = 0;
	exec_cmd = 1;

	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (mtd->writesize == PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				info->step_spare_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		if (is_buf_blank(info->data_buff,
					(mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				      info->step_spare_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->cur_chunk == info->ntotalchunks) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		info->buf_count = INIT_BUFFER_SIZE;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = INIT_BUFFER_SIZE;
		info->step_chunk_size = INIT_BUFFER_SIZE;
		break;

	case NAND_CMD_READID:
		info->buf_count = READ_ID_BYTES;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->step_chunk_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->step_chunk_size = 8;
		break;

	case NAND_CMD_ERASE1:
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "unsupported command %x\n",
				command);
		break;
	}

	return exec_cmd;
}

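/*
 * Legacy cmdfunc for chips whose page fits in the 2 KiB FIFO: one
 * monolithic command per MTD request, with a timeout-protected wait for
 * command completion.
 */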
static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * If this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device.
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chips hooked to different chip
	 * selects, so check whether the chip select has changed and,
	 * if so, reprogram the timing registers.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		init_completion(&info->cmd_complete);
		init_completion(&info->dev_ready);
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
		}
	}
	info->state = STATE_IDLE;
}

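/*
 * Extended cmdfunc used on NFCv2 when the page is larger than the FIFO:
 * reads and programs are split into chunked (naked) sub-commands issued
 * in a loop until the whole page has been transferred.
 */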
static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * If this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device.
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chips hooked to different chip
	 * selects, so check whether the chip select has changed and,
	 * if so, reprogram the timing registers.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion will be completed, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is exec_cmd.
	 */
	info->need_wait = 1;
	init_completion(&info->dev_ready);
	do {
		info->state = STATE_PREPARED;

		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			info->need_wait = 0;
			complete(&info->dev_ready);
			break;
		}

		init_completion(&info->cmd_complete);
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
			break;
		}

		/* Only a few commands need several steps */
		if (command != NAND_CMD_PAGEPROG &&
		    command != NAND_CMD_READ0    &&
		    command != NAND_CMD_READOOB)
			break;

		info->cur_chunk++;

		/* Check if the sequence is complete */
		if (info->cur_chunk == info->ntotalchunks &&
		    command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a split program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->cur_chunk == (info->ntotalchunks + 1) &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->cur_chunk == info->ntotalchunks - 1)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a split program command has no more data to transfer,
		 * the command dispatch must be issued to complete it.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->cur_chunk == info->ntotalchunks) {
				ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}

static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf, int oob_required,
		int page)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}

static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR) {
		/*
		 * For a blank page (all 0xff), the HW calculates its ECC as
		 * 0, which differs from the ECC information within the OOB
		 * area; ignore such uncorrectable errors.
		 */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}

static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	char retval = 0xFF;

	if (info->buf_start < info->buf_count)
		/* Have we just sent a new command? */
		retval = info->data_buff[info->buf_start++];

	return retval;
}

static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	u16 retval = 0xFFFF;

	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
		retval = *((u16 *)(info->data_buff+info->buf_start));
		info->buf_start += 2;
	}
	return retval;
}

static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(buf, info->data_buff + info->buf_start, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(info->data_buff + info->buf_start, buf, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}

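/*
 * waitfunc callback: wait for the ready completion armed by the previous
 * command, then map the driver retcode onto NAND status bits for write
 * and erase operations.
 */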
static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	if (info->need_wait) {
		info->need_wait = 0;
		if (!wait_for_completion_timeout(&info->dev_ready,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Ready time out!!!\n");
			return NAND_STATUS_FAIL;
		}
	}

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else
			return NAND_STATUS_FAIL;
	}

	return NAND_STATUS_READY;
}

static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	const struct nand_sdr_timings *timings;

	/* Configure default flash values */
	info->chunk_size = PAGE_CHUNK_SIZE;
	info->reg_ndcr = 0x0; /* enable all interrupts */
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
	info->reg_ndcr |= NDCR_SPARE_EN;

	/* try the common timing as a first attempt */
	timings = onfi_async_timing_mode_to_sdr_timings(0);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);
	return 0;
}

static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);

	info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
	info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
	info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
}

static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	uint32_t ndcr = nand_readl(info, NDCR);

	/* Set an initial chunk size */
	info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
	info->reg_ndcr = ndcr &
		~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
}

static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	struct dma_slave_config	config;
	dma_cap_mask_t mask;
	struct pxad_param param;
	int ret;

	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL)
		return -ENOMEM;
	if (use_dma == 0)
		return 0;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	sg_init_one(&info->sg, info->data_buff, info->buf_size);
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	param.prio = PXAD_PRIO_LOWEST;
	param.drcmr = info->drcmr_dat;
	info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
							  &param, &pdev->dev,
							  "data");
	if (!info->dma_chan) {
		dev_err(&pdev->dev, "unable to request data dma channel\n");
		return -ENODEV;
	}

	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.src_addr = info->mmio_phys + NDDB;
	config.dst_addr = info->mmio_phys + NDDB;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;
	ret = dmaengine_slave_config(info->dma_chan, &config);
	if (ret < 0) {
		dev_err(&info->pdev->dev,
			"dma channel configuration failed: %d\n",
			ret);
		return ret;
	}

	/*
	 * Now that DMA buffers are allocated we turn on
	 * DMA proper for I/O operations.
	 */
	info->use_dma = 1;
	return 0;
}

static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
	if (info->use_dma) {
		dmaengine_terminate_all(info->dma_chan);
		dma_release_channel(info->dma_chan);
	}
	kfree(info->data_buff);
}

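/*
 * Select the ECC engine configuration (chunk/spare/ECC sizes and Hamming
 * vs. BCH mode) from the requested strength, step size and page size.
 * Only the combinations listed below are supported by the hardware.
 */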
static int pxa_ecc_init(struct pxa3xx_nand_info *info,
			struct mtd_info *mtd,
			int strength, int ecc_stepsize, int page_size)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 2048;
		info->spare_size = 40;
		info->ecc_size = 24;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 512;
		info->spare_size = 8;
		info->ecc_size = 8;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/*
	 * Required ECC: 4-bit correction per 512 bytes
	 * Select: 16-bit correction per 2048 bytes
	 */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
		ecc->strength = 16;

	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->nfullchunks = 2;
		info->ntotalchunks = 2;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
		ecc->strength = 16;

	/*
	 * Required ECC: 8-bit correction per 512 bytes
	 * Select: 16-bit correction per 1024 bytes
	 */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->nfullchunks = 4;
		info->ntotalchunks = 5;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 0;
		info->last_spare_size = 64;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
		ecc->strength = 16;
	} else {
		dev_err(&info->pdev->dev,
			"ECC strength %d at page size %d is not supported\n",
			strength, page_size);
		return -ENODEV;
	}

	dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
		 ecc->strength, ecc->size);
	return 0;
}

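/*
 * Probe-time scan: detect (or keep) the controller configuration,
 * identify the chip, pick timings and ECC, size the DMA/PIO buffer and
 * finish with nand_scan_tail().
 */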
pxa3xx_nand_scan(struct mtd_info * mtd)1653 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1654 {
1655 	struct nand_chip *chip = mtd_to_nand(mtd);
1656 	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1657 	struct pxa3xx_nand_info *info = host->info_data;
1658 	struct platform_device *pdev = info->pdev;
1659 	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1660 	int ret;
1661 	uint16_t ecc_strength, ecc_step;
1662 
1663 	if (pdata->keep_config) {
1664 		pxa3xx_nand_detect_config(info);
1665 	} else {
1666 		ret = pxa3xx_nand_config_ident(info);
1667 		if (ret)
1668 			return ret;
1669 	}
1670 
1671 	if (info->reg_ndcr & NDCR_DWIDTH_M)
1672 		chip->options |= NAND_BUSWIDTH_16;
1673 
1674 	/* Device detection must be done with ECC disabled */
1675 	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1676 		nand_writel(info, NDECCCTRL, 0x0);
1677 
1678 	if (pdata->flash_bbt)
1679 		chip->bbt_options |= NAND_BBT_USE_FLASH;
1680 
1681 	chip->ecc.strength = pdata->ecc_strength;
1682 	chip->ecc.size = pdata->ecc_step_size;
1683 
1684 	ret = nand_scan_ident(mtd, 1, NULL);
1685 	if (ret)
1686 		return ret;
1687 
1688 	if (!pdata->keep_config) {
1689 		ret = pxa3xx_nand_init(host);
1690 		if (ret) {
1691 			dev_err(&info->pdev->dev, "Failed to init nand: %d\n",
1692 				ret);
1693 			return ret;
1694 		}
1695 	}
1696 
1697 	if (chip->bbt_options & NAND_BBT_USE_FLASH) {
1698 		/*
1699 		 * We'll use a bad block table stored in-flash and don't
1700 		 * allow writing the bad block marker to the flash.
1701 		 */
1702 		chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
1703 		chip->bbt_td = &bbt_main_descr;
1704 		chip->bbt_md = &bbt_mirror_descr;
1705 	}
1706 
1707 	/*
1708 	 * If the page size is bigger than the FIFO size, let's check
1709 	 * we are given the right variant and then switch to the extended
1710 	 * (aka splitted) command handling,
1711 	 */
1712 	if (mtd->writesize > PAGE_CHUNK_SIZE) {
1713 		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1714 			chip->cmdfunc = nand_cmdfunc_extended;
1715 		} else {
1716 			dev_err(&info->pdev->dev,
1717 				"unsupported page size on this variant\n");
1718 			return -ENODEV;
1719 		}
1720 	}
1721 
	ecc_strength = chip->ecc.strength;
	ecc_step = chip->ecc.size;
	if (!ecc_strength || !ecc_step) {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, mtd, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

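	/*
	 * Flashes with more than 65536 pages need a third row address
	 * cycle, as the page number no longer fits in two address bytes.
	 */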
	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;

	if (!pdata->keep_config)
		pxa3xx_nand_config_tail(info);

	return nand_scan_tail(mtd);
}

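/*
 * alloc_nand_resource - allocate and wire up the per-controller state.
 *
 * One pxa3xx_nand_host (and its nand_chip) is allocated per chip select,
 * all sharing the same controller, clock, MMIO region and interrupt.
 */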
static int alloc_nand_resource(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_info *info;
	struct pxa3xx_nand_host *host;
	struct nand_chip *chip = NULL;
	struct mtd_info *mtd;
	struct resource *r;
	int ret, irq, cs;

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata->num_cs <= 0) {
		dev_err(&pdev->dev, "invalid number of chip selects\n");
		return -ENODEV;
	}

	info = devm_kzalloc(&pdev->dev,
			    sizeof(*info) + sizeof(*host) * pdata->num_cs,
			    GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->pdev = pdev;
	info->variant = pxa3xx_nand_get_variant(pdev);
	for (cs = 0; cs < pdata->num_cs; cs++) {
		host = (void *)&info[1] + sizeof(*host) * cs;
		chip = &host->chip;
		nand_set_controller_data(chip, host);
		mtd = nand_to_mtd(chip);
		info->host[cs] = host;
		host->cs = cs;
		host->info_data = info;
		mtd->dev.parent = &pdev->dev;
		/* FIXME: all chips use the same device tree partitions */
		nand_set_flash_node(chip, np);

		chip->ecc.read_page	= pxa3xx_nand_read_page_hwecc;
		chip->ecc.write_page	= pxa3xx_nand_write_page_hwecc;
		chip->controller        = &info->controller;
		chip->waitfunc		= pxa3xx_nand_waitfunc;
		chip->select_chip	= pxa3xx_nand_select_chip;
		chip->read_word		= pxa3xx_nand_read_word;
		chip->read_byte		= pxa3xx_nand_read_byte;
		chip->read_buf		= pxa3xx_nand_read_buf;
		chip->write_buf		= pxa3xx_nand_write_buf;
		chip->options		|= NAND_NO_SUBPAGE_WRITE;
		chip->cmdfunc		= nand_cmdfunc;
		chip->onfi_set_features	= nand_onfi_get_set_features_notsupp;
		chip->onfi_get_features	= nand_onfi_get_set_features_notsupp;
	}

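	/* All chip selects share a single controller lock and wait queue */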
	nand_hw_control_init(chip->controller);
	info->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(info->clk)) {
		ret = PTR_ERR(info->clk);
		dev_err(&pdev->dev, "failed to get nand clock: %d\n", ret);
		return ret;
	}
	ret = clk_prepare_enable(info->clk);
	if (ret < 0)
		return ret;

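	/* Legacy (non-DT) platforms pass the DMA request line as a resource */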
	if (!np && use_dma) {
		r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
		if (r == NULL) {
			dev_err(&pdev->dev,
				"no resource defined for data DMA\n");
			ret = -ENXIO;
			goto fail_disable_clk;
		}
		info->drcmr_dat = r->start;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no IRQ resource defined\n");
		ret = -ENXIO;
		goto fail_disable_clk;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(info->mmio_base)) {
		ret = PTR_ERR(info->mmio_base);
		dev_err(&pdev->dev, "failed to map register space: %d\n", ret);
		goto fail_disable_clk;
	}
	info->mmio_phys = r->start;

	/* Allocate a buffer to allow flash detection */
	info->buf_size = INIT_BUFFER_SIZE;
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL) {
		ret = -ENOMEM;
		goto fail_disable_clk;
	}

	/* initialize all interrupts to be disabled */
	disable_int(info, NDSR_MASK);

	ret = request_threaded_irq(irq, pxa3xx_nand_irq,
				   pxa3xx_nand_irq_thread, IRQF_ONESHOT,
				   pdev->name, info);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request IRQ: %d\n", ret);
		goto fail_free_buf;
	}

	platform_set_drvdata(pdev, info);

	return 0;

fail_free_buf:
	/* The IRQ request failed on this path, so there is no IRQ to free */
	kfree(info->data_buff);
fail_disable_clk:
	clk_disable_unprepare(info->clk);
	return ret;
}

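/*
 * pxa3xx_nand_remove - tear down in roughly the reverse order of probe:
 * IRQ, buffers, bus arbitration hand-over, clock, then the MTD devices.
 */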
static int pxa3xx_nand_remove(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	int irq, cs;

	if (!info)
		return 0;

	pdata = dev_get_platdata(&pdev->dev);

	irq = platform_get_irq(pdev, 0);
	if (irq >= 0)
		free_irq(irq, info);
	pxa3xx_nand_free_buff(info);

	/*
	 * In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
	 * In order to prevent a lockup of the system bus, the DFI bus
	 * arbitration is granted to the SMC upon driver removal. This is
	 * done by setting the x_ARB_CNTL bit, which also prevents the NAND
	 * controller from accessing the bus anymore.
	 */
	nand_writel(info, NDCR,
		    (nand_readl(info, NDCR) & ~NDCR_ND_ARB_EN) |
		    NFCV1_NDCR_ARB_CNTL);
	clk_disable_unprepare(info->clk);

	for (cs = 0; cs < pdata->num_cs; cs++)
		nand_release(nand_to_mtd(&info->host[cs]->chip));
	return 0;
}

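/*
 * pxa3xx_nand_probe_dt - build platform data from device tree properties.
 * Returns 0, leaving pdev untouched, when the device did not match via DT.
 */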
static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);

	if (!of_id)
		return 0;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
		pdata->enable_arbiter = 1;
	if (of_get_property(np, "marvell,nand-keep-config", NULL))
		pdata->keep_config = 1;
	of_property_read_u32(np, "num-cs", &pdata->num_cs);

	pdev->dev.platform_data = pdata;

	return 0;
}

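/*
 * pxa3xx_nand_probe - scan and register one MTD device per chip select;
 * the probe succeeds as long as at least one chip select registers.
 */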
static int pxa3xx_nand_probe(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_info *info;
	int ret, cs, probe_success, dma_available;

	dma_available = IS_ENABLED(CONFIG_ARM) &&
		(IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
	if (use_dma && !dma_available) {
		use_dma = 0;
		dev_warn(&pdev->dev,
			 "This platform can't do DMA on this device\n");
	}

	ret = pxa3xx_nand_probe_dt(pdev);
	if (ret)
		return ret;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data defined\n");
		return -ENODEV;
	}

	ret = alloc_nand_resource(pdev);
	if (ret)
		return ret;

	info = platform_get_drvdata(pdev);
	probe_success = 0;
	for (cs = 0; cs < pdata->num_cs; cs++) {
		struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);

		/*
		 * The mtd name matches the one used in the 'mtdparts' kernel
		 * parameter. This name cannot be changed, otherwise the
		 * user's mtd partition configuration would break.
		 */
		mtd->name = "pxa3xx_nand-0";
		info->cs = cs;
		ret = pxa3xx_nand_scan(mtd);
		if (ret) {
			dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
				cs);
			continue;
		}

		ret = mtd_device_register(mtd, pdata->parts[cs],
					  pdata->nr_parts[cs]);
		if (!ret)
			probe_success = 1;
	}

	if (!probe_success) {
		pxa3xx_nand_remove(pdev);
		return -ENODEV;
	}

	return 0;
}


#ifdef CONFIG_PM
static int pxa3xx_nand_suspend(struct device *dev)
{
	struct pxa3xx_nand_info *info = dev_get_drvdata(dev);

	if (info->state) {
		dev_err(dev, "driver busy, state = %d\n", info->state);
		return -EAGAIN;
	}

	clk_disable(info->clk);
	return 0;
}

static int pxa3xx_nand_resume(struct device *dev)
{
	struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
	int ret;

	ret = clk_enable(info->clk);
	if (ret < 0)
		return ret;

	/* We don't want to handle interrupts before any MTD routine runs */
	disable_int(info, NDCR_INT_MASK);

	/*
	 * Directly set the chip select to an invalid value, so that the
	 * driver reprograms the timings for the current chip select at
	 * the beginning of cmdfunc.
	 */
	info->cs = 0xff;

	/*
	 * As the spec says, NDSR is updated to 0x1800 when the nand clock
	 * is disabled and re-enabled. To prevent this from corrupting the
	 * driver's state machine, clear all status bits before resuming.
	 */
	nand_writel(info, NDSR, NDSR_MASK);

	return 0;
}
#else
#define pxa3xx_nand_suspend	NULL
#define pxa3xx_nand_resume	NULL
#endif

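/* Only system sleep hooks are provided; there is no runtime PM */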
static const struct dev_pm_ops pxa3xx_nand_pm_ops = {
	.suspend	= pxa3xx_nand_suspend,
	.resume		= pxa3xx_nand_resume,
};

static struct platform_driver pxa3xx_nand_driver = {
	.driver = {
		.name	= "pxa3xx-nand",
		.of_match_table = pxa3xx_nand_dt_ids,
		.pm	= &pxa3xx_nand_pm_ops,
	},
	.probe		= pxa3xx_nand_probe,
	.remove		= pxa3xx_nand_remove,
};

module_platform_driver(pxa3xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");