1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/mtd/nand/pxa3xx_nand.c
4  *
5  * Copyright © 2005 Intel Corporation
6  * Copyright © 2006 Marvell International Ltd.
7  */
8 
9 #include <common.h>
10 #include <malloc.h>
11 #include <fdtdec.h>
12 #include <nand.h>
13 #include <linux/errno.h>
14 #include <asm/io.h>
15 #include <asm/arch/cpu.h>
16 #include <linux/mtd/mtd.h>
17 #include <linux/mtd/rawnand.h>
18 #include <linux/types.h>
19 
20 #include "pxa3xx_nand.h"
21 
22 DECLARE_GLOBAL_DATA_PTR;
23 
24 #define TIMEOUT_DRAIN_FIFO	5	/* in ms */
25 #define	CHIP_DELAY_TIMEOUT	200
26 #define NAND_STOP_DELAY		40
27 #define PAGE_CHUNK_SIZE		(2048)
28 
29 /*
30  * Define a buffer size for the initial command that detects the flash device:
31  * STATUS, READID and PARAM. The largest of these is the PARAM command,
32  * needing 256 bytes.
33  */
34 #define INIT_BUFFER_SIZE	256
35 
36 /* registers and bit definitions */
37 #define NDCR		(0x00) /* Control register */
38 #define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
39 #define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
40 #define NDSR		(0x14) /* Status Register */
41 #define NDPCR		(0x18) /* Page Count Register */
42 #define NDBDR0		(0x1C) /* Bad Block Register 0 */
43 #define NDBDR1		(0x20) /* Bad Block Register 1 */
44 #define NDECCCTRL	(0x28) /* ECC control */
45 #define NDDB		(0x40) /* Data Buffer */
46 #define NDCB0		(0x48) /* Command Buffer0 */
47 #define NDCB1		(0x4C) /* Command Buffer1 */
48 #define NDCB2		(0x50) /* Command Buffer2 */
49 
50 #define NDCR_SPARE_EN		(0x1 << 31)
51 #define NDCR_ECC_EN		(0x1 << 30)
52 #define NDCR_DMA_EN		(0x1 << 29)
53 #define NDCR_ND_RUN		(0x1 << 28)
54 #define NDCR_DWIDTH_C		(0x1 << 27)
55 #define NDCR_DWIDTH_M		(0x1 << 26)
56 #define NDCR_PAGE_SZ		(0x1 << 24)
57 #define NDCR_NCSX		(0x1 << 23)
58 #define NDCR_ND_MODE		(0x3 << 21)
59 #define NDCR_NAND_MODE		(0x0)
60 #define NDCR_CLR_PG_CNT		(0x1 << 20)
61 #define NDCR_STOP_ON_UNCOR	(0x1 << 19)
62 #define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
63 #define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)
64 
65 #define NDCR_RA_START		(0x1 << 15)
66 #define NDCR_PG_PER_BLK		(0x1 << 14)
67 #define NDCR_ND_ARB_EN		(0x1 << 12)
68 #define NDCR_INT_MASK           (0xFFF)
69 
70 #define NDSR_MASK		(0xfff)
71 #define NDSR_ERR_CNT_OFF	(16)
72 #define NDSR_ERR_CNT_MASK       (0x1f)
73 #define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
74 #define NDSR_RDY                (0x1 << 12)
75 #define NDSR_FLASH_RDY          (0x1 << 11)
76 #define NDSR_CS0_PAGED		(0x1 << 10)
77 #define NDSR_CS1_PAGED		(0x1 << 9)
78 #define NDSR_CS0_CMDD		(0x1 << 8)
79 #define NDSR_CS1_CMDD		(0x1 << 7)
80 #define NDSR_CS0_BBD		(0x1 << 6)
81 #define NDSR_CS1_BBD		(0x1 << 5)
82 #define NDSR_UNCORERR		(0x1 << 4)
83 #define NDSR_CORERR		(0x1 << 3)
84 #define NDSR_WRDREQ		(0x1 << 2)
85 #define NDSR_RDDREQ		(0x1 << 1)
86 #define NDSR_WRCMDREQ		(0x1)
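/*
 * Example NDSR decoding (illustrative value only): a status of 0x00030008
 * has NDSR_CORERR set and an NDSR_ERR_CNT() field of 3, i.e. three
 * bitflips were corrected in the last chunk. The error-count field is
 * only meaningful on the BCH-capable variant, see pxa3xx_nand_irq().
 */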
87 
88 #define NDCB0_LEN_OVRD		(0x1 << 28)
89 #define NDCB0_ST_ROW_EN         (0x1 << 26)
90 #define NDCB0_AUTO_RS		(0x1 << 25)
91 #define NDCB0_CSEL		(0x1 << 24)
92 #define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
93 #define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
94 #define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
95 #define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
96 #define NDCB0_NC		(0x1 << 20)
97 #define NDCB0_DBC		(0x1 << 19)
98 #define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
99 #define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
100 #define NDCB0_CMD2_MASK		(0xff << 8)
101 #define NDCB0_CMD1_MASK		(0xff)
102 #define NDCB0_ADDR_CYC_SHIFT	(16)
103 
104 #define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
105 #define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
106 #define EXT_CMD_TYPE_READ	4 /* Read */
107 #define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
108 #define EXT_CMD_TYPE_FINAL	3 /* Final command */
109 #define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
110 #define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */
111 
112 /* macros for registers read/write */
113 #define nand_writel(info, off, val)	\
114 	writel((val), (info)->mmio_base + (off))
115 
116 #define nand_readl(info, off)		\
117 	readl((info)->mmio_base + (off))
118 
119 /* error code and state */
120 enum {
121 	ERR_NONE	= 0,
122 	ERR_DMABUSERR	= -1,
123 	ERR_SENDCMD	= -2,
124 	ERR_UNCORERR	= -3,
125 	ERR_BBERR	= -4,
126 	ERR_CORERR	= -5,
127 };
128 
129 enum {
130 	STATE_IDLE = 0,
131 	STATE_PREPARED,
132 	STATE_CMD_HANDLE,
133 	STATE_DMA_READING,
134 	STATE_DMA_WRITING,
135 	STATE_DMA_DONE,
136 	STATE_PIO_READING,
137 	STATE_PIO_WRITING,
138 	STATE_CMD_DONE,
139 	STATE_READY,
140 };
141 
142 enum pxa3xx_nand_variant {
143 	PXA3XX_NAND_VARIANT_PXA,
144 	PXA3XX_NAND_VARIANT_ARMADA370,
145 };
146 
147 struct pxa3xx_nand_host {
148 	struct nand_chip	chip;
149 	struct mtd_info         *mtd;
150 	void			*info_data;
151 
152 	/* page size of attached chip */
153 	int			use_ecc;
154 	int			cs;
155 
156 	/* calculated from pxa3xx_nand_flash data */
157 	unsigned int		col_addr_cycles;
158 	unsigned int		row_addr_cycles;
159 	size_t			read_id_bytes;
160 
161 };
162 
163 struct pxa3xx_nand_info {
164 	struct nand_hw_control	controller;
165 	struct pxa3xx_nand_platform_data *pdata;
166 
167 	struct clk		*clk;
168 	void __iomem		*mmio_base;
169 	unsigned long		mmio_phys;
170 	int			cmd_complete, dev_ready;
171 
172 	unsigned int		buf_start;
173 	unsigned int		buf_count;
174 	unsigned int		buf_size;
175 	unsigned int		data_buff_pos;
176 	unsigned int		oob_buff_pos;
177 
178 	unsigned char		*data_buff;
179 	unsigned char		*oob_buff;
180 
181 	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
182 	unsigned int		state;
183 
184 	/*
185 	 * This driver supports NFCv1 (as found in PXA SoC)
186 	 * and NFCv2 (as found in Armada 370/XP SoC).
187 	 */
188 	enum pxa3xx_nand_variant variant;
189 
190 	int			cs;
191 	int			use_ecc;	/* use HW ECC ? */
192 	int			ecc_bch;	/* using BCH ECC? */
193 	int			use_spare;	/* use spare ? */
194 	int			need_wait;
195 
196 	unsigned int		data_size;	/* data to be read from FIFO */
197 	unsigned int		chunk_size;	/* split commands chunk size */
198 	unsigned int		oob_size;
199 	unsigned int		spare_size;
200 	unsigned int		ecc_size;
201 	unsigned int		ecc_err_cnt;
202 	unsigned int		max_bitflips;
203 	int			retcode;
204 
205 	/* cached register value */
206 	uint32_t		reg_ndcr;
207 	uint32_t		ndtr0cs0;
208 	uint32_t		ndtr1cs0;
209 
210 	/* generated NDCBx register values */
211 	uint32_t		ndcb0;
212 	uint32_t		ndcb1;
213 	uint32_t		ndcb2;
214 	uint32_t		ndcb3;
215 };
216 
217 static struct pxa3xx_nand_timing timing[] = {
218 	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
219 	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
220 	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
221 	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
222 };
223 
224 static struct pxa3xx_nand_flash builtin_flash_types[] = {
225 	{ 0x46ec, 16, 16, &timing[1] },
226 	{ 0xdaec,  8,  8, &timing[1] },
227 	{ 0xd7ec,  8,  8, &timing[1] },
228 	{ 0xa12c,  8,  8, &timing[2] },
229 	{ 0xb12c, 16, 16, &timing[2] },
230 	{ 0xdc2c,  8,  8, &timing[2] },
231 	{ 0xcc2c, 16, 16, &timing[2] },
232 	{ 0xba20, 16, 16, &timing[3] },
233 };
234 
235 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
236 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
237 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
238 
239 static struct nand_bbt_descr bbt_main_descr = {
240 	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
241 		| NAND_BBT_2BIT | NAND_BBT_VERSION,
242 	.offs =	8,
243 	.len = 6,
244 	.veroffs = 14,
245 	.maxblocks = 8,		/* Last 8 blocks in each chip */
246 	.pattern = bbt_pattern
247 };
248 
249 static struct nand_bbt_descr bbt_mirror_descr = {
250 	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
251 		| NAND_BBT_2BIT | NAND_BBT_VERSION,
252 	.offs =	8,
253 	.len = 6,
254 	.veroffs = 14,
255 	.maxblocks = 8,		/* Last 8 blocks in each chip */
256 	.pattern = bbt_mirror_pattern
257 };
258 #endif
259 
260 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
261 	.eccbytes = 32,
262 	.eccpos = {
263 		32, 33, 34, 35, 36, 37, 38, 39,
264 		40, 41, 42, 43, 44, 45, 46, 47,
265 		48, 49, 50, 51, 52, 53, 54, 55,
266 		56, 57, 58, 59, 60, 61, 62, 63},
267 	.oobfree = { {2, 30} }
268 };
269 
270 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
271 	.eccbytes = 64,
272 	.eccpos = {
273 		32,  33,  34,  35,  36,  37,  38,  39,
274 		40,  41,  42,  43,  44,  45,  46,  47,
275 		48,  49,  50,  51,  52,  53,  54,  55,
276 		56,  57,  58,  59,  60,  61,  62,  63,
277 		96,  97,  98,  99,  100, 101, 102, 103,
278 		104, 105, 106, 107, 108, 109, 110, 111,
279 		112, 113, 114, 115, 116, 117, 118, 119,
280 		120, 121, 122, 123, 124, 125, 126, 127},
281 	/* Bootrom looks in bytes 0 & 5 for bad blocks */
282 	.oobfree = { {6, 26}, { 64, 32} }
283 };
284 
285 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
286 	.eccbytes = 128,
287 	.eccpos = {
288 		32,  33,  34,  35,  36,  37,  38,  39,
289 		40,  41,  42,  43,  44,  45,  46,  47,
290 		48,  49,  50,  51,  52,  53,  54,  55,
291 		56,  57,  58,  59,  60,  61,  62,  63},
292 	.oobfree = { }
293 };
294 
295 #define NDTR0_tCH(c)	(min((c), 7) << 19)
296 #define NDTR0_tCS(c)	(min((c), 7) << 16)
297 #define NDTR0_tWH(c)	(min((c), 7) << 11)
298 #define NDTR0_tWP(c)	(min((c), 7) << 8)
299 #define NDTR0_tRH(c)	(min((c), 7) << 3)
300 #define NDTR0_tRP(c)	(min((c), 7) << 0)
301 
302 #define NDTR1_tR(c)	(min((c), 65535) << 16)
303 #define NDTR1_tWHR(c)	(min((c), 15) << 4)
304 #define NDTR1_tAR(c)	(min((c), 15) << 0)
305 
306 /* convert nano-seconds to nand flash controller clock cycles */
307 #define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
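/*
 * Illustrative example (assumed clock rate): with a 250 MHz controller
 * clock, ns2cycle(40, 250000000) evaluates to 40 * 250 / 1000 = 10
 * controller clock cycles.
 */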
308 
309 static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
310 {
311 	/* We only support the Armada 370/XP/38x for now */
312 	return PXA3XX_NAND_VARIANT_ARMADA370;
313 }
314 
315 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
316 				   const struct pxa3xx_nand_timing *t)
317 {
318 	struct pxa3xx_nand_info *info = host->info_data;
319 	unsigned long nand_clk = mvebu_get_nand_clock();
320 	uint32_t ndtr0, ndtr1;
321 
322 	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
323 		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
324 		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
325 		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
326 		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
327 		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
328 
329 	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
330 		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
331 		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
332 
333 	info->ndtr0cs0 = ndtr0;
334 	info->ndtr1cs0 = ndtr1;
335 	nand_writel(info, NDTR0CS0, ndtr0);
336 	nand_writel(info, NDTR1CS0, ndtr1);
337 }
338 
339 static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
340 				       const struct nand_sdr_timings *t)
341 {
342 	struct pxa3xx_nand_info *info = host->info_data;
343 	struct nand_chip *chip = &host->chip;
344 	unsigned long nand_clk = mvebu_get_nand_clock();
345 	uint32_t ndtr0, ndtr1;
346 
347 	u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
348 	u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
349 	u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
350 	u32 tWP_min = DIV_ROUND_UP(t->tWC_min - tWH_min, 1000);
351 	u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
352 	u32 tRP_min = DIV_ROUND_UP(t->tRC_min - tREH_min, 1000);
353 	u32 tR = chip->chip_delay * 1000;
354 	u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
355 	u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
356 
357 	/* fallback to a default value if tR = 0 */
358 	if (!tR)
359 		tR = 20000;
360 
361 	ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
362 		NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
363 		NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
364 		NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
365 		NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
366 		NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
367 
368 	ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
369 		NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
370 		NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
371 
372 	info->ndtr0cs0 = ndtr0;
373 	info->ndtr1cs0 = ndtr1;
374 	nand_writel(info, NDTR0CS0, ndtr0);
375 	nand_writel(info, NDTR1CS0, ndtr1);
376 }
377 
378 static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
379 {
380 	const struct nand_sdr_timings *timings;
381 	struct nand_chip *chip = &host->chip;
382 	struct pxa3xx_nand_info *info = host->info_data;
383 	const struct pxa3xx_nand_flash *f = NULL;
384 	int mode, id, ntypes, i;
385 
386 	mode = onfi_get_async_timing_mode(chip);
387 	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
388 		ntypes = ARRAY_SIZE(builtin_flash_types);
389 
390 		chip->cmdfunc(host->mtd, NAND_CMD_READID, 0x00, -1);
391 
392 		id = chip->read_byte(host->mtd);
393 		id |= chip->read_byte(host->mtd) << 0x8;
394 
395 		for (i = 0; i < ntypes; i++) {
396 			f = &builtin_flash_types[i];
397 
398 			if (f->chip_id == id)
399 				break;
400 		}
401 
402 		if (i == ntypes) {
403 			dev_err(&info->pdev->dev, "Error: timings not found\n");
404 			return -EINVAL;
405 		}
406 
407 		pxa3xx_nand_set_timing(host, f->timing);
408 
409 		if (f->flash_width == 16) {
410 			info->reg_ndcr |= NDCR_DWIDTH_M;
411 			chip->options |= NAND_BUSWIDTH_16;
412 		}
413 
414 		info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
415 	} else {
416 		mode = fls(mode) - 1;
417 		if (mode < 0)
418 			mode = 0;
419 
420 		timings = onfi_async_timing_mode_to_sdr_timings(mode);
421 		if (IS_ERR(timings))
422 			return PTR_ERR(timings);
423 
424 		pxa3xx_nand_set_sdr_timing(host, timings);
425 	}
426 
427 	return 0;
428 }
429 
430 /*
431  * Set the data and OOB size, depending on the selected
432  * spare and ECC configuration.
433  * Only applicable to READ0, READOOB and PAGEPROG commands.
434  */
435 static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
436 				struct mtd_info *mtd)
437 {
438 	int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
439 
440 	info->data_size = mtd->writesize;
441 	if (!oob_enable)
442 		return;
443 
444 	info->oob_size = info->spare_size;
445 	if (!info->use_ecc)
446 		info->oob_size += info->ecc_size;
447 }
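/*
 * For example, with the 4-bit BCH layout for 2 KiB pages selected in
 * pxa_ecc_init() below (spare_size = 32, ecc_size = 32), a READ0 with
 * ECC enabled transfers 2048 data bytes plus 32 spare bytes, while the
 * same read with ECC disabled transfers 2048 data bytes plus all
 * 64 OOB bytes (spare + ECC).
 */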
448 
449 /**
450  * NOTE: ND_RUN must be set first and the command buffer written
451  * afterwards, otherwise the controller does not start.
452  * We enable all the interrupts at the same time and let
453  * pxa3xx_nand_irq() handle all the logic.
454  */
455 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
456 {
457 	uint32_t ndcr;
458 
459 	ndcr = info->reg_ndcr;
460 
461 	if (info->use_ecc) {
462 		ndcr |= NDCR_ECC_EN;
463 		if (info->ecc_bch)
464 			nand_writel(info, NDECCCTRL, 0x1);
465 	} else {
466 		ndcr &= ~NDCR_ECC_EN;
467 		if (info->ecc_bch)
468 			nand_writel(info, NDECCCTRL, 0x0);
469 	}
470 
471 	ndcr &= ~NDCR_DMA_EN;
472 
473 	if (info->use_spare)
474 		ndcr |= NDCR_SPARE_EN;
475 	else
476 		ndcr &= ~NDCR_SPARE_EN;
477 
478 	ndcr |= NDCR_ND_RUN;
479 
480 	/* clear status bits and run */
481 	nand_writel(info, NDCR, 0);
482 	nand_writel(info, NDSR, NDSR_MASK);
483 	nand_writel(info, NDCR, ndcr);
484 }
485 
486 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
487 {
488 	uint32_t ndcr;
489 
490 	ndcr = nand_readl(info, NDCR);
491 	nand_writel(info, NDCR, ndcr | int_mask);
492 }
493 
494 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
495 {
496 	if (info->ecc_bch) {
497 		u32 ts;
498 
499 		/*
500 		 * According to the datasheet, when reading from NDDB
501 		 * with BCH enabled, after every 32 bytes read we have
502 		 * to make sure that the NDSR.RDDREQ bit is set.
503 		 *
504 		 * Drain the FIFO eight 32-bit reads at a time, and skip
505 		 * the polling on the last read.
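		 *
		 * Note that len is a count of 32-bit words here (readsl()
		 * transfers longwords), so the eight-word step below matches
		 * the 32-byte granularity and advances data by 32 bytes.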
506 		 */
507 		while (len > 8) {
508 			readsl(info->mmio_base + NDDB, data, 8);
509 
510 			ts = get_timer(0);
511 			while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
512 				if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
513 					dev_err(&info->pdev->dev,
514 						"Timeout on RDDREQ while draining the FIFO\n");
515 					return;
516 				}
517 			}
518 
519 			data += 32;
520 			len -= 8;
521 		}
522 	}
523 
524 	readsl(info->mmio_base + NDDB, data, len);
525 }
526 
527 static void handle_data_pio(struct pxa3xx_nand_info *info)
528 {
529 	unsigned int do_bytes = min(info->data_size, info->chunk_size);
530 
531 	switch (info->state) {
532 	case STATE_PIO_WRITING:
533 		writesl(info->mmio_base + NDDB,
534 			info->data_buff + info->data_buff_pos,
535 			DIV_ROUND_UP(do_bytes, 4));
536 
537 		if (info->oob_size > 0)
538 			writesl(info->mmio_base + NDDB,
539 				info->oob_buff + info->oob_buff_pos,
540 				DIV_ROUND_UP(info->oob_size, 4));
541 		break;
542 	case STATE_PIO_READING:
543 		drain_fifo(info,
544 			   info->data_buff + info->data_buff_pos,
545 			   DIV_ROUND_UP(do_bytes, 4));
546 
547 		if (info->oob_size > 0)
548 			drain_fifo(info,
549 				   info->oob_buff + info->oob_buff_pos,
550 				   DIV_ROUND_UP(info->oob_size, 4));
551 		break;
552 	default:
553 		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
554 			info->state);
555 		BUG();
556 	}
557 
558 	/* Update buffer pointers for multi-page read/write */
559 	info->data_buff_pos += do_bytes;
560 	info->oob_buff_pos += info->oob_size;
561 	info->data_size -= do_bytes;
562 }
563 
564 static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
565 {
566 	handle_data_pio(info);
567 
568 	info->state = STATE_CMD_DONE;
569 	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
570 }
571 
572 static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
573 {
574 	unsigned int status, is_completed = 0, is_ready = 0;
575 	unsigned int ready, cmd_done;
576 	irqreturn_t ret = IRQ_HANDLED;
577 
578 	if (info->cs == 0) {
579 		ready           = NDSR_FLASH_RDY;
580 		cmd_done        = NDSR_CS0_CMDD;
581 	} else {
582 		ready           = NDSR_RDY;
583 		cmd_done        = NDSR_CS1_CMDD;
584 	}
585 
586 	status = nand_readl(info, NDSR);
587 
588 	if (status & NDSR_UNCORERR)
589 		info->retcode = ERR_UNCORERR;
590 	if (status & NDSR_CORERR) {
591 		info->retcode = ERR_CORERR;
592 		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
593 		    info->ecc_bch)
594 			info->ecc_err_cnt = NDSR_ERR_CNT(status);
595 		else
596 			info->ecc_err_cnt = 1;
597 
598 		/*
599 		 * Each chunk composing a page is corrected independently,
600 		 * and we need to store the maximum number of corrected bitflips
601 		 * to return it to the MTD layer in ecc.read_page().
602 		 */
603 		info->max_bitflips = max_t(unsigned int,
604 					   info->max_bitflips,
605 					   info->ecc_err_cnt);
606 	}
607 	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
608 		info->state = (status & NDSR_RDDREQ) ?
609 			STATE_PIO_READING : STATE_PIO_WRITING;
610 		/* Call the IRQ thread in U-Boot directly */
611 		pxa3xx_nand_irq_thread(info);
612 		return 0;
613 	}
614 	if (status & cmd_done) {
615 		info->state = STATE_CMD_DONE;
616 		is_completed = 1;
617 	}
618 	if (status & ready) {
619 		info->state = STATE_READY;
620 		is_ready = 1;
621 	}
622 
623 	if (status & NDSR_WRCMDREQ) {
624 		nand_writel(info, NDSR, NDSR_WRCMDREQ);
625 		status &= ~NDSR_WRCMDREQ;
626 		info->state = STATE_CMD_HANDLE;
627 
628 		/*
629 		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
630 		 * must be loaded by writing either 12 or 16 bytes
631 		 * directly to NDCB0, four bytes at a time.
632 		 *
633 		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
634 		 * but each NDCBx register can be read.
635 		 */
636 		nand_writel(info, NDCB0, info->ndcb0);
637 		nand_writel(info, NDCB0, info->ndcb1);
638 		nand_writel(info, NDCB0, info->ndcb2);
639 
640 		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
641 		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
642 			nand_writel(info, NDCB0, info->ndcb3);
643 	}
644 
645 	/* clear NDSR to let the controller exit the IRQ */
646 	nand_writel(info, NDSR, status);
647 	if (is_completed)
648 		info->cmd_complete = 1;
649 	if (is_ready)
650 		info->dev_ready = 1;
651 
652 	return ret;
653 }
654 
655 static inline int is_buf_blank(uint8_t *buf, size_t len)
656 {
657 	for (; len > 0; len--)
658 		if (*buf++ != 0xff)
659 			return 0;
660 	return 1;
661 }
662 
663 static void set_command_address(struct pxa3xx_nand_info *info,
664 		unsigned int page_size, uint16_t column, int page_addr)
665 {
666 	/* small page addr setting */
667 	if (page_size < PAGE_CHUNK_SIZE) {
668 		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
669 				| (column & 0xFF);
670 
671 		info->ndcb2 = 0;
672 	} else {
673 		info->ndcb1 = ((page_addr & 0xFFFF) << 16)
674 				| (column & 0xFFFF);
675 
676 		if (page_addr & 0xFF0000)
677 			info->ndcb2 = (page_addr & 0xFF0000) >> 16;
678 		else
679 			info->ndcb2 = 0;
680 	}
681 }
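/*
 * Illustrative large-page example: page_addr = 0x12345 and column = 0
 * give ndcb1 = 0x23450000 and ndcb2 = 0x01, i.e. the column in
 * NDCB1[15:0], the low 16 page-address bits in NDCB1[31:16] and the
 * remaining page-address bits in NDCB2.
 */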
682 
683 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
684 {
685 	struct pxa3xx_nand_host *host = info->host[info->cs];
686 	struct mtd_info *mtd = host->mtd;
687 
688 	/* reset the data and oob column pointers used to handle data */
689 	info->buf_start		= 0;
690 	info->buf_count		= 0;
691 	info->oob_size		= 0;
692 	info->data_buff_pos	= 0;
693 	info->oob_buff_pos	= 0;
694 	info->use_ecc		= 0;
695 	info->use_spare		= 1;
696 	info->retcode		= ERR_NONE;
697 	info->ecc_err_cnt	= 0;
698 	info->ndcb3		= 0;
699 	info->need_wait		= 0;
700 
701 	switch (command) {
702 	case NAND_CMD_READ0:
703 	case NAND_CMD_PAGEPROG:
704 		info->use_ecc = 1;	/* fall through */
705 	case NAND_CMD_READOOB:
706 		pxa3xx_set_datasize(info, mtd);
707 		break;
708 	case NAND_CMD_PARAM:
709 		info->use_spare = 0;
710 		break;
711 	default:
712 		info->ndcb1 = 0;
713 		info->ndcb2 = 0;
714 		break;
715 	}
716 
717 	/*
718 	 * If we are about to issue a read command, or about to set
719 	 * the write address, then clean the data buffer.
720 	 */
721 	if (command == NAND_CMD_READ0 ||
722 	    command == NAND_CMD_READOOB ||
723 	    command == NAND_CMD_SEQIN) {
724 		info->buf_count = mtd->writesize + mtd->oobsize;
725 		memset(info->data_buff, 0xFF, info->buf_count);
726 	}
727 }
728 
729 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
730 		int ext_cmd_type, uint16_t column, int page_addr)
731 {
732 	int addr_cycle, exec_cmd;
733 	struct pxa3xx_nand_host *host;
734 	struct mtd_info *mtd;
735 
736 	host = info->host[info->cs];
737 	mtd = host->mtd;
738 	addr_cycle = 0;
739 	exec_cmd = 1;
740 
741 	if (info->cs != 0)
742 		info->ndcb0 = NDCB0_CSEL;
743 	else
744 		info->ndcb0 = 0;
745 
746 	if (command == NAND_CMD_SEQIN)
747 		exec_cmd = 0;
748 
749 	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
750 				    + host->col_addr_cycles);
751 
752 	switch (command) {
753 	case NAND_CMD_READOOB:
754 	case NAND_CMD_READ0:
755 		info->buf_start = column;
756 		info->ndcb0 |= NDCB0_CMD_TYPE(0)
757 				| addr_cycle
758 				| NAND_CMD_READ0;
759 
760 		if (command == NAND_CMD_READOOB)
761 			info->buf_start += mtd->writesize;
762 
763 		/*
764 		 * Multiple page read needs an 'extended command type' field,
765 		 * which is either naked-read or last-read according to the
766 		 * state.
767 		 */
768 		if (mtd->writesize == PAGE_CHUNK_SIZE) {
769 			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
770 		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
771 			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
772 					| NDCB0_LEN_OVRD
773 					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
774 			info->ndcb3 = info->chunk_size +
775 				      info->oob_size;
776 		}
777 
778 		set_command_address(info, mtd->writesize, column, page_addr);
779 		break;
780 
781 	case NAND_CMD_SEQIN:
782 
783 		info->buf_start = column;
784 		set_command_address(info, mtd->writesize, 0, page_addr);
785 
786 		/*
787 		 * Multiple page programming needs to execute the initial
788 		 * SEQIN command that sets the page address.
789 		 */
790 		if (mtd->writesize > PAGE_CHUNK_SIZE) {
791 			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
792 				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
793 				| addr_cycle
794 				| command;
795 			/* No data transfer in this case */
796 			info->data_size = 0;
797 			exec_cmd = 1;
798 		}
799 		break;
800 
801 	case NAND_CMD_PAGEPROG:
802 		if (is_buf_blank(info->data_buff,
803 				 (mtd->writesize + mtd->oobsize))) {
804 			exec_cmd = 0;
805 			break;
806 		}
807 
808 		/* Second command setting for large pages */
809 		if (mtd->writesize > PAGE_CHUNK_SIZE) {
810 			/*
811 			 * Multiple page write uses the 'extended command'
812 			 * field. This can be used to issue a command dispatch
813 			 * or a naked-write depending on the current stage.
814 			 */
815 			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
816 					| NDCB0_LEN_OVRD
817 					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
818 			info->ndcb3 = info->chunk_size +
819 				      info->oob_size;
820 
821 			/*
822 			 * This is the command dispatch that completes a chunked
823 			 * page program operation.
824 			 */
825 			if (info->data_size == 0) {
826 				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
827 					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
828 					| command;
829 				info->ndcb1 = 0;
830 				info->ndcb2 = 0;
831 				info->ndcb3 = 0;
832 			}
833 		} else {
834 			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
835 					| NDCB0_AUTO_RS
836 					| NDCB0_ST_ROW_EN
837 					| NDCB0_DBC
838 					| (NAND_CMD_PAGEPROG << 8)
839 					| NAND_CMD_SEQIN
840 					| addr_cycle;
841 		}
842 		break;
843 
844 	case NAND_CMD_PARAM:
845 		info->buf_count = 256;
846 		info->ndcb0 |= NDCB0_CMD_TYPE(0)
847 				| NDCB0_ADDR_CYC(1)
848 				| NDCB0_LEN_OVRD
849 				| command;
850 		info->ndcb1 = (column & 0xFF);
851 		info->ndcb3 = 256;
852 		info->data_size = 256;
853 		break;
854 
855 	case NAND_CMD_READID:
856 		info->buf_count = host->read_id_bytes;
857 		info->ndcb0 |= NDCB0_CMD_TYPE(3)
858 				| NDCB0_ADDR_CYC(1)
859 				| command;
860 		info->ndcb1 = (column & 0xFF);
861 
862 		info->data_size = 8;
863 		break;
864 	case NAND_CMD_STATUS:
865 		info->buf_count = 1;
866 		info->ndcb0 |= NDCB0_CMD_TYPE(4)
867 				| NDCB0_ADDR_CYC(1)
868 				| command;
869 
870 		info->data_size = 8;
871 		break;
872 
873 	case NAND_CMD_ERASE1:
874 		info->ndcb0 |= NDCB0_CMD_TYPE(2)
875 				| NDCB0_AUTO_RS
876 				| NDCB0_ADDR_CYC(3)
877 				| NDCB0_DBC
878 				| (NAND_CMD_ERASE2 << 8)
879 				| NAND_CMD_ERASE1;
880 		info->ndcb1 = page_addr;
881 		info->ndcb2 = 0;
882 
883 		break;
884 	case NAND_CMD_RESET:
885 		info->ndcb0 |= NDCB0_CMD_TYPE(5)
886 				| command;
887 
888 		break;
889 
890 	case NAND_CMD_ERASE2:
891 		exec_cmd = 0;
892 		break;
893 
894 	default:
895 		exec_cmd = 0;
896 		dev_err(&info->pdev->dev, "non-supported command %x\n",
897 			command);
898 		break;
899 	}
900 
901 	return exec_cmd;
902 }
903 
904 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
905 			 int column, int page_addr)
906 {
907 	struct nand_chip *chip = mtd_to_nand(mtd);
908 	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
909 	struct pxa3xx_nand_info *info = host->info_data;
910 	int exec_cmd;
911 
912 	/*
913 	 * if this is a x16 device, then convert the input
914 	 * "byte" address into a "word" address appropriate
915 	 * for indexing a word-oriented device
916 	 */
917 	if (info->reg_ndcr & NDCR_DWIDTH_M)
918 		column /= 2;
919 
920 	/*
921 	 * There may be different NAND chips hooked to
922 	 * different chip selects, so check whether the
923 	 * chip select has changed and, if so, reset the timing
924 	 */
925 	if (info->cs != host->cs) {
926 		info->cs = host->cs;
927 		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
928 		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
929 	}
930 
931 	prepare_start_command(info, command);
932 
933 	info->state = STATE_PREPARED;
934 	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
935 
936 	if (exec_cmd) {
937 		u32 ts;
938 
939 		info->cmd_complete = 0;
940 		info->dev_ready = 0;
941 		info->need_wait = 1;
942 		pxa3xx_nand_start(info);
943 
944 		ts = get_timer(0);
945 		while (1) {
946 			u32 status;
947 
948 			status = nand_readl(info, NDSR);
949 			if (status)
950 				pxa3xx_nand_irq(info);
951 
952 			if (info->cmd_complete)
953 				break;
954 
955 			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
956 				dev_err(&info->pdev->dev, "Wait timeout!!!\n");
957 				return;
958 			}
959 		}
960 	}
961 	info->state = STATE_IDLE;
962 }
963 
964 static void nand_cmdfunc_extended(struct mtd_info *mtd,
965 				  const unsigned command,
966 				  int column, int page_addr)
967 {
968 	struct nand_chip *chip = mtd_to_nand(mtd);
969 	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
970 	struct pxa3xx_nand_info *info = host->info_data;
971 	int exec_cmd, ext_cmd_type;
972 
973 	/*
974 	 * if this is a x16 device then convert the input
975 	 * "byte" address into a "word" address appropriate
976 	 * for indexing a word-oriented device
977 	 */
978 	if (info->reg_ndcr & NDCR_DWIDTH_M)
979 		column /= 2;
980 
981 	/*
982 	 * There may be different NAND chips hooked to
983 	 * different chip selects, so check whether the
984 	 * chip select has changed and, if so, reset the timing
985 	 */
986 	if (info->cs != host->cs) {
987 		info->cs = host->cs;
988 		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
989 		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
990 	}
991 
992 	/* Select the extended command for the first command */
993 	switch (command) {
994 	case NAND_CMD_READ0:
995 	case NAND_CMD_READOOB:
996 		ext_cmd_type = EXT_CMD_TYPE_MONO;
997 		break;
998 	case NAND_CMD_SEQIN:
999 		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1000 		break;
1001 	case NAND_CMD_PAGEPROG:
1002 		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1003 		break;
1004 	default:
1005 		ext_cmd_type = 0;
1006 		break;
1007 	}
1008 
1009 	prepare_start_command(info, command);
1010 
1011 	/*
1012 	 * Prepare the "is ready" completion before starting a command
1013 	 * transaction sequence. If the command is not executed, the
1014 	 * completion is marked done immediately, see below.
1015 	 *
1016 	 * This can be done once, before the loop, because the command
1017 	 * variable is invariant and thus so is exec_cmd.
1018 	 */
1019 	info->need_wait = 1;
1020 	info->dev_ready = 0;
1021 
1022 	do {
1023 		u32 ts;
1024 
1025 		info->state = STATE_PREPARED;
1026 		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1027 					       column, page_addr);
1028 		if (!exec_cmd) {
1029 			info->need_wait = 0;
1030 			info->dev_ready = 1;
1031 			break;
1032 		}
1033 
1034 		info->cmd_complete = 0;
1035 		pxa3xx_nand_start(info);
1036 
1037 		ts = get_timer(0);
1038 		while (1) {
1039 			u32 status;
1040 
1041 			status = nand_readl(info, NDSR);
1042 			if (status)
1043 				pxa3xx_nand_irq(info);
1044 
1045 			if (info->cmd_complete)
1046 				break;
1047 
1048 			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1049 				dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1050 				return;
1051 			}
1052 		}
1053 
1054 		/* Check if the sequence is complete */
1055 		if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
1056 			break;
1057 
1058 		/*
1059 		 * After a split program command sequence has issued
1060 		 * the command dispatch, the command sequence is complete.
1061 		 */
1062 		if (info->data_size == 0 &&
1063 		    command == NAND_CMD_PAGEPROG &&
1064 		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1065 			break;
1066 
1067 		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1068 			/* Last read: issue a 'last naked read' */
1069 			if (info->data_size == info->chunk_size)
1070 				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1071 			else
1072 				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1073 
1074 		/*
1075 		 * If a split program command has no more data to transfer,
1076 		 * the command dispatch must be issued to complete it.
1077 		 */
1078 		} else if (command == NAND_CMD_PAGEPROG &&
1079 			   info->data_size == 0) {
1080 				ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1081 		}
1082 	} while (1);
1083 
1084 	info->state = STATE_IDLE;
1085 }
1086 
1087 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1088 		struct nand_chip *chip, const uint8_t *buf, int oob_required,
1089 		int page)
1090 {
1091 	chip->write_buf(mtd, buf, mtd->writesize);
1092 	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1093 
1094 	return 0;
1095 }
1096 
1097 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1098 		struct nand_chip *chip, uint8_t *buf, int oob_required,
1099 		int page)
1100 {
1101 	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1102 	struct pxa3xx_nand_info *info = host->info_data;
1103 
1104 	chip->read_buf(mtd, buf, mtd->writesize);
1105 	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1106 
1107 	if (info->retcode == ERR_CORERR && info->use_ecc) {
1108 		mtd->ecc_stats.corrected += info->ecc_err_cnt;
1109 
1110 	} else if (info->retcode == ERR_UNCORERR) {
1111 		/*
1112 		 * For a blank page (all 0xff), the HW will calculate its ECC
1113 		 * as 0, which is different from the ECC information within
1114 		 * the OOB, so ignore such uncorrectable errors.
1115 		 */
1116 		if (is_buf_blank(buf, mtd->writesize))
1117 			info->retcode = ERR_NONE;
1118 		else
1119 			mtd->ecc_stats.failed++;
1120 	}
1121 
1122 	return info->max_bitflips;
1123 }
1124 
1125 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1126 {
1127 	struct nand_chip *chip = mtd_to_nand(mtd);
1128 	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1129 	struct pxa3xx_nand_info *info = host->info_data;
1130 	char retval = 0xFF;
1131 
1132 	if (info->buf_start < info->buf_count)
1133 		/* Has a new command just been sent? */
1134 		retval = info->data_buff[info->buf_start++];
1135 
1136 	return retval;
1137 }
1138 
1139 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1140 {
1141 	struct nand_chip *chip = mtd_to_nand(mtd);
1142 	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1143 	struct pxa3xx_nand_info *info = host->info_data;
1144 	u16 retval = 0xFFFF;
1145 
1146 	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1147 		retval = *((u16 *)(info->data_buff+info->buf_start));
1148 		info->buf_start += 2;
1149 	}
1150 	return retval;
1151 }
1152 
1153 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1154 {
1155 	struct nand_chip *chip = mtd_to_nand(mtd);
1156 	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1157 	struct pxa3xx_nand_info *info = host->info_data;
1158 	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1159 
1160 	memcpy(buf, info->data_buff + info->buf_start, real_len);
1161 	info->buf_start += real_len;
1162 }
1163 
1164 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1165 		const uint8_t *buf, int len)
1166 {
1167 	struct nand_chip *chip = mtd_to_nand(mtd);
1168 	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1169 	struct pxa3xx_nand_info *info = host->info_data;
1170 	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1171 
1172 	memcpy(info->data_buff + info->buf_start, buf, real_len);
1173 	info->buf_start += real_len;
1174 }
1175 
1176 static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
1177 {
1178 	return;
1179 }
1180 
1181 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1182 {
1183 	struct nand_chip *chip = mtd_to_nand(mtd);
1184 	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1185 	struct pxa3xx_nand_info *info = host->info_data;
1186 
1187 	if (info->need_wait) {
1188 		u32 ts;
1189 
1190 		info->need_wait = 0;
1191 
1192 		ts = get_timer(0);
1193 		while (1) {
1194 			u32 status;
1195 
1196 			status = nand_readl(info, NDSR);
1197 			if (status)
1198 				pxa3xx_nand_irq(info);
1199 
1200 			if (info->dev_ready)
1201 				break;
1202 
1203 			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1204 				dev_err(&info->pdev->dev, "Ready timeout!!!\n");
1205 				return NAND_STATUS_FAIL;
1206 			}
1207 		}
1208 	}
1209 
1210 	/* nand_cmdfunc() has already waited for the command to complete */
1211 	if (this->state == FL_WRITING || this->state == FL_ERASING) {
1212 		if (info->retcode == ERR_NONE)
1213 			return 0;
1214 		else
1215 			return NAND_STATUS_FAIL;
1216 	}
1217 
1218 	return NAND_STATUS_READY;
1219 }
1220 
1221 static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info)
1222 {
1223 	struct pxa3xx_nand_host *host = info->host[info->cs];
1224 	struct mtd_info *mtd = host->mtd;
1225 	struct nand_chip *chip = mtd_to_nand(mtd);
1226 
1227 	info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1228 	info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1229 	info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
1230 
1231 	return 0;
1232 }
1233 
1234 static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1235 {
1236 	/*
1237 	 * Chip select 0 is hard-coded here, since keep_config is not
1238 	 * supported when more than one chip is attached to the controller
1239 	 */
1240 	struct pxa3xx_nand_host *host = info->host[0];
1241 	uint32_t ndcr = nand_readl(info, NDCR);
1242 
1243 	if (ndcr & NDCR_PAGE_SZ) {
1244 		/* Controller's FIFO size */
1245 		info->chunk_size = 2048;
1246 		host->read_id_bytes = 4;
1247 	} else {
1248 		info->chunk_size = 512;
1249 		host->read_id_bytes = 2;
1250 	}
1251 
1252 	/* Keep the current NDCR configuration, with the interrupt bits masked */
1253 	info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
1254 	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1255 	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1256 	return 0;
1257 }
1258 
1259 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1260 {
1261 	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1262 	if (info->data_buff == NULL)
1263 		return -ENOMEM;
1264 	return 0;
1265 }
1266 
1267 static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
1268 {
1269 	struct pxa3xx_nand_info *info = host->info_data;
1270 	struct pxa3xx_nand_platform_data *pdata = info->pdata;
1271 	struct mtd_info *mtd;
1272 	struct nand_chip *chip;
1273 	const struct nand_sdr_timings *timings;
1274 	int ret;
1275 
1276 	mtd = info->host[info->cs]->mtd;
1277 	chip = mtd_to_nand(mtd);
1278 
1279 	/* configure default flash values */
1280 	info->reg_ndcr = 0x0; /* enable all interrupts */
1281 	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1282 	info->reg_ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
1283 	info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1284 
1285 	/* use the common (ONFI mode 0) timings for a first try */
1286 	timings = onfi_async_timing_mode_to_sdr_timings(0);
1287 	if (IS_ERR(timings))
1288 		return PTR_ERR(timings);
1289 
1290 	pxa3xx_nand_set_sdr_timing(host, timings);
1291 
1292 	chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1293 	ret = chip->waitfunc(mtd, chip);
1294 	if (ret & NAND_STATUS_FAIL)
1295 		return -ENODEV;
1296 
1297 	return 0;
1298 }
1299 
1300 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1301 			struct nand_ecc_ctrl *ecc,
1302 			int strength, int ecc_stepsize, int page_size)
1303 {
1304 	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1305 		info->chunk_size = 2048;
1306 		info->spare_size = 40;
1307 		info->ecc_size = 24;
1308 		ecc->mode = NAND_ECC_HW;
1309 		ecc->size = 512;
1310 		ecc->strength = 1;
1311 
1312 	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1313 		info->chunk_size = 512;
1314 		info->spare_size = 8;
1315 		info->ecc_size = 8;
1316 		ecc->mode = NAND_ECC_HW;
1317 		ecc->size = 512;
1318 		ecc->strength = 1;
1319 
1320 	/*
1321 	 * Required ECC: 4-bit correction per 512 bytes
1322 	 * Select: 16-bit correction per 2048 bytes
1323 	 */
1324 	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1325 		info->ecc_bch = 1;
1326 		info->chunk_size = 2048;
1327 		info->spare_size = 32;
1328 		info->ecc_size = 32;
1329 		ecc->mode = NAND_ECC_HW;
1330 		ecc->size = info->chunk_size;
1331 		ecc->layout = &ecc_layout_2KB_bch4bit;
1332 		ecc->strength = 16;
1333 
1334 	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1335 		info->ecc_bch = 1;
1336 		info->chunk_size = 2048;
1337 		info->spare_size = 32;
1338 		info->ecc_size = 32;
1339 		ecc->mode = NAND_ECC_HW;
1340 		ecc->size = info->chunk_size;
1341 		ecc->layout = &ecc_layout_4KB_bch4bit;
1342 		ecc->strength = 16;
1343 
1344 	/*
1345 	 * Required ECC: 8-bit correction per 512 bytes
1346 	 * Select: 16-bit correction per 1024 bytes
1347 	 */
1348 	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1349 		info->ecc_bch = 1;
1350 		info->chunk_size = 1024;
1351 		info->spare_size = 0;
1352 		info->ecc_size = 32;
1353 		ecc->mode = NAND_ECC_HW;
1354 		ecc->size = info->chunk_size;
1355 		ecc->layout = &ecc_layout_4KB_bch8bit;
1356 		ecc->strength = 16;
1357 	} else {
1358 		dev_err(&info->pdev->dev,
1359 			"ECC strength %d at page size %d is not supported\n",
1360 			strength, page_size);
1361 		return -ENODEV;
1362 	}
1363 
1364 	return 0;
1365 }
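/*
 * Note: with the BCH settings above, a 4 KiB page is processed as
 * multiple chunks (chunk_size of 2048 or 1024 bytes), so reads and
 * programs go through the extended, chunked command path selected in
 * pxa3xx_nand_scan() when mtd->writesize > PAGE_CHUNK_SIZE.
 */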
1366 
1367 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1368 {
1369 	struct nand_chip *chip = mtd_to_nand(mtd);
1370 	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1371 	struct pxa3xx_nand_info *info = host->info_data;
1372 	struct pxa3xx_nand_platform_data *pdata = info->pdata;
1373 	int ret;
1374 	uint16_t ecc_strength, ecc_step;
1375 
1376 	if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
1377 		goto KEEP_CONFIG;
1378 
1379 	/* Set a default chunk size */
1380 	info->chunk_size = 512;
1381 
1382 	ret = pxa3xx_nand_sensing(host);
1383 	if (ret) {
1384 		dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
1385 			 info->cs);
1386 
1387 		return ret;
1388 	}
1389 
1390 KEEP_CONFIG:
1391 	/* Device detection must be done with ECC disabled */
1392 	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1393 		nand_writel(info, NDECCCTRL, 0x0);
1394 
1395 	if (nand_scan_ident(mtd, 1, NULL))
1396 		return -ENODEV;
1397 
1398 	if (!pdata->keep_config) {
1399 		ret = pxa3xx_nand_init_timings(host);
1400 		if (ret) {
1401 			dev_err(&info->pdev->dev,
1402 				"Failed to set timings: %d\n", ret);
1403 			return ret;
1404 		}
1405 	}
1406 
1407 	ret = pxa3xx_nand_config_flash(info);
1408 	if (ret)
1409 		return ret;
1410 
1411 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1412 	/*
1413 	 * We'll use a bad block table stored in-flash and don't
1414 	 * allow writing the bad block marker to the flash.
1415 	 */
1416 	chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
1417 	chip->bbt_td = &bbt_main_descr;
1418 	chip->bbt_md = &bbt_mirror_descr;
1419 #endif
1420 
1421 	/*
1422 	 * If the page size is bigger than the FIFO size, let's check
1423 	 * we are given the right variant and then switch to the extended
1424 	 * (aka split) command handling.
1425 	 */
1426 	if (mtd->writesize > PAGE_CHUNK_SIZE) {
1427 		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1428 			chip->cmdfunc = nand_cmdfunc_extended;
1429 		} else {
1430 			dev_err(&info->pdev->dev,
1431 				"unsupported page size on this variant\n");
1432 			return -ENODEV;
1433 		}
1434 	}
1435 
1436 	if (pdata->ecc_strength && pdata->ecc_step_size) {
1437 		ecc_strength = pdata->ecc_strength;
1438 		ecc_step = pdata->ecc_step_size;
1439 	} else {
1440 		ecc_strength = chip->ecc_strength_ds;
1441 		ecc_step = chip->ecc_step_ds;
1442 	}
1443 
1444 	/* Set default ECC strength requirements on non-ONFI devices */
1445 	if (ecc_strength < 1 && ecc_step < 1) {
1446 		ecc_strength = 1;
1447 		ecc_step = 512;
1448 	}
1449 
1450 	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1451 			   ecc_step, mtd->writesize);
1452 	if (ret)
1453 		return ret;
1454 
1455 	/* calculate addressing information */
1456 	if (mtd->writesize >= 2048)
1457 		host->col_addr_cycles = 2;
1458 	else
1459 		host->col_addr_cycles = 1;
1460 
1461 	/* release the initial buffer */
1462 	kfree(info->data_buff);
1463 
1464 	/* allocate the real data + oob buffer */
1465 	info->buf_size = mtd->writesize + mtd->oobsize;
1466 	ret = pxa3xx_nand_init_buff(info);
1467 	if (ret)
1468 		return ret;
1469 	info->oob_buff = info->data_buff + mtd->writesize;
1470 
1471 	if ((mtd->size >> chip->page_shift) > 65536)
1472 		host->row_addr_cycles = 3;
1473 	else
1474 		host->row_addr_cycles = 2;
1475 	return nand_scan_tail(mtd);
1476 }
1477 
1478 static int alloc_nand_resource(struct pxa3xx_nand_info *info)
1479 {
1480 	struct pxa3xx_nand_platform_data *pdata;
1481 	struct pxa3xx_nand_host *host;
1482 	struct nand_chip *chip = NULL;
1483 	struct mtd_info *mtd;
1484 	int ret, cs;
1485 
1486 	pdata = info->pdata;
1487 	if (pdata->num_cs <= 0)
1488 		return -ENODEV;
1489 
1490 	info->variant = pxa3xx_nand_get_variant();
1491 	for (cs = 0; cs < pdata->num_cs; cs++) {
1492 		chip = (struct nand_chip *)
1493 			((u8 *)&info[1] + sizeof(*host) * cs);
1494 		mtd = nand_to_mtd(chip);
1495 		host = (struct pxa3xx_nand_host *)chip;
1496 		info->host[cs] = host;
1497 		host->mtd = mtd;
1498 		host->cs = cs;
1499 		host->info_data = info;
1500 		host->read_id_bytes = 4;
1501 		mtd->owner = THIS_MODULE;
1502 
1503 		nand_set_controller_data(chip, host);
1504 		chip->ecc.read_page	= pxa3xx_nand_read_page_hwecc;
1505 		chip->ecc.write_page	= pxa3xx_nand_write_page_hwecc;
1506 		chip->controller        = &info->controller;
1507 		chip->waitfunc		= pxa3xx_nand_waitfunc;
1508 		chip->select_chip	= pxa3xx_nand_select_chip;
1509 		chip->read_word		= pxa3xx_nand_read_word;
1510 		chip->read_byte		= pxa3xx_nand_read_byte;
1511 		chip->read_buf		= pxa3xx_nand_read_buf;
1512 		chip->write_buf		= pxa3xx_nand_write_buf;
1513 		chip->options		|= NAND_NO_SUBPAGE_WRITE;
1514 		chip->cmdfunc		= nand_cmdfunc;
1515 	}
1516 
1517 	/* Allocate a buffer to allow flash detection */
1518 	info->buf_size = INIT_BUFFER_SIZE;
1519 	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1520 	if (info->data_buff == NULL) {
1521 		ret = -ENOMEM;
1522 		goto fail_disable_clk;
1523 	}
1524 
1525 	/* initialize all interrupts to be disabled */
1526 	disable_int(info, NDSR_MASK);
1527 
1528 	return 0;
1529 
1530 	kfree(info->data_buff);
1531 fail_disable_clk:
1532 	return ret;
1533 }
1534 
1535 static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
1536 {
1537 	struct pxa3xx_nand_platform_data *pdata;
1538 	const void *blob = gd->fdt_blob;
1539 	int node = -1;
1540 
1541 	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1542 	if (!pdata)
1543 		return -ENOMEM;
1544 
1545 	/* Get address decoding nodes from the FDT blob */
1546 	do {
1547 		node = fdt_node_offset_by_compatible(blob, node,
1548 						     "marvell,mvebu-pxa3xx-nand");
1549 		if (node < 0)
1550 			break;
1551 
1552 		/* Bypass disabled nodes */
1553 		if (!fdtdec_get_is_enabled(blob, node))
1554 			continue;
1555 
1556 		/* Get the first enabled NAND controller base address */
1557 		info->mmio_base =
1558 			(void __iomem *)fdtdec_get_addr_size_auto_noparent(
1559 					blob, node, "reg", 0, NULL, true);
1560 
1561 		pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1);
1562 		if (pdata->num_cs != 1) {
1563 			pr_err("pxa3xx driver supports single CS only\n");
1564 			break;
1565 		}
1566 
1567 		if (fdtdec_get_bool(blob, node, "nand-enable-arbiter"))
1568 			pdata->enable_arbiter = 1;
1569 
1570 		if (fdtdec_get_bool(blob, node, "nand-keep-config"))
1571 			pdata->keep_config = 1;
1572 
1573 		/*
1574 		 * ECC parameters.
1575 		 * If these are not set, they will be selected according
1576 		 * to the detected flash type.
1577 		 */
1578 		/* ECC strength */
1579 		pdata->ecc_strength = fdtdec_get_int(blob, node,
1580 						     "nand-ecc-strength", 0);
1581 
1582 		/* ECC step size */
1583 		pdata->ecc_step_size = fdtdec_get_int(blob, node,
1584 						      "nand-ecc-step-size", 0);
1585 
1586 		info->pdata = pdata;
1587 
1588 		/* Currently support only a single NAND controller */
1589 		return 0;
1590 
1591 	} while (node >= 0);
1592 
1593 	return -EINVAL;
1594 }
1595 
1596 static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
1597 {
1598 	struct pxa3xx_nand_platform_data *pdata;
1599 	int ret, cs, probe_success;
1600 
1601 	ret = pxa3xx_nand_probe_dt(info);
1602 	if (ret)
1603 		return ret;
1604 
1605 	pdata = info->pdata;
1606 
1607 	ret = alloc_nand_resource(info);
1608 	if (ret) {
1609 		dev_err(&pdev->dev, "alloc nand resource failed\n");
1610 		return ret;
1611 	}
1612 
1613 	probe_success = 0;
1614 	for (cs = 0; cs < pdata->num_cs; cs++) {
1615 		struct mtd_info *mtd = info->host[cs]->mtd;
1616 
1617 		/*
1618 		 * The mtd name matches the one used in 'mtdparts' kernel
1619 		 * parameter. This name cannot be changed, otherwise the
1620 		 * user's mtd partition configuration would get broken.
1621 		 */
1622 		mtd->name = "pxa3xx_nand-0";
1623 		info->cs = cs;
1624 		ret = pxa3xx_nand_scan(mtd);
1625 		if (ret) {
1626 			dev_info(&pdev->dev, "failed to scan nand at cs %d\n",
1627 				 cs);
1628 			continue;
1629 		}
1630 
1631 		if (nand_register(cs, mtd))
1632 			continue;
1633 
1634 		probe_success = 1;
1635 	}
1636 
1637 	if (!probe_success)
1638 		return -ENODEV;
1639 
1640 	return 0;
1641 }
1642 
1643 /*
1644  * Main initialization routine
1645  */
1646 void board_nand_init(void)
1647 {
1648 	struct pxa3xx_nand_info *info;
1649 	struct pxa3xx_nand_host *host;
1650 	int ret;
1651 
1652 	info = kzalloc(sizeof(*info) +
1653 		       sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
1654 		       GFP_KERNEL);
1655 	if (!info)
1656 		return;
1657 
1658 	ret = pxa3xx_nand_probe(info);
1659 	if (ret)
1660 		return;
1661 }
1662