// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale i.MX28 NAND flash driver
 *
 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
 * on behalf of DENX Software Engineering GmbH
 *
 * Based on code from LTIB:
 * Freescale GPMI NFC NAND Flash Driver
 *
 * Copyright (C) 2010 Freescale Semiconductor, Inc.
 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <linux/mtd/rawnand.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include <malloc.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/mach-imx/regs-bch.h>
#include <asm/mach-imx/regs-gpmi.h>
#include <asm/arch/sys_proto.h>
#include <mxs_nand.h>

#define	MXS_NAND_DMA_DESCRIPTOR_COUNT		4

#if (defined(CONFIG_MX6) || defined(CONFIG_MX7))
#define	MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT	2
#else
#define	MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT	0
#endif
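
/*
 * Editorial note: on i.MX6/i.MX7 the BCH FLASHLAYOUT registers appear to
 * encode the chunk data size in units of 4 bytes (mxs_nand_get_layout()
 * below multiplies the fields by 4), so the byte count is shifted right by
 * 2 before being programmed; older SoCs take the raw byte count, hence a
 * shift of 0.
 */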
#define	MXS_NAND_METADATA_SIZE			10
#define	MXS_NAND_BITS_PER_ECC_LEVEL		13

#if !defined(CONFIG_SYS_CACHELINE_SIZE) || CONFIG_SYS_CACHELINE_SIZE < 32
#define	MXS_NAND_COMMAND_BUFFER_SIZE		32
#else
#define	MXS_NAND_COMMAND_BUFFER_SIZE		CONFIG_SYS_CACHELINE_SIZE
#endif

#define	MXS_NAND_BCH_TIMEOUT			10000

struct nand_ecclayout fake_ecc_layout;

/*
 * Cache management functions
 */
#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
static void mxs_nand_flush_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uint32_t)info->data_buf;

	flush_dcache_range(addr, addr + info->data_buf_size);
}

static void mxs_nand_inval_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uint32_t)info->data_buf;

	invalidate_dcache_range(addr, addr + info->data_buf_size);
}

static void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uint32_t)info->cmd_buf;

	flush_dcache_range(addr, addr + MXS_NAND_COMMAND_BUFFER_SIZE);
}
#else
static inline void mxs_nand_flush_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_inval_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info) {}
#endif

static struct mxs_dma_desc *mxs_nand_get_dma_desc(struct mxs_nand_info *info)
{
	struct mxs_dma_desc *desc;

	if (info->desc_index >= MXS_NAND_DMA_DESCRIPTOR_COUNT) {
		printf("MXS NAND: Too many DMA descriptors requested\n");
		return NULL;
	}

	desc = info->desc[info->desc_index];
	info->desc_index++;

	return desc;
}

static void mxs_nand_return_dma_descs(struct mxs_nand_info *info)
{
	int i;
	struct mxs_dma_desc *desc;

	for (i = 0; i < info->desc_index; i++) {
		desc = info->desc[i];
		memset(desc, 0, sizeof(struct mxs_dma_desc));
		desc->address = (dma_addr_t)desc;
	}

	info->desc_index = 0;
}

static uint32_t mxs_nand_aux_status_offset(void)
{
	return (MXS_NAND_METADATA_SIZE + 0x3) & ~0x3;
}
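
/*
 * Illustrative worked example (editorial): with MXS_NAND_METADATA_SIZE = 10,
 * (10 + 0x3) & ~0x3 = 12, i.e. the ECC status bytes begin at the first
 * 32-bit-aligned offset past the metadata in the auxiliary buffer.
 */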

static inline int mxs_nand_calc_mark_offset(struct bch_geometry *geo,
					    uint32_t page_data_size)
{
	uint32_t chunk_data_size_in_bits = geo->ecc_chunk_size * 8;
	uint32_t chunk_ecc_size_in_bits = geo->ecc_strength * geo->gf_len;
	uint32_t chunk_total_size_in_bits;
	uint32_t block_mark_chunk_number;
	uint32_t block_mark_chunk_bit_offset;
	uint32_t block_mark_bit_offset;

	chunk_total_size_in_bits =
			chunk_data_size_in_bits + chunk_ecc_size_in_bits;

	/* Compute the bit offset of the block mark within the physical page. */
	block_mark_bit_offset = page_data_size * 8;

	/* Subtract the metadata bits. */
	block_mark_bit_offset -= MXS_NAND_METADATA_SIZE * 8;

	/*
	 * Compute the chunk number (starting at zero) in which the block mark
	 * appears.
	 */
	block_mark_chunk_number =
			block_mark_bit_offset / chunk_total_size_in_bits;

	/*
	 * Compute the bit offset of the block mark within its chunk, and
	 * validate it.
	 */
	block_mark_chunk_bit_offset = block_mark_bit_offset -
			(block_mark_chunk_number * chunk_total_size_in_bits);

	if (block_mark_chunk_bit_offset > chunk_data_size_in_bits)
		return -EINVAL;

	/*
	 * Now that we know the chunk number in which the block mark appears,
	 * we can subtract all the ECC bits that appear before it.
	 */
	block_mark_bit_offset -=
		block_mark_chunk_number * chunk_ecc_size_in_bits;

	geo->block_mark_byte_offset = block_mark_bit_offset >> 3;
	geo->block_mark_bit_offset = block_mark_bit_offset & 0x7;

	return 0;
}
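
/*
 * Illustrative worked example (editorial): for a 2048-byte page with
 * 512-byte chunks, ECC8 and gf_len = 13, each chunk spans 4096 data bits +
 * 8 * 13 = 104 ECC bits = 4200 bits. The mark starts 2048 * 8 - 10 * 8 =
 * 16304 bits past the metadata, i.e. in chunk 16304 / 4200 = 3. Removing
 * the 3 * 104 ECC bits that precede it gives 15992, so the factory bad
 * block mark overlays data byte 1999, bit 0.
 */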

static inline int mxs_nand_calc_ecc_layout_by_info(struct bch_geometry *geo,
						   struct mtd_info *mtd,
						   unsigned int ecc_strength,
						   unsigned int ecc_step)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);

	switch (ecc_step) {
	case SZ_512:
		geo->gf_len = 13;
		break;
	case SZ_1K:
		geo->gf_len = 14;
		break;
	default:
		return -EINVAL;
	}

	geo->ecc_chunk_size = ecc_step;
	geo->ecc_strength = round_up(ecc_strength, 2);

	/* Keep the ECC chunk size (C) >= the OOB size (O). */
	if (geo->ecc_chunk_size < mtd->oobsize)
		return -EINVAL;

	if (geo->ecc_strength > nand_info->max_ecc_strength_supported)
		return -EINVAL;

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;

	return 0;
}

static inline int mxs_nand_calc_ecc_layout(struct bch_geometry *geo,
					   struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);

	/* The default length of the Galois field. */
	geo->gf_len = 13;

	/* The default chunk size. */
	geo->ecc_chunk_size = 512;

	if (geo->ecc_chunk_size < mtd->oobsize) {
		geo->gf_len = 14;
		geo->ecc_chunk_size *= 2;
	}

	if (mtd->oobsize > geo->ecc_chunk_size) {
		printf("NAND chips with an OOB size larger than %d bytes are not supported!\n",
		       geo->ecc_chunk_size);
		return -EINVAL;
	}

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;

	/*
	 * Determine the ECC layout with the formula:
	 *	ECC bits per chunk = (total page spare data bits) /
	 *		(bits per ECC level) / (chunks per page)
	 * where:
	 *	total page spare data bits =
	 *		(page oob size - meta data size) * (bits per byte)
	 */
	geo->ecc_strength = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
			/ (geo->gf_len * geo->ecc_chunk_count);

	geo->ecc_strength = min(round_down(geo->ecc_strength, 2),
				nand_info->max_ecc_strength_supported);

	return 0;
}
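
/*
 * Illustrative worked example (editorial): for a 2048-byte page with a
 * 64-byte OOB, gf_len stays 13 and the chunk size stays 512, so
 * ecc_chunk_count = 4. The formula gives (64 - 10) * 8 / (13 * 4) = 8
 * correctable bits per chunk, and round_down(8, 2) = 8, i.e. BCH8
 * (assuming it is within the SoC maximum).
 */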

/*
 * Wait for the BCH complete IRQ and clear the IRQ.
 */
static int mxs_nand_wait_for_bch_complete(struct mxs_nand_info *nand_info)
{
	int timeout = MXS_NAND_BCH_TIMEOUT;
	int ret;

	ret = mxs_wait_mask_set(&nand_info->bch_regs->hw_bch_ctrl_reg,
		BCH_CTRL_COMPLETE_IRQ, timeout);

	writel(BCH_CTRL_COMPLETE_IRQ, &nand_info->bch_regs->hw_bch_ctrl_clr);

	return ret;
}

/*
 * This is the function that we install in the cmd_ctrl function pointer of the
 * owning struct nand_chip. The only functions in the reference implementation
 * that use these function pointers are cmdfunc and select_chip.
 *
 * In this driver, we implement our own select_chip, so this function will only
 * be called by the reference implementation's cmdfunc. For this reason, we can
 * ignore the chip enable bit and concentrate only on sending bytes to the NAND
 * Flash.
 */
static void mxs_nand_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	/*
	 * If this condition is true, something is _VERY_ wrong in the MTD
	 * subsystem!
	 */
	if (nand_info->cmd_queue_len == MXS_NAND_COMMAND_BUFFER_SIZE) {
		printf("MXS NAND: Command queue too long\n");
		return;
	}

	/*
	 * Every operation begins with a command byte and a series of zero or
	 * more address bytes. These are distinguished by either the Address
	 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
	 * asserted. When MTD is ready to execute the command, it will
	 * deassert both latch enables.
	 *
	 * Rather than run a separate DMA operation for every single byte, we
	 * queue them up and run a single DMA operation for the entire series
	 * of command and data bytes.
	 */
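	/*
	 * Illustrative example (editorial): a full page read issues the READ0
	 * opcode plus up to five address cycles, so by the time MTD deasserts
	 * ALE/CLE the command buffer typically holds about six bytes, all of
	 * which are sent below in a single DMA transfer.
	 */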
	if (ctrl & (NAND_ALE | NAND_CLE)) {
		if (data != NAND_CMD_NONE)
			nand_info->cmd_buf[nand_info->cmd_queue_len++] = data;
		return;
	}

	/*
	 * If control arrives here, MTD has deasserted both the ALE and CLE,
	 * which means it's ready to run an operation. Check if we have any
	 * bytes to send.
	 */
	if (nand_info->cmd_queue_len == 0)
		return;

	/* Compile the DMA descriptor -- a descriptor that sends the command. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_CHAIN | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (3 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(nand_info->cmd_queue_len << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->cmd_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_CLE |
		GPMI_CTRL0_ADDRESS_INCREMENT |
		nand_info->cmd_queue_len;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_cmd_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: Error sending command\n");

	mxs_nand_return_dma_descs(nand_info);

	/* Reset the command queue. */
	nand_info->cmd_queue_len = 0;
}

/*
 * Test if the NAND flash is ready.
 */
static int mxs_nand_device_ready(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	uint32_t tmp;

	tmp = readl(&nand_info->gpmi_regs->hw_gpmi_stat);
	tmp >>= (GPMI_STAT_READY_BUSY_OFFSET + nand_info->cur_chip);

	return tmp & 1;
}
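
/*
 * Illustrative note (editorial): HW_GPMI_STAT carries one READY_BUSY bit per
 * chip select, so e.g. chip 1's ready state is bit
 * (GPMI_STAT_READY_BUSY_OFFSET + 1), which the shift above selects.
 */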

/*
 * Select the NAND chip.
 */
static void mxs_nand_select_chip(struct mtd_info *mtd, int chip)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->cur_chip = chip;
}

/*
 * Handle block mark swapping.
 *
 * Note that, when this function is called, it doesn't know whether it's
 * swapping the block mark, or swapping it *back* -- but it doesn't matter
 * because the operation is the same.
 */
static void mxs_nand_swap_block_mark(struct bch_geometry *geo,
				     uint8_t *data_buf, uint8_t *oob_buf)
{
	uint32_t bit_offset = geo->block_mark_bit_offset;
	uint32_t buf_offset = geo->block_mark_byte_offset;

	uint32_t src;
	uint32_t dst;

	/*
	 * Get the byte from the data area that overlays the block mark. Since
	 * the ECC engine applies its own view to the bits in the page, the
	 * physical block mark won't (in general) appear on a byte boundary in
	 * the data.
	 */
	src = data_buf[buf_offset] >> bit_offset;
	src |= data_buf[buf_offset + 1] << (8 - bit_offset);

	dst = oob_buf[0];

	oob_buf[0] = src;

	data_buf[buf_offset] &= ~(0xff << bit_offset);
	data_buf[buf_offset + 1] &= 0xff << bit_offset;

	data_buf[buf_offset] |= dst << bit_offset;
	data_buf[buf_offset + 1] |= dst >> (8 - bit_offset);
}
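
/*
 * Illustrative example (editorial): with bit_offset = 2, the mark byte is
 * reassembled as (data[n] >> 2) | (data[n + 1] << 6), and the old OOB byte
 * is scattered back across the same two data bytes by the masked writes
 * above. With bit_offset = 0 the swap degenerates to a plain byte exchange.
 */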

/*
 * Read data from NAND.
 */
static void mxs_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	/* Compile the DMA descriptor - a descriptor that reads data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_WRITE | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/*
	 * A DMA descriptor that waits for the command to end and the chip to
	 * become ready.
	 *
	 * I think we actually should *not* be waiting for the chip to become
	 * ready because, after all, we don't care. I think the original code
	 * did that and no one has re-thought it yet.
	 */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	memcpy(buf, nand_info->data_buf, length);

rtn:
	mxs_nand_return_dma_descs(nand_info);
}

/*
 * Write data to NAND.
 */
static void mxs_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
				int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	memcpy(nand_info->data_buf, buf, length);

	/* Compile the DMA descriptor - a descriptor that writes data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: DMA write error\n");

	mxs_nand_return_dma_descs(nand_info);
}

/*
 * Read a single byte from NAND.
 */
static uint8_t mxs_nand_read_byte(struct mtd_info *mtd)
{
	uint8_t buf;

	mxs_nand_read_buf(mtd, &buf, 1);

	return buf;
}

/*
 * Read a page from NAND.
 */
static int mxs_nand_ecc_read_page(struct mtd_info *mtd, struct nand_chip *nand,
					uint8_t *buf, int oob_required,
					int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	uint32_t corrected = 0, failed = 0;
	uint8_t *status;
	int i, ret;

	/* Compile the DMA descriptor - wait for ready. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - enable the BCH block and read. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_WAIT4END | (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_DECODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = mtd->writesize + mtd->oobsize;
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - disable the BCH block. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(3 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] = 0;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - deassert the NAND lock and interrupt. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM;

	d->cmd.address = 0;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete(nand_info);
	if (ret) {
		printf("MXS NAND: BCH read timeout\n");
		goto rtn;
	}

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Read DMA completed, now do the mark swapping. */
	mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);

	/* Loop over status bytes, accumulating ECC status. */
	status = nand_info->oob_buf + mxs_nand_aux_status_offset();
	for (i = 0; i < geo->ecc_chunk_count; i++) {
		/* 0x00 means the chunk decoded without errors. */
		if (status[i] == 0x00)
			continue;

		/* 0xff marks an erased (all-ones) chunk. */
		if (status[i] == 0xff)
			continue;

		/* 0xfe flags an uncorrectable chunk. */
		if (status[i] == 0xfe) {
			failed++;
			continue;
		}

		/* Any other value is the number of corrected bitflips. */
		corrected += status[i];
	}

	/* Propagate ECC status to the owning MTD. */
	mtd->ecc_stats.failed += failed;
	mtd->ecc_stats.corrected += corrected;

	/*
	 * It's time to deliver the OOB bytes. See mxs_nand_ecc_read_oob() for
	 * details about our policy for delivering the OOB.
	 *
	 * We fill the caller's buffer with set bits, and then copy the block
	 * mark to the caller's buffer. Note that, if block mark swapping was
	 * necessary, it has already been done, so we can rely on the first
	 * byte of the auxiliary buffer to contain the block mark.
	 */
	memset(nand->oob_poi, 0xff, mtd->oobsize);

	nand->oob_poi[0] = nand_info->oob_buf[0];

	memcpy(buf, nand_info->data_buf, mtd->writesize);

rtn:
	mxs_nand_return_dma_descs(nand_info);

	return ret;
}

/*
 * Write a page to NAND.
 */
static int mxs_nand_ecc_write_page(struct mtd_info *mtd,
				struct nand_chip *nand, const uint8_t *buf,
				int oob_required, int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	memcpy(nand_info->data_buf, buf, mtd->writesize);
	memcpy(nand_info->oob_buf, nand->oob_poi, mtd->oobsize);

	/* Handle block mark swapping. */
	mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);

	/* Compile the DMA descriptor - write data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_ENCODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = (mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

	if (is_mx7() && nand_info->en_randomizer) {
		d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE |
				       GPMI_ECCCTRL_RANDOMIZER_TYPE2;
		/*
		 * Write the NAND page number that needs to be randomized
		 * to the GPMI_ECCCOUNT register.
		 *
		 * The value is between 0-255. For additional details, see
		 * section 9.6.6.4 of the i.MX7D Applications Processor
		 * Reference Manual.
		 */
		d->cmd.pio_words[3] |= (page % 255) << 16;
	}

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA write error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete(nand_info);
	if (ret) {
		printf("MXS NAND: BCH write timeout\n");
		goto rtn;
	}

rtn:
	mxs_nand_return_dma_descs(nand_info);
	return ret;
}

/*
 * Read OOB from NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_read_oob(struct mtd_info *mtd, loff_t from,
					struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	if (ops->mode == MTD_OPS_RAW)
		nand_info->raw_oob_mode = 1;
	else
		nand_info->raw_oob_mode = 0;

	ret = nand_info->hooked_read_oob(mtd, from, ops);

	nand_info->raw_oob_mode = 0;

	return ret;
}

/*
 * Write OOB to NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_write_oob(struct mtd_info *mtd, loff_t to,
					struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	if (ops->mode == MTD_OPS_RAW)
		nand_info->raw_oob_mode = 1;
	else
		nand_info->raw_oob_mode = 0;

	ret = nand_info->hooked_write_oob(mtd, to, ops);

	nand_info->raw_oob_mode = 0;

	return ret;
}

/*
 * Mark a block bad in NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	nand_info->marking_block_bad = 1;

	ret = nand_info->hooked_block_markbad(mtd, ofs);

	nand_info->marking_block_bad = 0;

	return ret;
}

/*
 * There are several places in this driver where we have to handle the OOB and
 * block marks. This is the function where things are the most complicated, so
 * this is where we try to explain it all. All the other places refer back to
 * here.
 *
 * These are the rules, in order of decreasing importance:
 *
 * 1) Nothing the caller does can be allowed to imperil the block mark, so all
 *    write operations take measures to protect it.
 *
 * 2) In read operations, the first byte of the OOB we return must reflect the
 *    true state of the block mark, no matter where that block mark appears in
 *    the physical page.
 *
 * 3) ECC-based read operations return an OOB full of set bits (since we never
 *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
 *    return).
 *
 * 4) "Raw" read operations return a direct view of the physical bytes in the
 *    page, using the conventional definition of which bytes are data and which
 *    are OOB. This gives the caller a way to see the actual, physical bytes
 *    in the page, without the distortions applied by our ECC engine.
 *
 * What we do for this specific read operation depends on whether we're doing
 * a "raw" read, or an ECC-based read.
 *
 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
 * easy. When reading a page, for example, the NAND Flash MTD code calls our
 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
 * ECC-based or raw view of the page is implicit in which function it calls
 * (there is a similar pair of ECC-based/raw functions for writing).
 *
 * Since MTD assumes the OOB is not covered by ECC, there is no pair of
 * ECC-based/raw functions for reading or writing the OOB. The fact that the
 * caller wants an ECC-based or raw view of the page is not propagated down to
 * this driver.
 *
 * Since our OOB *is* covered by ECC, we need this information. So, we hook the
 * ecc.read_oob and ecc.write_oob function pointers in the owning
 * struct mtd_info with our own functions. These hook functions set the
 * raw_oob_mode field so that, when control finally arrives here, we'll know
 * what to do.
 */
static int mxs_nand_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
				int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	/*
	 * First, fill in the OOB buffer. If we're doing a raw read, we need to
	 * get the bytes from the physical page. If we're not doing a raw read,
	 * we need to fill the buffer with set bits.
	 */
	if (nand_info->raw_oob_mode) {
		/*
		 * If control arrives here, we're doing a "raw" read. Send the
		 * command to read the conventional OOB and read it.
		 */
		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		nand->read_buf(mtd, nand->oob_poi, mtd->oobsize);
	} else {
		/*
		 * If control arrives here, we're not doing a "raw" read. Fill
		 * the OOB buffer with set bits and correct the block mark.
		 */
		memset(nand->oob_poi, 0xff, mtd->oobsize);

		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		mxs_nand_read_buf(mtd, nand->oob_poi, 1);
	}

	return 0;
}

/*
 * Write OOB data to NAND.
 */
static int mxs_nand_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
					int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	uint8_t block_mark = 0;

	/*
	 * There are fundamental incompatibilities between the i.MX GPMI NFC and
	 * the NAND Flash MTD model that make it essentially impossible to write
	 * the out-of-band bytes.
	 *
	 * We permit *ONE* exception. If the *intent* of writing the OOB is to
	 * mark a block bad, we can do that.
	 */

	if (!nand_info->marking_block_bad) {
		printf("MXS NAND: Writing OOB isn't supported\n");
		return -EIO;
	}

	/* Write the block mark. */
	nand->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
	nand->write_buf(mtd, &block_mark, 1);
	nand->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	/* Check if it worked. */
	if (nand->waitfunc(mtd, nand) & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}

/*
 * Claims all blocks are good.
 *
 * In principle, this function is *only* called when the NAND Flash MTD system
 * isn't allowed to keep an in-memory bad block table, so it is forced to ask
 * the driver for bad block information.
 *
 * In fact, we permit the NAND Flash MTD system to have an in-memory BBT, so
 * this function is *only* called when we take it away.
 *
 * Thus, this function is only called when we want *all* blocks to look good,
 * so it *always* returns success.
 */
static int mxs_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	return 0;
}

static int mxs_nand_set_geometry(struct mtd_info *mtd, struct bch_geometry *geo)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);

	if (chip->ecc.strength > 0 && chip->ecc.size > 0)
		return mxs_nand_calc_ecc_layout_by_info(geo, mtd,
				chip->ecc.strength, chip->ecc.size);

	if (nand_info->use_minimum_ecc ||
		mxs_nand_calc_ecc_layout(geo, mtd)) {
		if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
			return -EINVAL;

		return mxs_nand_calc_ecc_layout_by_info(geo, mtd,
				chip->ecc_strength_ds, chip->ecc_step_ds);
	}

	return 0;
}

/*
 * At this point, the physical NAND Flash chips have been identified and
 * counted, so we know the physical geometry. This enables us to make some
 * important configuration decisions.
 *
 * The return value of this function propagates directly back to this driver's
 * board_nand_init(). Anything other than zero will cause this driver to
 * tear everything down and declare failure.
 */
int mxs_nand_setup_ecc(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
	uint32_t tmp;
	int ret;

	nand_info->en_randomizer = 0;
	nand_info->oobsize = mtd->oobsize;
	nand_info->writesize = mtd->writesize;

	ret = mxs_nand_set_geometry(mtd, geo);
	if (ret)
		return ret;

	mxs_nand_calc_mark_offset(geo, mtd->writesize);

	/* Configure BCH and set NFC geometry */
	mxs_reset_block(&bch_regs->hw_bch_ctrl_reg);

	/* Configure layout 0 */
	tmp = (geo->ecc_chunk_count - 1) << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	tmp |= MXS_NAND_METADATA_SIZE << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT0_ECC0_OFFSET;
	tmp |= geo->ecc_chunk_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (geo->gf_len == 14 ? 1 : 0) <<
		BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);
	nand_info->bch_flash0layout0 = tmp;

	tmp = (mtd->writesize + mtd->oobsize)
		<< BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT1_ECCN_OFFSET;
	tmp |= geo->ecc_chunk_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (geo->gf_len == 14 ? 1 : 0) <<
		BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);
	nand_info->bch_flash0layout1 = tmp;
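
	/*
	 * Illustrative example (editorial): for a 2048-byte page with 64-byte
	 * OOB, BCH8 and GF13, layout0 above holds NBLOCKS = 3, META_SIZE = 10,
	 * ECC0 = 8 / 2 = 4 and a 512-byte chunk size (programmed as
	 * 512 >> 2 = 128 on i.MX6/7); layout1 holds PAGE_SIZE = 2112 with
	 * matching ECCN/DATAN fields.
	 */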

	/* Set *all* chip selects to use layout 0 */
	writel(0, &bch_regs->hw_bch_layoutselect);

	/* Enable BCH complete interrupt */
	writel(BCH_CTRL_COMPLETE_IRQ_EN, &bch_regs->hw_bch_ctrl_set);

	/* Hook some operations at the MTD level. */
	if (mtd->_read_oob != mxs_nand_hook_read_oob) {
		nand_info->hooked_read_oob = mtd->_read_oob;
		mtd->_read_oob = mxs_nand_hook_read_oob;
	}

	if (mtd->_write_oob != mxs_nand_hook_write_oob) {
		nand_info->hooked_write_oob = mtd->_write_oob;
		mtd->_write_oob = mxs_nand_hook_write_oob;
	}

	if (mtd->_block_markbad != mxs_nand_hook_block_markbad) {
		nand_info->hooked_block_markbad = mtd->_block_markbad;
		mtd->_block_markbad = mxs_nand_hook_block_markbad;
	}

	return 0;
}

/*
 * Allocate DMA buffers
 */
int mxs_nand_alloc_buffers(struct mxs_nand_info *nand_info)
{
	uint8_t *buf;
	const int size = NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE;

	nand_info->data_buf_size = roundup(size, MXS_DMA_ALIGNMENT);

	/* DMA buffers */
	buf = memalign(MXS_DMA_ALIGNMENT, nand_info->data_buf_size);
	if (!buf) {
		printf("MXS NAND: Error allocating DMA buffers\n");
		return -ENOMEM;
	}

	memset(buf, 0, nand_info->data_buf_size);

	nand_info->data_buf = buf;
	nand_info->oob_buf = buf + NAND_MAX_PAGESIZE;

	/* Command buffers */
	nand_info->cmd_buf = memalign(MXS_DMA_ALIGNMENT,
				MXS_NAND_COMMAND_BUFFER_SIZE);
	if (!nand_info->cmd_buf) {
		free(buf);
		printf("MXS NAND: Error allocating command buffers\n");
		return -ENOMEM;
	}
	memset(nand_info->cmd_buf, 0, MXS_NAND_COMMAND_BUFFER_SIZE);
	nand_info->cmd_queue_len = 0;

	return 0;
}
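
/*
 * Illustrative note (editorial): data_buf is a single
 * MXS_DMA_ALIGNMENT-aligned allocation; the first NAND_MAX_PAGESIZE bytes
 * hold page data, and oob_buf points at the OOB/auxiliary area immediately
 * after it.
 */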

/*
 * Initializes the NFC hardware.
 */
static int mxs_nand_init_dma(struct mxs_nand_info *info)
{
	int i = 0, j, ret = 0;

	info->desc = malloc(sizeof(struct mxs_dma_desc *) *
				MXS_NAND_DMA_DESCRIPTOR_COUNT);
	if (!info->desc) {
		ret = -ENOMEM;
		goto err1;
	}

	/* Allocate the DMA descriptors. */
	for (i = 0; i < MXS_NAND_DMA_DESCRIPTOR_COUNT; i++) {
		info->desc[i] = mxs_dma_desc_alloc();
		if (!info->desc[i]) {
			ret = -ENOMEM;
			goto err2;
		}
	}

	/* Init the DMA controller. */
	mxs_dma_init();
	for (j = MXS_DMA_CHANNEL_AHB_APBH_GPMI0;
		j <= MXS_DMA_CHANNEL_AHB_APBH_GPMI7; j++) {
		ret = mxs_dma_init_channel(j);
		if (ret)
			goto err3;
	}

	/* Reset the GPMI block. */
	mxs_reset_block(&info->gpmi_regs->hw_gpmi_ctrl0_reg);
	mxs_reset_block(&info->bch_regs->hw_bch_ctrl_reg);

	/*
	 * Choose NAND mode, set IRQ polarity, disable write protection and
	 * select BCH ECC.
	 */
	clrsetbits_le32(&info->gpmi_regs->hw_gpmi_ctrl1,
			GPMI_CTRL1_GPMI_MODE,
			GPMI_CTRL1_ATA_IRQRDY_POLARITY | GPMI_CTRL1_DEV_RESET |
			GPMI_CTRL1_BCH_MODE);

	return 0;

err3:
	for (--j; j >= MXS_DMA_CHANNEL_AHB_APBH_GPMI0; j--)
		mxs_dma_release(j);
err2:
	for (--i; i >= 0; i--)
		mxs_dma_desc_free(info->desc[i]);
	free(info->desc);
err1:
	if (ret == -ENOMEM)
		printf("MXS NAND: Unable to allocate DMA descriptors\n");
	return ret;
}

int mxs_nand_init_spl(struct nand_chip *nand)
{
	struct mxs_nand_info *nand_info;
	int err;

	nand_info = malloc(sizeof(struct mxs_nand_info));
	if (!nand_info) {
		printf("MXS NAND: Failed to allocate private data\n");
		return -ENOMEM;
	}
	memset(nand_info, 0, sizeof(struct mxs_nand_info));

	nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
	nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;

	if (is_mx6sx() || is_mx7())
		nand_info->max_ecc_strength_supported = 62;
	else
		nand_info->max_ecc_strength_supported = 40;

	err = mxs_nand_alloc_buffers(nand_info);
	if (err)
		return err;

	err = mxs_nand_init_dma(nand_info);
	if (err)
		return err;

	nand_set_controller_data(nand, nand_info);

	nand->options |= NAND_NO_SUBPAGE_WRITE;

	nand->cmd_ctrl		= mxs_nand_cmd_ctrl;
	nand->dev_ready		= mxs_nand_device_ready;
	nand->select_chip	= mxs_nand_select_chip;

	nand->read_byte		= mxs_nand_read_byte;
	nand->read_buf		= mxs_nand_read_buf;

	nand->ecc.read_page	= mxs_nand_ecc_read_page;

	nand->ecc.mode		= NAND_ECC_HW;

	return 0;
}

int mxs_nand_init_ctrl(struct mxs_nand_info *nand_info)
{
	struct mtd_info *mtd;
	struct nand_chip *nand;
	int err;

	nand = &nand_info->chip;
	mtd = nand_to_mtd(nand);
	err = mxs_nand_alloc_buffers(nand_info);
	if (err)
		return err;

	err = mxs_nand_init_dma(nand_info);
	if (err)
		goto err_free_buffers;

	memset(&fake_ecc_layout, 0, sizeof(fake_ecc_layout));

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
	nand->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
#endif

	nand_set_controller_data(nand, nand_info);
	nand->options |= NAND_NO_SUBPAGE_WRITE;

	if (nand_info->dev)
		nand->flash_node = dev_of_offset(nand_info->dev);

	nand->cmd_ctrl		= mxs_nand_cmd_ctrl;

	nand->dev_ready		= mxs_nand_device_ready;
	nand->select_chip	= mxs_nand_select_chip;
	nand->block_bad		= mxs_nand_block_bad;

	nand->read_byte		= mxs_nand_read_byte;

	nand->read_buf		= mxs_nand_read_buf;
	nand->write_buf		= mxs_nand_write_buf;

	/* first scan to find the device and get the page size */
	if (nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_DEVICE, NULL))
		goto err_free_buffers;

	if (mxs_nand_setup_ecc(mtd))
		goto err_free_buffers;

	nand->ecc.read_page	= mxs_nand_ecc_read_page;
	nand->ecc.write_page	= mxs_nand_ecc_write_page;
	nand->ecc.read_oob	= mxs_nand_ecc_read_oob;
	nand->ecc.write_oob	= mxs_nand_ecc_write_oob;

	nand->ecc.layout	= &fake_ecc_layout;
	nand->ecc.mode		= NAND_ECC_HW;
	nand->ecc.size		= nand_info->bch_geometry.ecc_chunk_size;
	nand->ecc.strength	= nand_info->bch_geometry.ecc_strength;

	/* second phase scan */
	err = nand_scan_tail(mtd);
	if (err)
		goto err_free_buffers;

	err = nand_register(0, mtd);
	if (err)
		goto err_free_buffers;

	return 0;

err_free_buffers:
	free(nand_info->data_buf);
	free(nand_info->cmd_buf);

	return err;
}

#ifndef CONFIG_NAND_MXS_DT
void board_nand_init(void)
{
	struct mxs_nand_info *nand_info;

	nand_info = malloc(sizeof(struct mxs_nand_info));
	if (!nand_info) {
		printf("MXS NAND: Failed to allocate private data\n");
		return;
	}
	memset(nand_info, 0, sizeof(struct mxs_nand_info));

	nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
	nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;

	/* Refer to Chapter 17 for i.MX6DQ, Chapter 18 for i.MX6SX */
	if (is_mx6sx() || is_mx7())
		nand_info->max_ecc_strength_supported = 62;
	else
		nand_info->max_ecc_strength_supported = 40;

#ifdef CONFIG_NAND_MXS_USE_MINIMUM_ECC
	nand_info->use_minimum_ecc = true;
#endif

	if (mxs_nand_init_ctrl(nand_info) < 0)
		goto err;

	return;

err:
	free(nand_info);
}
#endif

/*
 * Read NAND layout for FCB block generation.
 */
void mxs_nand_get_layout(struct mtd_info *mtd, struct mxs_nand_layout *l)
{
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	u32 tmp;

	tmp = readl(&bch_regs->hw_bch_flash0layout0);
	l->nblocks = (tmp & BCH_FLASHLAYOUT0_NBLOCKS_MASK) >>
			BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	l->meta_size = (tmp & BCH_FLASHLAYOUT0_META_SIZE_MASK) >>
			BCH_FLASHLAYOUT0_META_SIZE_OFFSET;

	tmp = readl(&bch_regs->hw_bch_flash0layout1);
	l->data0_size = 4 * ((tmp & BCH_FLASHLAYOUT0_DATA0_SIZE_MASK) >>
			BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET);
	l->ecc0 = (tmp & BCH_FLASHLAYOUT0_ECC0_MASK) >>
			BCH_FLASHLAYOUT0_ECC0_OFFSET;
	l->datan_size = 4 * ((tmp & BCH_FLASHLAYOUT1_DATAN_SIZE_MASK) >>
			BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET);
	l->eccn = (tmp & BCH_FLASHLAYOUT1_ECCN_MASK) >>
			BCH_FLASHLAYOUT1_ECCN_OFFSET;
}

/*
 * Set BCH to the specific layout used by the ROM bootloader to read the FCB.
 */
void mxs_nand_mode_fcb(struct mtd_info *mtd)
{
	u32 tmp;
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->en_randomizer = 1;

	mtd->writesize = 1024;
	mtd->oobsize = 1862 - 1024;

	/* 8 ECC chunks */
	tmp = 7 << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	/* 32 bytes for metadata */
	tmp |= 32 << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	/* use ECC level 62 (the field stores strength / 2 = 0x1F) */
	tmp |= 0x1F << BCH_FLASHLAYOUT0_ECC0_OFFSET;
	/* data0 block size: 0x20 * 4 = 128 bytes */
	tmp |= 0x20 << BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET;
	tmp |= 0 << BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);

	/* 1024 bytes of data + 838 bytes of OOB */
	tmp = 1862 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	/* use ECC level 62 (the field stores strength / 2 = 0x1F) */
	tmp |= 0x1F << BCH_FLASHLAYOUT1_ECCN_OFFSET;
	/* datan block size: 0x20 * 4 = 128 bytes */
	tmp |= 0x20 << BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET;
	tmp |= 0 << BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);
}
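
/*
 * Illustrative arithmetic for the FCB layout above (editorial): 32 bytes of
 * metadata + 8 chunks * 128 data bytes = 1056 bytes, plus 8 * 62 * 13 = 6448
 * ECC bits (806 bytes), totalling the 1862-byte "page" programmed into
 * layout1.
 */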

/*
 * Restore BCH to normal settings.
 */
void mxs_nand_mode_normal(struct mtd_info *mtd)
{
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->en_randomizer = 0;

	mtd->writesize = nand_info->writesize;
	mtd->oobsize = nand_info->oobsize;

	writel(nand_info->bch_flash0layout0, &bch_regs->hw_bch_flash0layout0);
	writel(nand_info->bch_flash0layout1, &bch_regs->hw_bch_flash0layout1);
}

uint32_t mxs_nand_mark_byte_offset(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	struct bch_geometry *geo = &nand_info->bch_geometry;

	return geo->block_mark_byte_offset;
}

uint32_t mxs_nand_mark_bit_offset(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	struct bch_geometry *geo = &nand_info->bch_geometry;

	return geo->block_mark_bit_offset;
}