1 /*
2  *  Overview:
3  *   This is the generic MTD driver for NAND flash devices. It should be
4  *   capable of working with almost all NAND chips currently available.
5  *
6  *	Additional technical information is available on
7  *	http://www.linux-mtd.infradead.org/doc/nand.html
8  *
9  *  Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
10  *		  2002-2006 Thomas Gleixner (tglx@linutronix.de)
11  *
12  *  Credits:
13  *	David Woodhouse for adding multichip support
14  *
15  *	Aleph One Ltd. and Toby Churchill Ltd. for supporting the
16  *	rework for 2K page size chips
17  *
18  *  TODO:
19  *	Enable cached programming for 2k page size chips
20  *	Check if mtd->ecctype should be set to MTD_ECC_HW
21  *	if we have HW ECC support.
22  *	The BBT is not serialized; this has to be fixed
23  *
24  * This program is free software; you can redistribute it and/or modify
25  * it under the terms of the GNU General Public License version 2 as
26  * published by the Free Software Foundation.
27  *
28  */
29 
30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31 
32 #include <linux/module.h>
33 #include <linux/delay.h>
34 #include <linux/errno.h>
35 #include <linux/err.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/mm.h>
39 #include <linux/nmi.h>
40 #include <linux/types.h>
41 #include <linux/mtd/mtd.h>
42 #include <linux/mtd/rawnand.h>
43 #include <linux/mtd/nand_ecc.h>
44 #include <linux/mtd/nand_bch.h>
45 #include <linux/interrupt.h>
46 #include <linux/bitops.h>
47 #include <linux/io.h>
48 #include <linux/mtd/partitions.h>
49 #include <linux/of.h>
50 
51 static int nand_get_device(struct mtd_info *mtd, int new_state);
52 
53 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
54 			     struct mtd_oob_ops *ops);
55 
56 /* Define default oob placement schemes for large and small page devices */
57 static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
58 				 struct mtd_oob_region *oobregion)
59 {
60 	struct nand_chip *chip = mtd_to_nand(mtd);
61 	struct nand_ecc_ctrl *ecc = &chip->ecc;
62 
63 	if (section > 1)
64 		return -ERANGE;
65 
66 	if (!section) {
67 		oobregion->offset = 0;
68 		if (mtd->oobsize == 16)
69 			oobregion->length = 4;
70 		else
71 			oobregion->length = 3;
72 	} else {
73 		if (mtd->oobsize == 8)
74 			return -ERANGE;
75 
76 		oobregion->offset = 6;
77 		oobregion->length = ecc->total - 4;
78 	}
79 
80 	return 0;
81 }
82 
83 static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
84 				  struct mtd_oob_region *oobregion)
85 {
86 	if (section > 1)
87 		return -ERANGE;
88 
89 	if (mtd->oobsize == 16) {
90 		if (section)
91 			return -ERANGE;
92 
93 		oobregion->length = 8;
94 		oobregion->offset = 8;
95 	} else {
96 		oobregion->length = 2;
97 		if (!section)
98 			oobregion->offset = 3;
99 		else
100 			oobregion->offset = 6;
101 	}
102 
103 	return 0;
104 }
105 
106 const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
107 	.ecc = nand_ooblayout_ecc_sp,
108 	.free = nand_ooblayout_free_sp,
109 };
110 EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
111 
112 static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
113 				 struct mtd_oob_region *oobregion)
114 {
115 	struct nand_chip *chip = mtd_to_nand(mtd);
116 	struct nand_ecc_ctrl *ecc = &chip->ecc;
117 
118 	if (section)
119 		return -ERANGE;
120 
121 	oobregion->length = ecc->total;
122 	oobregion->offset = mtd->oobsize - oobregion->length;
123 
124 	return 0;
125 }
126 
127 static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
128 				  struct mtd_oob_region *oobregion)
129 {
130 	struct nand_chip *chip = mtd_to_nand(mtd);
131 	struct nand_ecc_ctrl *ecc = &chip->ecc;
132 
133 	if (section)
134 		return -ERANGE;
135 
136 	oobregion->length = mtd->oobsize - ecc->total - 2;
137 	oobregion->offset = 2;
138 
139 	return 0;
140 }
141 
142 const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
143 	.ecc = nand_ooblayout_ecc_lp,
144 	.free = nand_ooblayout_free_lp,
145 };
146 EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
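/*
 * Illustrative sketch (hypothetical driver code, not a definitive recipe): a
 * controller driver that wants one of the default layouts above typically
 * attaches it to its mtd_info before registration, for example:
 *
 *	if (mtd->writesize <= 512)
 *		mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
 *	else
 *		mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
 *
 * nand_scan_tail() (later in this file) makes a comparable choice
 * automatically when no layout has been set.
 */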
147 
148 /*
149  * Support the old "large page" layout used for 1-bit Hamming ECC where the ECC
150  * bytes are placed at a fixed offset.
151  */
152 static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
153 					 struct mtd_oob_region *oobregion)
154 {
155 	struct nand_chip *chip = mtd_to_nand(mtd);
156 	struct nand_ecc_ctrl *ecc = &chip->ecc;
157 
158 	if (section)
159 		return -ERANGE;
160 
161 	switch (mtd->oobsize) {
162 	case 64:
163 		oobregion->offset = 40;
164 		break;
165 	case 128:
166 		oobregion->offset = 80;
167 		break;
168 	default:
169 		return -EINVAL;
170 	}
171 
172 	oobregion->length = ecc->total;
173 	if (oobregion->offset + oobregion->length > mtd->oobsize)
174 		return -ERANGE;
175 
176 	return 0;
177 }
178 
179 static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
180 					  struct mtd_oob_region *oobregion)
181 {
182 	struct nand_chip *chip = mtd_to_nand(mtd);
183 	struct nand_ecc_ctrl *ecc = &chip->ecc;
184 	int ecc_offset = 0;
185 
186 	if (section < 0 || section > 1)
187 		return -ERANGE;
188 
189 	switch (mtd->oobsize) {
190 	case 64:
191 		ecc_offset = 40;
192 		break;
193 	case 128:
194 		ecc_offset = 80;
195 		break;
196 	default:
197 		return -EINVAL;
198 	}
199 
200 	if (section == 0) {
201 		oobregion->offset = 2;
202 		oobregion->length = ecc_offset - 2;
203 	} else {
204 		oobregion->offset = ecc_offset + ecc->total;
205 		oobregion->length = mtd->oobsize - oobregion->offset;
206 	}
207 
208 	return 0;
209 }
210 
211 static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
212 	.ecc = nand_ooblayout_ecc_lp_hamming,
213 	.free = nand_ooblayout_free_lp_hamming,
214 };
215 
216 static int check_offs_len(struct mtd_info *mtd,
217 					loff_t ofs, uint64_t len)
218 {
219 	struct nand_chip *chip = mtd_to_nand(mtd);
220 	int ret = 0;
221 
222 	/* Start address must align on block boundary */
223 	if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
224 		pr_debug("%s: unaligned address\n", __func__);
225 		ret = -EINVAL;
226 	}
227 
228 	/* Length must align on block boundary */
229 	if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
230 		pr_debug("%s: length not block aligned\n", __func__);
231 		ret = -EINVAL;
232 	}
233 
234 	return ret;
235 }
236 
237 /**
238  * nand_release_device - [GENERIC] release chip
239  * @mtd: MTD device structure
240  *
241  * Release chip lock and wake up anyone waiting on the device.
242  */
243 static void nand_release_device(struct mtd_info *mtd)
244 {
245 	struct nand_chip *chip = mtd_to_nand(mtd);
246 
247 	/* Release the controller and the chip */
248 	spin_lock(&chip->controller->lock);
249 	chip->controller->active = NULL;
250 	chip->state = FL_READY;
251 	wake_up(&chip->controller->wq);
252 	spin_unlock(&chip->controller->lock);
253 }
254 
255 /**
256  * nand_read_byte - [DEFAULT] read one byte from the chip
257  * @mtd: MTD device structure
258  *
259  * Default read function for 8bit buswidth
260  */
261 static uint8_t nand_read_byte(struct mtd_info *mtd)
262 {
263 	struct nand_chip *chip = mtd_to_nand(mtd);
264 	return readb(chip->IO_ADDR_R);
265 }
266 
267 /**
268  * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
269  * @mtd: MTD device structure
270  *
271  * Default read function for 16bit buswidth with endianness conversion.
272  *
273  */
274 static uint8_t nand_read_byte16(struct mtd_info *mtd)
275 {
276 	struct nand_chip *chip = mtd_to_nand(mtd);
277 	return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
278 }
279 
280 /**
281  * nand_read_word - [DEFAULT] read one word from the chip
282  * @mtd: MTD device structure
283  *
284  * Default read function for 16bit buswidth without endianness conversion.
285  */
286 static u16 nand_read_word(struct mtd_info *mtd)
287 {
288 	struct nand_chip *chip = mtd_to_nand(mtd);
289 	return readw(chip->IO_ADDR_R);
290 }
291 
292 /**
293  * nand_select_chip - [DEFAULT] control CE line
294  * @mtd: MTD device structure
295  * @chipnr: chipnumber to select, -1 for deselect
296  *
297  * Default select function for 1 chip devices.
298  */
299 static void nand_select_chip(struct mtd_info *mtd, int chipnr)
300 {
301 	struct nand_chip *chip = mtd_to_nand(mtd);
302 
303 	switch (chipnr) {
304 	case -1:
305 		chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
306 		break;
307 	case 0:
308 		break;
309 
310 	default:
311 		BUG();
312 	}
313 }
314 
315 /**
316  * nand_write_byte - [DEFAULT] write single byte to chip
317  * @mtd: MTD device structure
318  * @byte: value to write
319  *
320  * Default function to write a byte to I/O[7:0]
321  */
322 static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
323 {
324 	struct nand_chip *chip = mtd_to_nand(mtd);
325 
326 	chip->write_buf(mtd, &byte, 1);
327 }
328 
329 /**
330  * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
331  * @mtd: MTD device structure
332  * @byte: value to write
333  *
334  * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
335  */
336 static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
337 {
338 	struct nand_chip *chip = mtd_to_nand(mtd);
339 	uint16_t word = byte;
340 
341 	/*
342 	 * It's not entirely clear what should happen to I/O[15:8] when writing
343 	 * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
344 	 *
345 	 *    When the host supports a 16-bit bus width, only data is
346 	 *    transferred at the 16-bit width. All address and command line
347 	 *    transfers shall use only the lower 8-bits of the data bus. During
348 	 *    command transfers, the host may place any value on the upper
349 	 *    8-bits of the data bus. During address transfers, the host shall
350 	 *    set the upper 8-bits of the data bus to 00h.
351 	 *
352 	 * One user of the write_byte callback is nand_onfi_set_features. The
353 	 * four parameters are specified to be written to I/O[7:0], but this is
354 	 * neither an address nor a command transfer. Let's assume a 0 on the
355 	 * upper I/O lines is OK.
356 	 */
357 	chip->write_buf(mtd, (uint8_t *)&word, 2);
358 }
359 
360 /**
361  * nand_write_buf - [DEFAULT] write buffer to chip
362  * @mtd: MTD device structure
363  * @buf: data buffer
364  * @len: number of bytes to write
365  *
366  * Default write function for 8bit buswidth.
367  */
368 static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
369 {
370 	struct nand_chip *chip = mtd_to_nand(mtd);
371 
372 	iowrite8_rep(chip->IO_ADDR_W, buf, len);
373 }
374 
375 /**
376  * nand_read_buf - [DEFAULT] read chip data into buffer
377  * @mtd: MTD device structure
378  * @buf: buffer to store data
379  * @len: number of bytes to read
380  *
381  * Default read function for 8bit buswidth.
382  */
383 static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
384 {
385 	struct nand_chip *chip = mtd_to_nand(mtd);
386 
387 	ioread8_rep(chip->IO_ADDR_R, buf, len);
388 }
389 
390 /**
391  * nand_write_buf16 - [DEFAULT] write buffer to chip
392  * @mtd: MTD device structure
393  * @buf: data buffer
394  * @len: number of bytes to write
395  *
396  * Default write function for 16bit buswidth.
397  */
398 static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
399 {
400 	struct nand_chip *chip = mtd_to_nand(mtd);
401 	u16 *p = (u16 *) buf;
402 
403 	iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
404 }
405 
406 /**
407  * nand_read_buf16 - [DEFAULT] read chip data into buffer
408  * @mtd: MTD device structure
409  * @buf: buffer to store data
410  * @len: number of bytes to read
411  *
412  * Default read function for 16bit buswidth.
413  */
414 static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
415 {
416 	struct nand_chip *chip = mtd_to_nand(mtd);
417 	u16 *p = (u16 *) buf;
418 
419 	ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
420 }
421 
422 /**
423  * nand_block_bad - [DEFAULT] Read bad block marker from the chip
424  * @mtd: MTD device structure
425  * @ofs: offset from device start
426  *
427  * Check if the block is bad.
428  */
429 static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
430 {
431 	int page, page_end, res;
432 	struct nand_chip *chip = mtd_to_nand(mtd);
433 	u8 bad;
434 
435 	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
436 		ofs += mtd->erasesize - mtd->writesize;
437 
438 	page = (int)(ofs >> chip->page_shift) & chip->pagemask;
439 	page_end = page + (chip->bbt_options & NAND_BBT_SCAN2NDPAGE ? 2 : 1);
440 
441 	for (; page < page_end; page++) {
442 		res = chip->ecc.read_oob(mtd, chip, page);
443 		if (res < 0)
444 			return res;
445 
446 		bad = chip->oob_poi[chip->badblockpos];
447 
448 		if (likely(chip->badblockbits == 8))
449 			res = bad != 0xFF;
450 		else
451 			res = hweight8(bad) < chip->badblockbits;
452 		if (res)
453 			return res;
454 	}
455 
456 	return 0;
457 }
458 
459 /**
460  * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
461  * @mtd: MTD device structure
462  * @ofs: offset from device start
463  *
464  * This is the default implementation, which can be overridden by a hardware
465  * specific driver. It provides the details for writing a bad block marker to a
466  * block.
467  */
468 static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
469 {
470 	struct nand_chip *chip = mtd_to_nand(mtd);
471 	struct mtd_oob_ops ops;
472 	uint8_t buf[2] = { 0, 0 };
473 	int ret = 0, res, i = 0;
474 
475 	memset(&ops, 0, sizeof(ops));
476 	ops.oobbuf = buf;
477 	ops.ooboffs = chip->badblockpos;
478 	if (chip->options & NAND_BUSWIDTH_16) {
479 		ops.ooboffs &= ~0x01;
480 		ops.len = ops.ooblen = 2;
481 	} else {
482 		ops.len = ops.ooblen = 1;
483 	}
484 	ops.mode = MTD_OPS_PLACE_OOB;
485 
486 	/* Write to first/last page(s) if necessary */
487 	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
488 		ofs += mtd->erasesize - mtd->writesize;
489 	do {
490 		res = nand_do_write_oob(mtd, ofs, &ops);
491 		if (!ret)
492 			ret = res;
493 
494 		i++;
495 		ofs += mtd->writesize;
496 	} while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
497 
498 	return ret;
499 }
500 
501 /**
502  * nand_block_markbad_lowlevel - mark a block bad
503  * @mtd: MTD device structure
504  * @ofs: offset from device start
505  *
506  * This function performs the generic NAND bad block marking steps (i.e., bad
507  * block table(s) and/or marker(s)). We only allow the hardware driver to
508  * specify how to write bad block markers to OOB (chip->block_markbad).
509  *
510  * We try operations in the following order:
511  *
512  *  (1) erase the affected block, to allow OOB marker to be written cleanly
513  *  (2) write bad block marker to OOB area of affected block (unless flag
514  *      NAND_BBT_NO_OOB_BBM is present)
515  *  (3) update the BBT
516  *
517  * Note that we retain the first error encountered in (2) or (3), finish the
518  * procedures, and dump the error in the end.
519 */
520 static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
521 {
522 	struct nand_chip *chip = mtd_to_nand(mtd);
523 	int res, ret = 0;
524 
525 	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
526 		struct erase_info einfo;
527 
528 		/* Attempt erase before marking OOB */
529 		memset(&einfo, 0, sizeof(einfo));
530 		einfo.mtd = mtd;
531 		einfo.addr = ofs;
532 		einfo.len = 1ULL << chip->phys_erase_shift;
533 		nand_erase_nand(mtd, &einfo, 0);
534 
535 		/* Write bad block marker to OOB */
536 		nand_get_device(mtd, FL_WRITING);
537 		ret = chip->block_markbad(mtd, ofs);
538 		nand_release_device(mtd);
539 	}
540 
541 	/* Mark block bad in BBT */
542 	if (chip->bbt) {
543 		res = nand_markbad_bbt(mtd, ofs);
544 		if (!ret)
545 			ret = res;
546 	}
547 
548 	if (!ret)
549 		mtd->ecc_stats.badblocks++;
550 
551 	return ret;
552 }
553 
554 /**
555  * nand_check_wp - [GENERIC] check if the chip is write protected
556  * @mtd: MTD device structure
557  *
558  * Check if the device is write protected. The function expects that the
559  * device is already selected.
560  */
561 static int nand_check_wp(struct mtd_info *mtd)
562 {
563 	struct nand_chip *chip = mtd_to_nand(mtd);
564 
565 	/* Broken xD cards report WP despite being writable */
566 	if (chip->options & NAND_BROKEN_XD)
567 		return 0;
568 
569 	/* Check the WP bit */
570 	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
571 	return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
572 }
573 
574 /**
575  * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
576  * @mtd: MTD device structure
577  * @ofs: offset from device start
578  *
579  * Check if the block is marked as reserved.
580  */
581 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
582 {
583 	struct nand_chip *chip = mtd_to_nand(mtd);
584 
585 	if (!chip->bbt)
586 		return 0;
587 	/* Return info from the table */
588 	return nand_isreserved_bbt(mtd, ofs);
589 }
590 
591 /**
592  * nand_block_checkbad - [GENERIC] Check if a block is marked bad
593  * @mtd: MTD device structure
594  * @ofs: offset from device start
595  * @allowbbt: 1 if it is allowed to access the BBT area
596  *
597  * Check if the block is bad, either by reading the bad block table or by
598  * calling the scan function.
599  */
600 static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
601 {
602 	struct nand_chip *chip = mtd_to_nand(mtd);
603 
604 	if (!chip->bbt)
605 		return chip->block_bad(mtd, ofs);
606 
607 	/* Return info from the table */
608 	return nand_isbad_bbt(mtd, ofs, allowbbt);
609 }
610 
611 /**
612  * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
613  * @mtd: MTD device structure
614  * @timeo: Timeout
615  *
616  * Helper function for nand_wait_ready used when needing to wait in interrupt
617  * context.
618  */
619 static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
620 {
621 	struct nand_chip *chip = mtd_to_nand(mtd);
622 	int i;
623 
624 	/* Wait for the device to get ready */
625 	for (i = 0; i < timeo; i++) {
626 		if (chip->dev_ready(mtd))
627 			break;
628 		touch_softlockup_watchdog();
629 		mdelay(1);
630 	}
631 }
632 
633 /**
634  * nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
635  * @mtd: MTD device structure
636  *
637  * Wait for the ready pin after a command, and warn if a timeout occurs.
638  */
639 void nand_wait_ready(struct mtd_info *mtd)
640 {
641 	struct nand_chip *chip = mtd_to_nand(mtd);
642 	unsigned long timeo = 400;
643 
644 	if (in_interrupt() || oops_in_progress)
645 		return panic_nand_wait_ready(mtd, timeo);
646 
647 	/* Wait until command is processed or timeout occurs */
648 	timeo = jiffies + msecs_to_jiffies(timeo);
649 	do {
650 		if (chip->dev_ready(mtd))
651 			return;
652 		cond_resched();
653 	} while (time_before(jiffies, timeo));
654 
655 	if (!chip->dev_ready(mtd))
656 		pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
657 }
658 EXPORT_SYMBOL_GPL(nand_wait_ready);
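/*
 * Illustrative sketch (hypothetical driver code): a driver implementing its
 * own ->cmdfunc would typically call nand_wait_ready() after issuing a
 * command that makes the chip busy, for example:
 *
 *	chip->cmd_ctrl(mtd, NAND_CMD_ERASE2,
 *		       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
 *	chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
 *	nand_wait_ready(mtd);
 */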
659 
660 /**
661  * nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
662  * @mtd: MTD device structure
663  * @timeo: Timeout in ms
664  *
665  * Wait for status ready (i.e. command done) or timeout.
666  */
667 static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
668 {
669 	register struct nand_chip *chip = mtd_to_nand(mtd);
670 
671 	timeo = jiffies + msecs_to_jiffies(timeo);
672 	do {
673 		if ((chip->read_byte(mtd) & NAND_STATUS_READY))
674 			break;
675 		touch_softlockup_watchdog();
676 	} while (time_before(jiffies, timeo));
677 };
678 
679 /**
680  * nand_command - [DEFAULT] Send command to NAND device
681  * @mtd: MTD device structure
682  * @command: the command to be sent
683  * @column: the column address for this command, -1 if none
684  * @page_addr: the page address for this command, -1 if none
685  *
686  * Send command to NAND device. This function is used for small page devices
687  * (512 Bytes per page).
688  */
689 static void nand_command(struct mtd_info *mtd, unsigned int command,
690 			 int column, int page_addr)
691 {
692 	register struct nand_chip *chip = mtd_to_nand(mtd);
693 	int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
694 
695 	/* Write out the command to the device */
696 	if (command == NAND_CMD_SEQIN) {
697 		int readcmd;
698 
699 		if (column >= mtd->writesize) {
700 			/* OOB area */
701 			column -= mtd->writesize;
702 			readcmd = NAND_CMD_READOOB;
703 		} else if (column < 256) {
704 			/* First 256 bytes --> READ0 */
705 			readcmd = NAND_CMD_READ0;
706 		} else {
707 			column -= 256;
708 			readcmd = NAND_CMD_READ1;
709 		}
710 		chip->cmd_ctrl(mtd, readcmd, ctrl);
711 		ctrl &= ~NAND_CTRL_CHANGE;
712 	}
713 	if (command != NAND_CMD_NONE)
714 		chip->cmd_ctrl(mtd, command, ctrl);
715 
716 	/* Address cycle, when necessary */
717 	ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
718 	/* Serially input address */
719 	if (column != -1) {
720 		/* Adjust columns for 16 bit buswidth */
721 		if (chip->options & NAND_BUSWIDTH_16 &&
722 				!nand_opcode_8bits(command))
723 			column >>= 1;
724 		chip->cmd_ctrl(mtd, column, ctrl);
725 		ctrl &= ~NAND_CTRL_CHANGE;
726 	}
727 	if (page_addr != -1) {
728 		chip->cmd_ctrl(mtd, page_addr, ctrl);
729 		ctrl &= ~NAND_CTRL_CHANGE;
730 		chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
731 		/* One more address cycle for devices > 32MiB */
732 		if (chip->chipsize > (32 << 20))
733 			chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
734 	}
735 	chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
736 
737 	/*
738 	 * Program and erase have their own busy handlers; status and sequential
739 	 * in need no delay.
740 	 */
741 	switch (command) {
742 
743 	case NAND_CMD_NONE:
744 	case NAND_CMD_PAGEPROG:
745 	case NAND_CMD_ERASE1:
746 	case NAND_CMD_ERASE2:
747 	case NAND_CMD_SEQIN:
748 	case NAND_CMD_STATUS:
749 	case NAND_CMD_READID:
750 	case NAND_CMD_SET_FEATURES:
751 		return;
752 
753 	case NAND_CMD_RESET:
754 		if (chip->dev_ready)
755 			break;
756 		udelay(chip->chip_delay);
757 		chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
758 			       NAND_CTRL_CLE | NAND_CTRL_CHANGE);
759 		chip->cmd_ctrl(mtd,
760 			       NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
761 		/* EZ-NAND can take up to 250ms as per ONFI v4.0 */
762 		nand_wait_status_ready(mtd, 250);
763 		return;
764 
765 		/* This applies to read commands */
766 	case NAND_CMD_READ0:
767 		/*
768 		 * READ0 is sometimes used to exit GET STATUS mode. When this
769 		 * is the case no address cycles are requested, and we can use
770 		 * this information to detect that we should not wait for the
771 		 * device to be ready.
772 		 */
773 		if (column == -1 && page_addr == -1)
774 			return;
775 
776 	default:
777 		/*
778 		 * If we don't have access to the busy pin, we apply the given
779 		 * command delay
780 		 */
781 		if (!chip->dev_ready) {
782 			udelay(chip->chip_delay);
783 			return;
784 		}
785 	}
786 	/*
787 	 * Apply this short delay always to ensure that we do wait tWB in
788 	 * any case on any machine.
789 	 */
790 	ndelay(100);
791 
792 	nand_wait_ready(mtd);
793 }
794 
795 static void nand_ccs_delay(struct nand_chip *chip)
796 {
797 	/*
798 	 * The controller already takes care of waiting for tCCS when the RNDIN
799 	 * or RNDOUT command is sent, return directly.
800 	 */
801 	if (!(chip->options & NAND_WAIT_TCCS))
802 		return;
803 
804 	/*
805 	 * Wait tCCS_min if it is correctly defined, otherwise wait 500ns
806 	 * (which should be safe for all NANDs).
807 	 */
808 	if (chip->data_interface && chip->data_interface->timings.sdr.tCCS_min)
809 		ndelay(chip->data_interface->timings.sdr.tCCS_min / 1000);
810 	else
811 		ndelay(500);
812 }
813 
814 /**
815  * nand_command_lp - [DEFAULT] Send command to NAND large page device
816  * @mtd: MTD device structure
817  * @command: the command to be sent
818  * @column: the column address for this command, -1 if none
819  * @page_addr: the page address for this command, -1 if none
820  *
821  * Send command to NAND device. This is the version for the new large page
822  * devices. We don't have the separate regions as we have in the small page
823  * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
824  */
825 static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
826 			    int column, int page_addr)
827 {
828 	register struct nand_chip *chip = mtd_to_nand(mtd);
829 
830 	/* Emulate NAND_CMD_READOOB */
831 	if (command == NAND_CMD_READOOB) {
832 		column += mtd->writesize;
833 		command = NAND_CMD_READ0;
834 	}
835 
836 	/* Command latch cycle */
837 	if (command != NAND_CMD_NONE)
838 		chip->cmd_ctrl(mtd, command,
839 			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
840 
841 	if (column != -1 || page_addr != -1) {
842 		int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
843 
844 		/* Serially input address */
845 		if (column != -1) {
846 			/* Adjust columns for 16 bit buswidth */
847 			if (chip->options & NAND_BUSWIDTH_16 &&
848 					!nand_opcode_8bits(command))
849 				column >>= 1;
850 			chip->cmd_ctrl(mtd, column, ctrl);
851 			ctrl &= ~NAND_CTRL_CHANGE;
852 
853 			/* Only output a single addr cycle for 8bits opcodes. */
854 			if (!nand_opcode_8bits(command))
855 				chip->cmd_ctrl(mtd, column >> 8, ctrl);
856 		}
857 		if (page_addr != -1) {
858 			chip->cmd_ctrl(mtd, page_addr, ctrl);
859 			chip->cmd_ctrl(mtd, page_addr >> 8,
860 				       NAND_NCE | NAND_ALE);
861 			/* One more address cycle for devices > 128MiB */
862 			if (chip->chipsize > (128 << 20))
863 				chip->cmd_ctrl(mtd, page_addr >> 16,
864 					       NAND_NCE | NAND_ALE);
865 		}
866 	}
867 	chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
868 
869 	/*
870 	 * Program and erase have their own busy handlers; sequential
871 	 * in and status need no delay.
872 	 */
873 	switch (command) {
874 
875 	case NAND_CMD_NONE:
876 	case NAND_CMD_CACHEDPROG:
877 	case NAND_CMD_PAGEPROG:
878 	case NAND_CMD_ERASE1:
879 	case NAND_CMD_ERASE2:
880 	case NAND_CMD_SEQIN:
881 	case NAND_CMD_STATUS:
882 	case NAND_CMD_READID:
883 	case NAND_CMD_SET_FEATURES:
884 		return;
885 
886 	case NAND_CMD_RNDIN:
887 		nand_ccs_delay(chip);
888 		return;
889 
890 	case NAND_CMD_RESET:
891 		if (chip->dev_ready)
892 			break;
893 		udelay(chip->chip_delay);
894 		chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
895 			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
896 		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
897 			       NAND_NCE | NAND_CTRL_CHANGE);
898 		/* EZ-NAND can take up to 250ms as per ONFI v4.0 */
899 		nand_wait_status_ready(mtd, 250);
900 		return;
901 
902 	case NAND_CMD_RNDOUT:
903 		/* No ready / busy check necessary */
904 		chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART,
905 			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
906 		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
907 			       NAND_NCE | NAND_CTRL_CHANGE);
908 
909 		nand_ccs_delay(chip);
910 		return;
911 
912 	case NAND_CMD_READ0:
913 		/*
914 		 * READ0 is sometimes used to exit GET STATUS mode. When this
915 		 * is the case no address cycles are requested, and we can use
916 		 * this information to detect that READSTART should not be
917 		 * issued.
918 		 */
919 		if (column == -1 && page_addr == -1)
920 			return;
921 
922 		chip->cmd_ctrl(mtd, NAND_CMD_READSTART,
923 			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
924 		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
925 			       NAND_NCE | NAND_CTRL_CHANGE);
926 
927 		/* This applies to read commands */
928 	default:
929 		/*
930 		 * If we don't have access to the busy pin, we apply the given
931 		 * command delay.
932 		 */
933 		if (!chip->dev_ready) {
934 			udelay(chip->chip_delay);
935 			return;
936 		}
937 	}
938 
939 	/*
940 	 * Apply this short delay always to ensure that we do wait tWB in
941 	 * any case on any machine.
942 	 */
943 	ndelay(100);
944 
945 	nand_wait_ready(mtd);
946 }
947 
948 /**
949  * panic_nand_get_device - [GENERIC] Get chip for selected access
950  * @chip: the nand chip descriptor
951  * @mtd: MTD device structure
952  * @new_state: the state which is requested
953  *
954  * Used when in panic, no locks are taken.
955  */
956 static void panic_nand_get_device(struct nand_chip *chip,
957 		      struct mtd_info *mtd, int new_state)
958 {
959 	/* Hardware controller shared among independent devices */
960 	chip->controller->active = chip;
961 	chip->state = new_state;
962 }
963 
964 /**
965  * nand_get_device - [GENERIC] Get chip for selected access
966  * @mtd: MTD device structure
967  * @new_state: the state which is requested
968  *
969  * Get the device and lock it for exclusive access
970  */
971 static int
972 nand_get_device(struct mtd_info *mtd, int new_state)
973 {
974 	struct nand_chip *chip = mtd_to_nand(mtd);
975 	spinlock_t *lock = &chip->controller->lock;
976 	wait_queue_head_t *wq = &chip->controller->wq;
977 	DECLARE_WAITQUEUE(wait, current);
978 retry:
979 	spin_lock(lock);
980 
981 	/* Hardware controller shared among independent devices */
982 	if (!chip->controller->active)
983 		chip->controller->active = chip;
984 
985 	if (chip->controller->active == chip && chip->state == FL_READY) {
986 		chip->state = new_state;
987 		spin_unlock(lock);
988 		return 0;
989 	}
990 	if (new_state == FL_PM_SUSPENDED) {
991 		if (chip->controller->active->state == FL_PM_SUSPENDED) {
992 			chip->state = FL_PM_SUSPENDED;
993 			spin_unlock(lock);
994 			return 0;
995 		}
996 	}
997 	set_current_state(TASK_UNINTERRUPTIBLE);
998 	add_wait_queue(wq, &wait);
999 	spin_unlock(lock);
1000 	schedule();
1001 	remove_wait_queue(wq, &wait);
1002 	goto retry;
1003 }
1004 
1005 /**
1006  * panic_nand_wait - [GENERIC] wait until the command is done
1007  * @mtd: MTD device structure
1008  * @chip: NAND chip structure
1009  * @timeo: timeout
1010  *
1011  * Wait for command done. This is a helper function for nand_wait used when
1012  * we are in interrupt context. May happen when in panic and trying to write
1013  * an oops through mtdoops.
1014  */
1015 static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
1016 			    unsigned long timeo)
1017 {
1018 	int i;
1019 	for (i = 0; i < timeo; i++) {
1020 		if (chip->dev_ready) {
1021 			if (chip->dev_ready(mtd))
1022 				break;
1023 		} else {
1024 			if (chip->read_byte(mtd) & NAND_STATUS_READY)
1025 				break;
1026 		}
1027 		mdelay(1);
1028 	}
1029 }
1030 
1031 /**
1032  * nand_wait - [DEFAULT] wait until the command is done
1033  * @mtd: MTD device structure
1034  * @chip: NAND chip structure
1035  *
1036  * Wait for command done. This applies to erase and program only.
1037  */
1038 static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
1039 {
1040 
1041 	int status;
1042 	unsigned long timeo = 400;
1043 
1044 	/*
1045 	 * Apply this short delay always to ensure that we do wait tWB in any
1046 	 * case on any machine.
1047 	 */
1048 	ndelay(100);
1049 
1050 	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
1051 
1052 	if (in_interrupt() || oops_in_progress)
1053 		panic_nand_wait(mtd, chip, timeo);
1054 	else {
1055 		timeo = jiffies + msecs_to_jiffies(timeo);
1056 		do {
1057 			if (chip->dev_ready) {
1058 				if (chip->dev_ready(mtd))
1059 					break;
1060 			} else {
1061 				if (chip->read_byte(mtd) & NAND_STATUS_READY)
1062 					break;
1063 			}
1064 			cond_resched();
1065 		} while (time_before(jiffies, timeo));
1066 	}
1067 
1068 	status = (int)chip->read_byte(mtd);
1069 	/* This can happen in case of a timeout or a buggy dev_ready */
1070 	WARN_ON(!(status & NAND_STATUS_READY));
1071 	return status;
1072 }
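/*
 * Illustrative sketch: nand_wait() is installed as the default ->waitfunc,
 * and callers of ->waitfunc (the erase and program paths later in this file,
 * or drivers with their own wait routine) usually test the returned status
 * word roughly like this:
 *
 *	status = chip->waitfunc(mtd, chip);
 *	if (status & NAND_STATUS_FAIL)
 *		return -EIO;
 */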
1073 
1074 /**
1075  * nand_reset_data_interface - Reset data interface and timings
1076  * @chip: The NAND chip
1077  * @chipnr: Internal die id
1078  *
1079  * Reset the Data interface and timings to ONFI mode 0.
1080  *
1081  * Returns 0 for success or negative error code otherwise.
1082  */
1083 static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
1084 {
1085 	struct mtd_info *mtd = nand_to_mtd(chip);
1086 	const struct nand_data_interface *conf;
1087 	int ret;
1088 
1089 	if (!chip->setup_data_interface)
1090 		return 0;
1091 
1092 	/*
1093 	 * The ONFI specification says:
1094 	 * "
1095 	 * To transition from NV-DDR or NV-DDR2 to the SDR data
1096 	 * interface, the host shall use the Reset (FFh) command
1097 	 * using SDR timing mode 0. A device in any timing mode is
1098 	 * required to recognize Reset (FFh) command issued in SDR
1099 	 * timing mode 0.
1100 	 * "
1101 	 *
1102 	 * Configure the data interface in SDR mode and set the
1103 	 * timings to timing mode 0.
1104 	 */
1105 
1106 	conf = nand_get_default_data_interface();
1107 	ret = chip->setup_data_interface(mtd, chipnr, conf);
1108 	if (ret)
1109 		pr_err("Failed to configure data interface to SDR timing mode 0\n");
1110 
1111 	return ret;
1112 }
1113 
1114 /**
1115  * nand_setup_data_interface - Setup the best data interface and timings
1116  * @chip: The NAND chip
1117  * @chipnr: Internal die id
1118  *
1119  * Find and configure the best data interface and NAND timings supported by
1120  * the chip and the driver.
1121  * First tries to retrieve supported timing modes from ONFI information,
1122  * and if the NAND chip does not support ONFI, relies on the
1123  * ->onfi_timing_mode_default specified in the nand_ids table.
1124  *
1125  * Returns 0 for success or negative error code otherwise.
1126  */
1127 static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
1128 {
1129 	struct mtd_info *mtd = nand_to_mtd(chip);
1130 	int ret;
1131 
1132 	if (!chip->setup_data_interface || !chip->data_interface)
1133 		return 0;
1134 
1135 	/*
1136 	 * Ensure the timing mode has been changed on the chip side
1137 	 * before changing timings on the controller side.
1138 	 */
1139 	if (chip->onfi_version &&
1140 	    (le16_to_cpu(chip->onfi_params.opt_cmd) &
1141 	     ONFI_OPT_CMD_SET_GET_FEATURES)) {
1142 		u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
1143 			chip->onfi_timing_mode_default,
1144 		};
1145 
1146 		ret = chip->onfi_set_features(mtd, chip,
1147 				ONFI_FEATURE_ADDR_TIMING_MODE,
1148 				tmode_param);
1149 		if (ret)
1150 			goto err;
1151 	}
1152 
1153 	ret = chip->setup_data_interface(mtd, chipnr, chip->data_interface);
1154 err:
1155 	return ret;
1156 }
1157 
1158 /**
1159  * nand_init_data_interface - find the best data interface and timings
1160  * @chip: The NAND chip
1161  *
1162  * Find the best data interface and NAND timings supported by the chip
1163  * and the driver.
1164  * First tries to retrieve supported timing modes from ONFI information,
1165  * and if the NAND chip does not support ONFI, relies on the
1166  * ->onfi_timing_mode_default specified in the nand_ids table. After this
1167  * function nand_chip->data_interface is initialized with the best timing mode
1168  * available.
1169  *
1170  * Returns 0 for success or negative error code otherwise.
1171  */
1172 static int nand_init_data_interface(struct nand_chip *chip)
1173 {
1174 	struct mtd_info *mtd = nand_to_mtd(chip);
1175 	int modes, mode, ret;
1176 
1177 	if (!chip->setup_data_interface)
1178 		return 0;
1179 
1180 	/*
1181 	 * First try to identify the best timings from ONFI parameters and
1182 	 * if the NAND does not support ONFI, fall back to the default ONFI
1183 	 * timing mode.
1184 	 */
1185 	modes = onfi_get_async_timing_mode(chip);
1186 	if (modes == ONFI_TIMING_MODE_UNKNOWN) {
1187 		if (!chip->onfi_timing_mode_default)
1188 			return 0;
1189 
1190 		modes = GENMASK(chip->onfi_timing_mode_default, 0);
1191 	}
1192 
1193 	chip->data_interface = kzalloc(sizeof(*chip->data_interface),
1194 				       GFP_KERNEL);
1195 	if (!chip->data_interface)
1196 		return -ENOMEM;
1197 
1198 	for (mode = fls(modes) - 1; mode >= 0; mode--) {
1199 		ret = onfi_init_data_interface(chip, chip->data_interface,
1200 					       NAND_SDR_IFACE, mode);
1201 		if (ret)
1202 			continue;
1203 
1204 		/* Pass NAND_DATA_IFACE_CHECK_ONLY to only check the timings */
1205 		ret = chip->setup_data_interface(mtd,
1206 						 NAND_DATA_IFACE_CHECK_ONLY,
1207 						 chip->data_interface);
1208 		if (!ret) {
1209 			chip->onfi_timing_mode_default = mode;
1210 			break;
1211 		}
1212 	}
1213 
1214 	return 0;
1215 }
1216 
1217 static void nand_release_data_interface(struct nand_chip *chip)
1218 {
1219 	kfree(chip->data_interface);
1220 }
1221 
1222 /**
1223  * nand_reset - Reset and initialize a NAND device
1224  * @chip: The NAND chip
1225  * @chipnr: Internal die id
1226  *
1227  * Returns 0 for success or negative error code otherwise
1228  */
1229 int nand_reset(struct nand_chip *chip, int chipnr)
1230 {
1231 	struct mtd_info *mtd = nand_to_mtd(chip);
1232 	int ret;
1233 
1234 	ret = nand_reset_data_interface(chip, chipnr);
1235 	if (ret)
1236 		return ret;
1237 
1238 	/*
1239 	 * The CS line has to be released before we can apply the new NAND
1240 	 * interface settings, hence this weird ->select_chip() dance.
1241 	 */
1242 	chip->select_chip(mtd, chipnr);
1243 	chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
1244 	chip->select_chip(mtd, -1);
1245 
1246 	chip->select_chip(mtd, chipnr);
1247 	ret = nand_setup_data_interface(chip, chipnr);
1248 	chip->select_chip(mtd, -1);
1249 	if (ret)
1250 		return ret;
1251 
1252 	return 0;
1253 }
1254 EXPORT_SYMBOL_GPL(nand_reset);
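/*
 * Illustrative sketch (hypothetical driver code): a controller driver that
 * needs to re-initialize every die, e.g. when coming back from a power
 * transition, could loop over the dice like this:
 *
 *	for (i = 0; i < chip->numchips; i++) {
 *		ret = nand_reset(chip, i);
 *		if (ret)
 *			return ret;
 *	}
 */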
1255 
1256 /**
1257  * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
1258  * @buf: buffer to test
1259  * @len: buffer length
1260  * @bitflips_threshold: maximum number of bitflips
1261  *
1262  * Check if a buffer contains only 0xff, which means the underlying region
1263  * has been erased and is ready to be programmed.
1264  * The bitflips_threshold specifies the maximum number of bitflips before
1265  * considering the region as not erased.
1266  * Note: The logic of this function has been extracted from the memweight
1267  * implementation, except that nand_check_erased_buf exits before
1268  * testing the whole buffer if the number of bitflips exceeds the
1269  * bitflips_threshold value.
1270  *
1271  * Returns a positive number of bitflips less than or equal to
1272  * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
1273  * threshold.
1274  */
1275 static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
1276 {
1277 	const unsigned char *bitmap = buf;
1278 	int bitflips = 0;
1279 	int weight;
1280 
1281 	for (; len && ((uintptr_t)bitmap) % sizeof(long);
1282 	     len--, bitmap++) {
1283 		weight = hweight8(*bitmap);
1284 		bitflips += BITS_PER_BYTE - weight;
1285 		if (unlikely(bitflips > bitflips_threshold))
1286 			return -EBADMSG;
1287 	}
1288 
1289 	for (; len >= sizeof(long);
1290 	     len -= sizeof(long), bitmap += sizeof(long)) {
1291 		unsigned long d = *((unsigned long *)bitmap);
1292 		if (d == ~0UL)
1293 			continue;
1294 		weight = hweight_long(d);
1295 		bitflips += BITS_PER_LONG - weight;
1296 		if (unlikely(bitflips > bitflips_threshold))
1297 			return -EBADMSG;
1298 	}
1299 
1300 	for (; len > 0; len--, bitmap++) {
1301 		weight = hweight8(*bitmap);
1302 		bitflips += BITS_PER_BYTE - weight;
1303 		if (unlikely(bitflips > bitflips_threshold))
1304 			return -EBADMSG;
1305 	}
1306 
1307 	return bitflips;
1308 }
1309 
1310 /**
1311  * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
1312  *				 0xff data
1313  * @data: data buffer to test
1314  * @datalen: data length
1315  * @ecc: ECC buffer
1316  * @ecclen: ECC length
1317  * @extraoob: extra OOB buffer
1318  * @extraooblen: extra OOB length
1319  * @bitflips_threshold: maximum number of bitflips
1320  *
1321  * Check if a data buffer and its associated ECC and OOB data contains only
1322  * 0xff pattern, which means the underlying region has been erased and is
1323  * ready to be programmed.
1324  * The bitflips_threshold specifies the maximum number of bitflips before
1325  * considering the region as not erased.
1326  *
1327  * Note:
1328  * 1/ ECC algorithms work on pre-defined block sizes which are usually
1329  *    different from the NAND page size. When fixing bitflips, ECC engines will
1330  *    report the number of errors per chunk, and the NAND core infrastructure
1331  *    expects you to return the maximum number of bitflips for the whole page.
1332  *    This is why you should always use this function on a single chunk and
1333  *    not on the whole page. After checking each chunk you should update your
1334  *    max_bitflips value accordingly.
1335  * 2/ When checking for bitflips in erased pages you should not only check
1336  *    the payload data but also their associated ECC data, because a user might
1337  *    have programmed almost all bits to 1 but a few. In this case, we
1338  *    shouldn't consider the chunk as erased, and checking the ECC bytes prevents
1339  *    this case.
1340  * 3/ The extraoob argument is optional, and should be used if some of your OOB
1341  *    data are protected by the ECC engine.
1342  *    It could also be used if you support subpages and want to attach some
1343  *    extra OOB data to an ECC chunk.
1344  *
1345  * Returns a positive number of bitflips less than or equal to
1346  * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
1347  * threshold. In case of success, the passed buffers are filled with 0xff.
1348  */
1349 int nand_check_erased_ecc_chunk(void *data, int datalen,
1350 				void *ecc, int ecclen,
1351 				void *extraoob, int extraooblen,
1352 				int bitflips_threshold)
1353 {
1354 	int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
1355 
1356 	data_bitflips = nand_check_erased_buf(data, datalen,
1357 					      bitflips_threshold);
1358 	if (data_bitflips < 0)
1359 		return data_bitflips;
1360 
1361 	bitflips_threshold -= data_bitflips;
1362 
1363 	ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
1364 	if (ecc_bitflips < 0)
1365 		return ecc_bitflips;
1366 
1367 	bitflips_threshold -= ecc_bitflips;
1368 
1369 	extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
1370 						  bitflips_threshold);
1371 	if (extraoob_bitflips < 0)
1372 		return extraoob_bitflips;
1373 
1374 	if (data_bitflips)
1375 		memset(data, 0xff, datalen);
1376 
1377 	if (ecc_bitflips)
1378 		memset(ecc, 0xff, ecclen);
1379 
1380 	if (extraoob_bitflips)
1381 		memset(extraoob, 0xff, extraooblen);
1382 
1383 	return data_bitflips + ecc_bitflips + extraoob_bitflips;
1384 }
1385 EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
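/*
 * Illustrative sketch of the per-chunk usage described above (variable names
 * follow the readers below): inside an ecc.read_page implementation, an
 * uncorrectable chunk is re-checked as possibly erased before being counted
 * as a failure:
 *
 *	stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
 *	if (stat == -EBADMSG)
 *		stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
 *						   &ecc_code[i], chip->ecc.bytes,
 *						   NULL, 0, chip->ecc.strength);
 *	if (stat < 0)
 *		mtd->ecc_stats.failed++;
 *	else
 *		max_bitflips = max_t(unsigned int, max_bitflips, stat);
 *
 * The in-tree readers below (nand_read_subpage(), nand_read_page_hwecc(), ...)
 * follow this pattern, additionally gated on NAND_ECC_GENERIC_ERASED_CHECK and
 * accounting corrected bitflips in mtd->ecc_stats.corrected.
 */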
1386 
1387 /**
1388  * nand_read_page_raw - [INTERN] read raw page data without ecc
1389  * @mtd: mtd info structure
1390  * @chip: nand chip info structure
1391  * @buf: buffer to store read data
1392  * @oob_required: caller requires OOB data read to chip->oob_poi
1393  * @page: page number to read
1394  *
1395  * Not for syndrome calculating ECC controllers, which use a special oob layout.
1396  */
1397 int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1398 		       uint8_t *buf, int oob_required, int page)
1399 {
1400 	chip->read_buf(mtd, buf, mtd->writesize);
1401 	if (oob_required)
1402 		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1403 	return 0;
1404 }
1405 EXPORT_SYMBOL(nand_read_page_raw);
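/*
 * Sketch (hypothetical assignment): controllers that do not need a special
 * raw access path can simply reuse this helper, together with its write-side
 * counterpart nand_write_page_raw() defined later in this file:
 *
 *	chip->ecc.read_page_raw = nand_read_page_raw;
 *	chip->ecc.write_page_raw = nand_write_page_raw;
 *
 * nand_scan_tail() also fills these in as defaults when they are left NULL.
 */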
1406 
1407 /**
1408  * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
1409  * @mtd: mtd info structure
1410  * @chip: nand chip info structure
1411  * @buf: buffer to store read data
1412  * @oob_required: caller requires OOB data read to chip->oob_poi
1413  * @page: page number to read
1414  *
1415  * We need a special oob layout and handling even when OOB isn't used.
1416  */
1417 static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
1418 				       struct nand_chip *chip, uint8_t *buf,
1419 				       int oob_required, int page)
1420 {
1421 	int eccsize = chip->ecc.size;
1422 	int eccbytes = chip->ecc.bytes;
1423 	uint8_t *oob = chip->oob_poi;
1424 	int steps, size;
1425 
1426 	for (steps = chip->ecc.steps; steps > 0; steps--) {
1427 		chip->read_buf(mtd, buf, eccsize);
1428 		buf += eccsize;
1429 
1430 		if (chip->ecc.prepad) {
1431 			chip->read_buf(mtd, oob, chip->ecc.prepad);
1432 			oob += chip->ecc.prepad;
1433 		}
1434 
1435 		chip->read_buf(mtd, oob, eccbytes);
1436 		oob += eccbytes;
1437 
1438 		if (chip->ecc.postpad) {
1439 			chip->read_buf(mtd, oob, chip->ecc.postpad);
1440 			oob += chip->ecc.postpad;
1441 		}
1442 	}
1443 
1444 	size = mtd->oobsize - (oob - chip->oob_poi);
1445 	if (size)
1446 		chip->read_buf(mtd, oob, size);
1447 
1448 	return 0;
1449 }
1450 
1451 /**
1452  * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
1453  * @mtd: mtd info structure
1454  * @chip: nand chip info structure
1455  * @buf: buffer to store read data
1456  * @oob_required: caller requires OOB data read to chip->oob_poi
1457  * @page: page number to read
1458  */
1459 static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1460 				uint8_t *buf, int oob_required, int page)
1461 {
1462 	int i, eccsize = chip->ecc.size, ret;
1463 	int eccbytes = chip->ecc.bytes;
1464 	int eccsteps = chip->ecc.steps;
1465 	uint8_t *p = buf;
1466 	uint8_t *ecc_calc = chip->buffers->ecccalc;
1467 	uint8_t *ecc_code = chip->buffers->ecccode;
1468 	unsigned int max_bitflips = 0;
1469 
1470 	chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
1471 
1472 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
1473 		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1474 
1475 	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1476 					 chip->ecc.total);
1477 	if (ret)
1478 		return ret;
1479 
1480 	eccsteps = chip->ecc.steps;
1481 	p = buf;
1482 
1483 	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1484 		int stat;
1485 
1486 		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
1487 		if (stat < 0) {
1488 			mtd->ecc_stats.failed++;
1489 		} else {
1490 			mtd->ecc_stats.corrected += stat;
1491 			max_bitflips = max_t(unsigned int, max_bitflips, stat);
1492 		}
1493 	}
1494 	return max_bitflips;
1495 }
1496 
1497 /**
1498  * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
1499  * @mtd: mtd info structure
1500  * @chip: nand chip info structure
1501  * @data_offs: offset of requested data within the page
1502  * @readlen: data length
1503  * @bufpoi: buffer to store read data
1504  * @page: page number to read
1505  */
1506 static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1507 			uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
1508 			int page)
1509 {
1510 	int start_step, end_step, num_steps, ret;
1511 	uint8_t *p;
1512 	int data_col_addr, i, gaps = 0;
1513 	int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
1514 	int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
1515 	int index, section = 0;
1516 	unsigned int max_bitflips = 0;
1517 	struct mtd_oob_region oobregion = { };
1518 
1519 	/* Column address within the page aligned to ECC size (256 bytes) */
1520 	start_step = data_offs / chip->ecc.size;
1521 	end_step = (data_offs + readlen - 1) / chip->ecc.size;
1522 	num_steps = end_step - start_step + 1;
1523 	index = start_step * chip->ecc.bytes;
1524 
1525 	/* Data size aligned to ecc.size */
1526 	datafrag_len = num_steps * chip->ecc.size;
1527 	eccfrag_len = num_steps * chip->ecc.bytes;
1528 
1529 	data_col_addr = start_step * chip->ecc.size;
1530 	/* If we are not reading page-aligned data */
1531 	if (data_col_addr != 0)
1532 		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
1533 
1534 	p = bufpoi + data_col_addr;
1535 	chip->read_buf(mtd, p, datafrag_len);
1536 
1537 	/* Calculate ECC */
1538 	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
1539 		chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
1540 
1541 	/*
1542 	 * The performance is faster if we position offsets according to
1543 	 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
1544 	 */
1545 	ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
1546 	if (ret)
1547 		return ret;
1548 
1549 	if (oobregion.length < eccfrag_len)
1550 		gaps = 1;
1551 
1552 	if (gaps) {
1553 		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
1554 		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1555 	} else {
1556 		/*
1557 		 * Send the command to read the particular ECC bytes; take care
1558 		 * of buswidth alignment in read_buf.
1559 		 */
1560 		aligned_pos = oobregion.offset & ~(busw - 1);
1561 		aligned_len = eccfrag_len;
1562 		if (oobregion.offset & (busw - 1))
1563 			aligned_len++;
1564 		if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
1565 		    (busw - 1))
1566 			aligned_len++;
1567 
1568 		chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
1569 			      mtd->writesize + aligned_pos, -1);
1570 		chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
1571 	}
1572 
1573 	ret = mtd_ooblayout_get_eccbytes(mtd, chip->buffers->ecccode,
1574 					 chip->oob_poi, index, eccfrag_len);
1575 	if (ret)
1576 		return ret;
1577 
1578 	p = bufpoi + data_col_addr;
1579 	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
1580 		int stat;
1581 
1582 		stat = chip->ecc.correct(mtd, p,
1583 			&chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
1584 		if (stat == -EBADMSG &&
1585 		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1586 			/* check for empty pages with bitflips */
1587 			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
1588 						&chip->buffers->ecccode[i],
1589 						chip->ecc.bytes,
1590 						NULL, 0,
1591 						chip->ecc.strength);
1592 		}
1593 
1594 		if (stat < 0) {
1595 			mtd->ecc_stats.failed++;
1596 		} else {
1597 			mtd->ecc_stats.corrected += stat;
1598 			max_bitflips = max_t(unsigned int, max_bitflips, stat);
1599 		}
1600 	}
1601 	return max_bitflips;
1602 }
1603 
1604 /**
1605  * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
1606  * @mtd: mtd info structure
1607  * @chip: nand chip info structure
1608  * @buf: buffer to store read data
1609  * @oob_required: caller requires OOB data read to chip->oob_poi
1610  * @page: page number to read
1611  *
1612  * Not for syndrome calculating ECC controllers which need a special oob layout.
1613  */
1614 static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1615 				uint8_t *buf, int oob_required, int page)
1616 {
1617 	int i, eccsize = chip->ecc.size, ret;
1618 	int eccbytes = chip->ecc.bytes;
1619 	int eccsteps = chip->ecc.steps;
1620 	uint8_t *p = buf;
1621 	uint8_t *ecc_calc = chip->buffers->ecccalc;
1622 	uint8_t *ecc_code = chip->buffers->ecccode;
1623 	unsigned int max_bitflips = 0;
1624 
1625 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1626 		chip->ecc.hwctl(mtd, NAND_ECC_READ);
1627 		chip->read_buf(mtd, p, eccsize);
1628 		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1629 	}
1630 	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1631 
1632 	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1633 					 chip->ecc.total);
1634 	if (ret)
1635 		return ret;
1636 
1637 	eccsteps = chip->ecc.steps;
1638 	p = buf;
1639 
1640 	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1641 		int stat;
1642 
1643 		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
1644 		if (stat == -EBADMSG &&
1645 		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1646 			/* check for empty pages with bitflips */
1647 			stat = nand_check_erased_ecc_chunk(p, eccsize,
1648 						&ecc_code[i], eccbytes,
1649 						NULL, 0,
1650 						chip->ecc.strength);
1651 		}
1652 
1653 		if (stat < 0) {
1654 			mtd->ecc_stats.failed++;
1655 		} else {
1656 			mtd->ecc_stats.corrected += stat;
1657 			max_bitflips = max_t(unsigned int, max_bitflips, stat);
1658 		}
1659 	}
1660 	return max_bitflips;
1661 }
1662 
1663 /**
1664  * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
1665  * @mtd: mtd info structure
1666  * @chip: nand chip info structure
1667  * @buf: buffer to store read data
1668  * @oob_required: caller requires OOB data read to chip->oob_poi
1669  * @page: page number to read
1670  *
1671  * Hardware ECC for large page chips that requires the OOB to be read first.
1672  * For this ECC mode, the write_page method is re-used from ECC_HW. These
1673  * methods read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME
1674  * support, which, with multiple ECC steps, follows the "infix ECC" scheme and
1675  * reads/writes ECC from the data area, overwriting the NAND manufacturer bad block markings.
1676  */
1677 static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1678 	struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
1679 {
1680 	int i, eccsize = chip->ecc.size, ret;
1681 	int eccbytes = chip->ecc.bytes;
1682 	int eccsteps = chip->ecc.steps;
1683 	uint8_t *p = buf;
1684 	uint8_t *ecc_code = chip->buffers->ecccode;
1685 	uint8_t *ecc_calc = chip->buffers->ecccalc;
1686 	unsigned int max_bitflips = 0;
1687 
1688 	/* Read the OOB area first */
1689 	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
1690 	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1691 	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
1692 
1693 	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1694 					 chip->ecc.total);
1695 	if (ret)
1696 		return ret;
1697 
1698 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1699 		int stat;
1700 
1701 		chip->ecc.hwctl(mtd, NAND_ECC_READ);
1702 		chip->read_buf(mtd, p, eccsize);
1703 		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1704 
1705 		stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
1706 		if (stat == -EBADMSG &&
1707 		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1708 			/* check for empty pages with bitflips */
1709 			stat = nand_check_erased_ecc_chunk(p, eccsize,
1710 						&ecc_code[i], eccbytes,
1711 						NULL, 0,
1712 						chip->ecc.strength);
1713 		}
1714 
1715 		if (stat < 0) {
1716 			mtd->ecc_stats.failed++;
1717 		} else {
1718 			mtd->ecc_stats.corrected += stat;
1719 			max_bitflips = max_t(unsigned int, max_bitflips, stat);
1720 		}
1721 	}
1722 	return max_bitflips;
1723 }
1724 
1725 /**
1726  * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
1727  * @mtd: mtd info structure
1728  * @chip: nand chip info structure
1729  * @buf: buffer to store read data
1730  * @oob_required: caller requires OOB data read to chip->oob_poi
1731  * @page: page number to read
1732  *
1733  * The hw generator calculates the error syndrome automatically. Therefore we
1734  * need a special oob layout and handling.
1735  */
1736 static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1737 				   uint8_t *buf, int oob_required, int page)
1738 {
1739 	int i, eccsize = chip->ecc.size;
1740 	int eccbytes = chip->ecc.bytes;
1741 	int eccsteps = chip->ecc.steps;
1742 	int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
1743 	uint8_t *p = buf;
1744 	uint8_t *oob = chip->oob_poi;
1745 	unsigned int max_bitflips = 0;
1746 
1747 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1748 		int stat;
1749 
1750 		chip->ecc.hwctl(mtd, NAND_ECC_READ);
1751 		chip->read_buf(mtd, p, eccsize);
1752 
1753 		if (chip->ecc.prepad) {
1754 			chip->read_buf(mtd, oob, chip->ecc.prepad);
1755 			oob += chip->ecc.prepad;
1756 		}
1757 
1758 		chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
1759 		chip->read_buf(mtd, oob, eccbytes);
1760 		stat = chip->ecc.correct(mtd, p, oob, NULL);
1761 
1762 		oob += eccbytes;
1763 
1764 		if (chip->ecc.postpad) {
1765 			chip->read_buf(mtd, oob, chip->ecc.postpad);
1766 			oob += chip->ecc.postpad;
1767 		}
1768 
1769 		if (stat == -EBADMSG &&
1770 		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1771 			/* check for empty pages with bitflips */
1772 			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
1773 							   oob - eccpadbytes,
1774 							   eccpadbytes,
1775 							   NULL, 0,
1776 							   chip->ecc.strength);
1777 		}
1778 
1779 		if (stat < 0) {
1780 			mtd->ecc_stats.failed++;
1781 		} else {
1782 			mtd->ecc_stats.corrected += stat;
1783 			max_bitflips = max_t(unsigned int, max_bitflips, stat);
1784 		}
1785 	}
1786 
1787 	/* Calculate remaining oob bytes */
1788 	i = mtd->oobsize - (oob - chip->oob_poi);
1789 	if (i)
1790 		chip->read_buf(mtd, oob, i);
1791 
1792 	return max_bitflips;
1793 }
1794 
1795 /**
1796  * nand_transfer_oob - [INTERN] Transfer oob to client buffer
1797  * @mtd: mtd info structure
1798  * @oob: oob destination address
1799  * @ops: oob ops structure
1800  * @len: size of oob to transfer
1801  */
1802 static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob,
1803 				  struct mtd_oob_ops *ops, size_t len)
1804 {
1805 	struct nand_chip *chip = mtd_to_nand(mtd);
1806 	int ret;
1807 
1808 	switch (ops->mode) {
1809 
1810 	case MTD_OPS_PLACE_OOB:
1811 	case MTD_OPS_RAW:
1812 		memcpy(oob, chip->oob_poi + ops->ooboffs, len);
1813 		return oob + len;
1814 
1815 	case MTD_OPS_AUTO_OOB:
1816 		ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
1817 						  ops->ooboffs, len);
1818 		BUG_ON(ret);
1819 		return oob + len;
1820 
1821 	default:
1822 		BUG();
1823 	}
1824 	return NULL;
1825 }
1826 
1827 /**
1828  * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
1829  * @mtd: MTD device structure
1830  * @retry_mode: the retry mode to use
1831  *
1832  * Some vendors supply a special command to shift the Vt threshold, to be used
1833  * when there are too many bitflips in a page (i.e., ECC error). After setting
1834  * a new threshold, the host should retry reading the page.
1835  */
1836 static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
1837 {
1838 	struct nand_chip *chip = mtd_to_nand(mtd);
1839 
1840 	pr_debug("setting READ RETRY mode %d\n", retry_mode);
1841 
1842 	if (retry_mode >= chip->read_retries)
1843 		return -EINVAL;
1844 
1845 	if (!chip->setup_read_retry)
1846 		return -EOPNOTSUPP;
1847 
1848 	return chip->setup_read_retry(mtd, retry_mode);
1849 }
1850 
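/*
 * Illustrative sketch (not from the original driver): a vendor or controller
 * driver advertises read-retry support by filling in read_retries and
 * setup_read_retry; my_set_vt_threshold is a hypothetical helper that would
 * issue the vendor-specific command sequence to shift the Vt threshold.
 */
extern int my_set_vt_threshold(struct mtd_info *mtd, int retry_mode);

static void __maybe_unused example_enable_read_retry(struct nand_chip *chip)
{
	chip->read_retries = 8;			/* retry modes 0..7 */
	chip->setup_read_retry = my_set_vt_threshold;
}
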
1851 /**
1852  * nand_do_read_ops - [INTERN] Read data with ECC
1853  * @mtd: MTD device structure
1854  * @from: offset to read from
1855  * @ops: oob ops structure
1856  *
1857  * Internal function. Called with chip held.
1858  */
1859 static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1860 			    struct mtd_oob_ops *ops)
1861 {
1862 	int chipnr, page, realpage, col, bytes, aligned, oob_required;
1863 	struct nand_chip *chip = mtd_to_nand(mtd);
1864 	int ret = 0;
1865 	uint32_t readlen = ops->len;
1866 	uint32_t oobreadlen = ops->ooblen;
1867 	uint32_t max_oobsize = mtd_oobavail(mtd, ops);
1868 
1869 	uint8_t *bufpoi, *oob, *buf;
1870 	int use_bufpoi;
1871 	unsigned int max_bitflips = 0;
1872 	int retry_mode = 0;
1873 	bool ecc_fail = false;
1874 
1875 	chipnr = (int)(from >> chip->chip_shift);
1876 	chip->select_chip(mtd, chipnr);
1877 
1878 	realpage = (int)(from >> chip->page_shift);
1879 	page = realpage & chip->pagemask;
1880 
1881 	col = (int)(from & (mtd->writesize - 1));
1882 
1883 	buf = ops->datbuf;
1884 	oob = ops->oobbuf;
1885 	oob_required = oob ? 1 : 0;
1886 
1887 	while (1) {
1888 		unsigned int ecc_failures = mtd->ecc_stats.failed;
1889 
1890 		bytes = min(mtd->writesize - col, readlen);
1891 		aligned = (bytes == mtd->writesize);
1892 
1893 		if (!aligned)
1894 			use_bufpoi = 1;
1895 		else if (chip->options & NAND_USE_BOUNCE_BUFFER)
1896 			use_bufpoi = !virt_addr_valid(buf) ||
1897 				     !IS_ALIGNED((unsigned long)buf,
1898 						 chip->buf_align);
1899 		else
1900 			use_bufpoi = 0;
1901 
1902 		/* Is the current page in the buffer? */
1903 		if (realpage != chip->pagebuf || oob) {
1904 			bufpoi = use_bufpoi ? chip->buffers->databuf : buf;
1905 
1906 			if (use_bufpoi && aligned)
1907 				pr_debug("%s: using read bounce buffer for buf@%p\n",
1908 						 __func__, buf);
1909 
1910 read_retry:
1911 			if (nand_standard_page_accessors(&chip->ecc))
1912 				chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
1913 
1914 			/*
1915 			 * Now read the page into the buffer.  Absent an error,
1916 			 * the read methods return max bitflips per ecc step.
1917 			 */
1918 			if (unlikely(ops->mode == MTD_OPS_RAW))
1919 				ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
1920 							      oob_required,
1921 							      page);
1922 			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
1923 				 !oob)
1924 				ret = chip->ecc.read_subpage(mtd, chip,
1925 							col, bytes, bufpoi,
1926 							page);
1927 			else
1928 				ret = chip->ecc.read_page(mtd, chip, bufpoi,
1929 							  oob_required, page);
1930 			if (ret < 0) {
1931 				if (use_bufpoi)
1932 					/* Invalidate page cache */
1933 					chip->pagebuf = -1;
1934 				break;
1935 			}
1936 
1937 			/* Transfer not aligned data */
1938 			if (use_bufpoi) {
1939 				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
1940 				    !(mtd->ecc_stats.failed - ecc_failures) &&
1941 				    (ops->mode != MTD_OPS_RAW)) {
1942 					chip->pagebuf = realpage;
1943 					chip->pagebuf_bitflips = ret;
1944 				} else {
1945 					/* Invalidate page cache */
1946 					chip->pagebuf = -1;
1947 				}
1948 				memcpy(buf, chip->buffers->databuf + col, bytes);
1949 			}
1950 
1951 			if (unlikely(oob)) {
1952 				int toread = min(oobreadlen, max_oobsize);
1953 
1954 				if (toread) {
1955 					oob = nand_transfer_oob(mtd,
1956 						oob, ops, toread);
1957 					oobreadlen -= toread;
1958 				}
1959 			}
1960 
1961 			if (chip->options & NAND_NEED_READRDY) {
1962 				/* Apply delay or wait for ready/busy pin */
1963 				if (!chip->dev_ready)
1964 					udelay(chip->chip_delay);
1965 				else
1966 					nand_wait_ready(mtd);
1967 			}
1968 
1969 			if (mtd->ecc_stats.failed - ecc_failures) {
1970 				if (retry_mode + 1 < chip->read_retries) {
1971 					retry_mode++;
1972 					ret = nand_setup_read_retry(mtd,
1973 							retry_mode);
1974 					if (ret < 0)
1975 						break;
1976 
1977 					/* Reset failures; retry */
1978 					mtd->ecc_stats.failed = ecc_failures;
1979 					goto read_retry;
1980 				} else {
1981 					/* No more retry modes; real failure */
1982 					ecc_fail = true;
1983 				}
1984 			}
1985 
1986 			buf += bytes;
1987 			max_bitflips = max_t(unsigned int, max_bitflips, ret);
1988 		} else {
1989 			memcpy(buf, chip->buffers->databuf + col, bytes);
1990 			buf += bytes;
1991 			max_bitflips = max_t(unsigned int, max_bitflips,
1992 					     chip->pagebuf_bitflips);
1993 		}
1994 
1995 		readlen -= bytes;
1996 
1997 		/* Reset to retry mode 0 */
1998 		if (retry_mode) {
1999 			ret = nand_setup_read_retry(mtd, 0);
2000 			if (ret < 0)
2001 				break;
2002 			retry_mode = 0;
2003 		}
2004 
2005 		if (!readlen)
2006 			break;
2007 
2008 		/* For subsequent reads align to page boundary */
2009 		col = 0;
2010 		/* Increment page address */
2011 		realpage++;
2012 
2013 		page = realpage & chip->pagemask;
2014 		/* Check if we cross a chip boundary */
2015 		if (!page) {
2016 			chipnr++;
2017 			chip->select_chip(mtd, -1);
2018 			chip->select_chip(mtd, chipnr);
2019 		}
2020 	}
2021 	chip->select_chip(mtd, -1);
2022 
2023 	ops->retlen = ops->len - (size_t) readlen;
2024 	if (oob)
2025 		ops->oobretlen = ops->ooblen - oobreadlen;
2026 
2027 	if (ret < 0)
2028 		return ret;
2029 
2030 	if (ecc_fail)
2031 		return -EBADMSG;
2032 
2033 	return max_bitflips;
2034 }
2035 
2036 /**
2037  * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ops
2038  * @mtd: MTD device structure
2039  * @from: offset to read from
2040  * @len: number of bytes to read
2041  * @retlen: pointer to variable to store the number of read bytes
2042  * @buf: the databuffer to put data
2043  *
2044  * Get hold of the chip and call nand_do_read_ops().
2045  */
2046 static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
2047 		     size_t *retlen, uint8_t *buf)
2048 {
2049 	struct mtd_oob_ops ops;
2050 	int ret;
2051 
2052 	nand_get_device(mtd, FL_READING);
2053 	memset(&ops, 0, sizeof(ops));
2054 	ops.len = len;
2055 	ops.datbuf = buf;
2056 	ops.mode = MTD_OPS_PLACE_OOB;
2057 	ret = nand_do_read_ops(mtd, from, &ops);
2058 	*retlen = ops.retlen;
2059 	nand_release_device(mtd);
2060 	return ret;
2061 }
2062 
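/*
 * Illustrative sketch (not from the original driver): how an MTD client ends
 * up in nand_read() above. mtd_read() compares the bitflip count returned
 * here against the bitflip threshold and reports 0 or -EUCLEAN instead of a
 * count; 'example_mtd' is assumed to be an already registered raw NAND
 * device.
 */
static int __maybe_unused example_read_first_page(struct mtd_info *example_mtd)
{
	size_t retlen;
	int err;
	u8 *buf = kmalloc(example_mtd->writesize, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	err = mtd_read(example_mtd, 0, example_mtd->writesize, &retlen, buf);
	if (err == -EUCLEAN)
		err = 0;	/* bitflips were corrected; data is valid */

	kfree(buf);
	return err;
}
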
2063 /**
2064  * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
2065  * @mtd: mtd info structure
2066  * @chip: nand chip info structure
2067  * @page: page number to read
2068  */
2069 int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
2070 {
2071 	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
2072 	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
2073 	return 0;
2074 }
2075 EXPORT_SYMBOL(nand_read_oob_std);
2076 
2077 /**
2078  * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
2079  *			    with syndromes
2080  * @mtd: mtd info structure
2081  * @chip: nand chip info structure
2082  * @page: page number to read
2083  */
2084 int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
2085 			   int page)
2086 {
2087 	int length = mtd->oobsize;
2088 	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
2089 	int eccsize = chip->ecc.size;
2090 	uint8_t *bufpoi = chip->oob_poi;
2091 	int i, toread, sndrnd = 0, pos;
2092 
2093 	chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
2094 	for (i = 0; i < chip->ecc.steps; i++) {
2095 		if (sndrnd) {
2096 			pos = eccsize + i * (eccsize + chunk);
2097 			if (mtd->writesize > 512)
2098 				chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1);
2099 			else
2100 				chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page);
2101 		} else
2102 			sndrnd = 1;
2103 		toread = min_t(int, length, chunk);
2104 		chip->read_buf(mtd, bufpoi, toread);
2105 		bufpoi += toread;
2106 		length -= toread;
2107 	}
2108 	if (length > 0)
2109 		chip->read_buf(mtd, bufpoi, length);
2110 
2111 	return 0;
2112 }
2113 EXPORT_SYMBOL(nand_read_oob_syndrome);
2114 
2115 /**
2116  * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
2117  * @mtd: mtd info structure
2118  * @chip: nand chip info structure
2119  * @page: page number to write
2120  */
2121 int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
2122 {
2123 	int status = 0;
2124 	const uint8_t *buf = chip->oob_poi;
2125 	int length = mtd->oobsize;
2126 
2127 	chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
2128 	chip->write_buf(mtd, buf, length);
2129 	/* Send command to program the OOB data */
2130 	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2131 
2132 	status = chip->waitfunc(mtd, chip);
2133 
2134 	return status & NAND_STATUS_FAIL ? -EIO : 0;
2135 }
2136 EXPORT_SYMBOL(nand_write_oob_std);
2137 
2138 /**
2139  * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
2140  *			     with syndrome - only for large page flash
2141  * @mtd: mtd info structure
2142  * @chip: nand chip info structure
2143  * @page: page number to write
2144  */
2145 int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
2146 			    int page)
2147 {
2148 	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
2149 	int eccsize = chip->ecc.size, length = mtd->oobsize;
2150 	int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
2151 	const uint8_t *bufpoi = chip->oob_poi;
2152 
2153 	/*
2154 	 * data-ecc-data-ecc ... ecc-oob
2155 	 * or
2156 	 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
2157 	 */
2158 	if (!chip->ecc.prepad && !chip->ecc.postpad) {
2159 		pos = steps * (eccsize + chunk);
2160 		steps = 0;
2161 	} else
2162 		pos = eccsize;
2163 
2164 	chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
2165 	for (i = 0; i < steps; i++) {
2166 		if (sndcmd) {
2167 			if (mtd->writesize <= 512) {
2168 				uint32_t fill = 0xFFFFFFFF;
2169 
2170 				len = eccsize;
2171 				while (len > 0) {
2172 					int num = min_t(int, len, 4);
2173 					chip->write_buf(mtd, (uint8_t *)&fill,
2174 							num);
2175 					len -= num;
2176 				}
2177 			} else {
2178 				pos = eccsize + i * (eccsize + chunk);
2179 				chip->cmdfunc(mtd, NAND_CMD_RNDIN, pos, -1);
2180 			}
2181 		} else
2182 			sndcmd = 1;
2183 		len = min_t(int, length, chunk);
2184 		chip->write_buf(mtd, bufpoi, len);
2185 		bufpoi += len;
2186 		length -= len;
2187 	}
2188 	if (length > 0)
2189 		chip->write_buf(mtd, bufpoi, length);
2190 
2191 	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2192 	status = chip->waitfunc(mtd, chip);
2193 
2194 	return status & NAND_STATUS_FAIL ? -EIO : 0;
2195 }
2196 EXPORT_SYMBOL(nand_write_oob_syndrome);
2197 
2198 /**
2199  * nand_do_read_oob - [INTERN] NAND read out-of-band
2200  * @mtd: MTD device structure
2201  * @from: offset to read from
2202  * @ops: oob operations description structure
2203  *
2204  * NAND read out-of-band data from the spare area.
2205  */
2206 static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
2207 			    struct mtd_oob_ops *ops)
2208 {
2209 	unsigned int max_bitflips = 0;
2210 	int page, realpage, chipnr;
2211 	struct nand_chip *chip = mtd_to_nand(mtd);
2212 	struct mtd_ecc_stats stats;
2213 	int readlen = ops->ooblen;
2214 	int len;
2215 	uint8_t *buf = ops->oobbuf;
2216 	int ret = 0;
2217 
2218 	pr_debug("%s: from = 0x%08Lx, len = %i\n",
2219 			__func__, (unsigned long long)from, readlen);
2220 
2221 	stats = mtd->ecc_stats;
2222 
2223 	len = mtd_oobavail(mtd, ops);
2224 
2225 	if (unlikely(ops->ooboffs >= len)) {
2226 		pr_debug("%s: attempt to start read outside oob\n",
2227 				__func__);
2228 		return -EINVAL;
2229 	}
2230 
2231 	/* Do not allow reads past end of device */
2232 	if (unlikely(from >= mtd->size ||
2233 		     ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
2234 					(from >> chip->page_shift)) * len)) {
2235 		pr_debug("%s: attempt to read beyond end of device\n",
2236 				__func__);
2237 		return -EINVAL;
2238 	}
2239 
2240 	chipnr = (int)(from >> chip->chip_shift);
2241 	chip->select_chip(mtd, chipnr);
2242 
2243 	/* Shift to get page */
2244 	realpage = (int)(from >> chip->page_shift);
2245 	page = realpage & chip->pagemask;
2246 
2247 	while (1) {
2248 		if (ops->mode == MTD_OPS_RAW)
2249 			ret = chip->ecc.read_oob_raw(mtd, chip, page);
2250 		else
2251 			ret = chip->ecc.read_oob(mtd, chip, page);
2252 
2253 		if (ret < 0)
2254 			break;
2255 
2256 		len = min(len, readlen);
2257 		buf = nand_transfer_oob(mtd, buf, ops, len);
2258 
2259 		if (chip->options & NAND_NEED_READRDY) {
2260 			/* Apply delay or wait for ready/busy pin */
2261 			if (!chip->dev_ready)
2262 				udelay(chip->chip_delay);
2263 			else
2264 				nand_wait_ready(mtd);
2265 		}
2266 
2267 		max_bitflips = max_t(unsigned int, max_bitflips, ret);
2268 
2269 		readlen -= len;
2270 		if (!readlen)
2271 			break;
2272 
2273 		/* Increment page address */
2274 		realpage++;
2275 
2276 		page = realpage & chip->pagemask;
2277 		/* Check if we cross a chip boundary */
2278 		if (!page) {
2279 			chipnr++;
2280 			chip->select_chip(mtd, -1);
2281 			chip->select_chip(mtd, chipnr);
2282 		}
2283 	}
2284 	chip->select_chip(mtd, -1);
2285 
2286 	ops->oobretlen = ops->ooblen - readlen;
2287 
2288 	if (ret < 0)
2289 		return ret;
2290 
2291 	if (mtd->ecc_stats.failed - stats.failed)
2292 		return -EBADMSG;
2293 
2294 	return max_bitflips;
2295 }
2296 
2297 /**
2298  * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
2299  * @mtd: MTD device structure
2300  * @from: offset to read from
2301  * @ops: oob operation description structure
2302  *
2303  * NAND read data and/or out-of-band data.
2304  */
2305 static int nand_read_oob(struct mtd_info *mtd, loff_t from,
2306 			 struct mtd_oob_ops *ops)
2307 {
2308 	int ret;
2309 
2310 	ops->retlen = 0;
2311 
2312 	/* Do not allow reads past end of device */
2313 	if (ops->datbuf && (from + ops->len) > mtd->size) {
2314 		pr_debug("%s: attempt to read beyond end of device\n",
2315 				__func__);
2316 		return -EINVAL;
2317 	}
2318 
2319 	if (ops->mode != MTD_OPS_PLACE_OOB &&
2320 	    ops->mode != MTD_OPS_AUTO_OOB &&
2321 	    ops->mode != MTD_OPS_RAW)
2322 		return -ENOTSUPP;
2323 
2324 	nand_get_device(mtd, FL_READING);
2325 
2326 	if (!ops->datbuf)
2327 		ret = nand_do_read_oob(mtd, from, ops);
2328 	else
2329 		ret = nand_do_read_ops(mtd, from, ops);
2330 
2331 	nand_release_device(mtd);
2332 	return ret;
2333 }
2334 
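/*
 * Illustrative sketch (not from the original driver): reading page data and
 * the free OOB bytes in one call through the ops interface served by
 * nand_read_oob() above. With MTD_OPS_AUTO_OOB, nand_transfer_oob() copies
 * only the free bytes described by the ooblayout. Callers are assumed to
 * pass buffers of at least writesize and oobavail bytes.
 */
static int __maybe_unused example_read_page_and_oob(struct mtd_info *example_mtd,
						    loff_t from, u8 *data, u8 *oob)
{
	struct mtd_oob_ops ops = {
		.mode   = MTD_OPS_AUTO_OOB,
		.len    = example_mtd->writesize,
		.datbuf = data,
		.ooblen = example_mtd->oobavail,
		.oobbuf = oob,
	};

	/* routes through nand_read_oob() -> nand_do_read_ops() */
	return mtd_read_oob(example_mtd, from, &ops);
}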
2335 
2336 /**
2337  * nand_write_page_raw - [INTERN] raw page write function
2338  * @mtd: mtd info structure
2339  * @chip: nand chip info structure
2340  * @buf: data buffer
2341  * @oob_required: must write chip->oob_poi to OOB
2342  * @page: page number to write
2343  *
2344  * Not for syndrome calculating ECC controllers, which use a special oob layout.
2345  */
2346 int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
2347 			const uint8_t *buf, int oob_required, int page)
2348 {
2349 	chip->write_buf(mtd, buf, mtd->writesize);
2350 	if (oob_required)
2351 		chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2352 
2353 	return 0;
2354 }
2355 EXPORT_SYMBOL(nand_write_page_raw);
2356 
2357 /**
2358  * nand_write_page_raw_syndrome - [INTERN] raw page write function
2359  * @mtd: mtd info structure
2360  * @chip: nand chip info structure
2361  * @buf: data buffer
2362  * @oob_required: must write chip->oob_poi to OOB
2363  * @page: page number to write
2364  *
2365  * We need a special oob layout and handling even when ECC isn't checked.
2366  */
2367 static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
2368 					struct nand_chip *chip,
2369 					const uint8_t *buf, int oob_required,
2370 					int page)
2371 {
2372 	int eccsize = chip->ecc.size;
2373 	int eccbytes = chip->ecc.bytes;
2374 	uint8_t *oob = chip->oob_poi;
2375 	int steps, size;
2376 
2377 	for (steps = chip->ecc.steps; steps > 0; steps--) {
2378 		chip->write_buf(mtd, buf, eccsize);
2379 		buf += eccsize;
2380 
2381 		if (chip->ecc.prepad) {
2382 			chip->write_buf(mtd, oob, chip->ecc.prepad);
2383 			oob += chip->ecc.prepad;
2384 		}
2385 
2386 		chip->write_buf(mtd, oob, eccbytes);
2387 		oob += eccbytes;
2388 
2389 		if (chip->ecc.postpad) {
2390 			chip->write_buf(mtd, oob, chip->ecc.postpad);
2391 			oob += chip->ecc.postpad;
2392 		}
2393 	}
2394 
2395 	size = mtd->oobsize - (oob - chip->oob_poi);
2396 	if (size)
2397 		chip->write_buf(mtd, oob, size);
2398 
2399 	return 0;
2400 }
2401 /**
2402  * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
2403  * @mtd: mtd info structure
2404  * @chip: nand chip info structure
2405  * @buf: data buffer
2406  * @oob_required: must write chip->oob_poi to OOB
2407  * @page: page number to write
2408  */
2409 static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
2410 				 const uint8_t *buf, int oob_required,
2411 				 int page)
2412 {
2413 	int i, eccsize = chip->ecc.size, ret;
2414 	int eccbytes = chip->ecc.bytes;
2415 	int eccsteps = chip->ecc.steps;
2416 	uint8_t *ecc_calc = chip->buffers->ecccalc;
2417 	const uint8_t *p = buf;
2418 
2419 	/* Software ECC calculation */
2420 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2421 		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2422 
2423 	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2424 					 chip->ecc.total);
2425 	if (ret)
2426 		return ret;
2427 
2428 	return chip->ecc.write_page_raw(mtd, chip, buf, 1, page);
2429 }
2430 
2431 /**
2432  * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
2433  * @mtd: mtd info structure
2434  * @chip: nand chip info structure
2435  * @buf: data buffer
2436  * @oob_required: must write chip->oob_poi to OOB
2437  * @page: page number to write
2438  */
2439 static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
2440 				  const uint8_t *buf, int oob_required,
2441 				  int page)
2442 {
2443 	int i, eccsize = chip->ecc.size, ret;
2444 	int eccbytes = chip->ecc.bytes;
2445 	int eccsteps = chip->ecc.steps;
2446 	uint8_t *ecc_calc = chip->buffers->ecccalc;
2447 	const uint8_t *p = buf;
2448 
2449 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2450 		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2451 		chip->write_buf(mtd, p, eccsize);
2452 		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2453 	}
2454 
2455 	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2456 					 chip->ecc.total);
2457 	if (ret)
2458 		return ret;
2459 
2460 	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2461 
2462 	return 0;
2463 }
2464 
2465 
2466 /**
2467  * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
2468  * @mtd:	mtd info structure
2469  * @chip:	nand chip info structure
2470  * @offset:	column address of subpage within the page
2471  * @data_len:	data length
2472  * @buf:	data buffer
2473  * @oob_required: must write chip->oob_poi to OOB
2474  * @page: page number to write
2475  */
2476 static int nand_write_subpage_hwecc(struct mtd_info *mtd,
2477 				struct nand_chip *chip, uint32_t offset,
2478 				uint32_t data_len, const uint8_t *buf,
2479 				int oob_required, int page)
2480 {
2481 	uint8_t *oob_buf  = chip->oob_poi;
2482 	uint8_t *ecc_calc = chip->buffers->ecccalc;
2483 	int ecc_size      = chip->ecc.size;
2484 	int ecc_bytes     = chip->ecc.bytes;
2485 	int ecc_steps     = chip->ecc.steps;
2486 	uint32_t start_step = offset / ecc_size;
2487 	uint32_t end_step   = (offset + data_len - 1) / ecc_size;
2488 	int oob_bytes       = mtd->oobsize / ecc_steps;
2489 	int step, ret;
2490 
2491 	for (step = 0; step < ecc_steps; step++) {
2492 		/* configure controller for WRITE access */
2493 		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2494 
2495 		/* write data (untouched subpages already masked by 0xFF) */
2496 		chip->write_buf(mtd, buf, ecc_size);
2497 
2498 		/* mask ECC of un-touched subpages by padding 0xFF */
2499 		if ((step < start_step) || (step > end_step))
2500 			memset(ecc_calc, 0xff, ecc_bytes);
2501 		else
2502 			chip->ecc.calculate(mtd, buf, ecc_calc);
2503 
2504 		/* mask OOB of un-touched subpages by padding 0xFF */
2505 		/* if oob_required, preserve OOB metadata of written subpage */
2506 		if (!oob_required || (step < start_step) || (step > end_step))
2507 			memset(oob_buf, 0xff, oob_bytes);
2508 
2509 		buf += ecc_size;
2510 		ecc_calc += ecc_bytes;
2511 		oob_buf  += oob_bytes;
2512 	}
2513 
2514 	/* copy the calculated ECC for the whole page to chip->oob_poi */
2515 	/* (this includes the 0xFF mask value for unwritten subpages) */
2516 	ecc_calc = chip->buffers->ecccalc;
2517 	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2518 					 chip->ecc.total);
2519 	if (ret)
2520 		return ret;
2521 
2522 	/* write OOB buffer to NAND device */
2523 	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2524 
2525 	return 0;
2526 }
2527 
2528 
2529 /**
2530  * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
2531  * @mtd: mtd info structure
2532  * @chip: nand chip info structure
2533  * @buf: data buffer
2534  * @oob_required: must write chip->oob_poi to OOB
2535  * @page: page number to write
2536  *
2537  * The hw generator calculates the error syndrome automatically. Therefore we
2538  * need a special oob layout and handling.
2539  */
2540 static int nand_write_page_syndrome(struct mtd_info *mtd,
2541 				    struct nand_chip *chip,
2542 				    const uint8_t *buf, int oob_required,
2543 				    int page)
2544 {
2545 	int i, eccsize = chip->ecc.size;
2546 	int eccbytes = chip->ecc.bytes;
2547 	int eccsteps = chip->ecc.steps;
2548 	const uint8_t *p = buf;
2549 	uint8_t *oob = chip->oob_poi;
2550 
2551 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2552 
2553 		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2554 		chip->write_buf(mtd, p, eccsize);
2555 
2556 		if (chip->ecc.prepad) {
2557 			chip->write_buf(mtd, oob, chip->ecc.prepad);
2558 			oob += chip->ecc.prepad;
2559 		}
2560 
2561 		chip->ecc.calculate(mtd, p, oob);
2562 		chip->write_buf(mtd, oob, eccbytes);
2563 		oob += eccbytes;
2564 
2565 		if (chip->ecc.postpad) {
2566 			chip->write_buf(mtd, oob, chip->ecc.postpad);
2567 			oob += chip->ecc.postpad;
2568 		}
2569 	}
2570 
2571 	/* Calculate remaining oob bytes */
2572 	i = mtd->oobsize - (oob - chip->oob_poi);
2573 	if (i)
2574 		chip->write_buf(mtd, oob, i);
2575 
2576 	return 0;
2577 }
2578 
2579 /**
2580  * nand_write_page - write one page
2581  * @mtd: MTD device structure
2582  * @chip: NAND chip descriptor
2583  * @offset: address offset within the page
2584  * @data_len: length of actual data to be written
2585  * @buf: the data to write
2586  * @oob_required: must write chip->oob_poi to OOB
2587  * @page: page number to write
2588  * @raw: use _raw version of write_page
2589  */
2590 static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2591 		uint32_t offset, int data_len, const uint8_t *buf,
2592 		int oob_required, int page, int raw)
2593 {
2594 	int status, subpage;
2595 
2596 	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
2597 		chip->ecc.write_subpage)
2598 		subpage = offset || (data_len < mtd->writesize);
2599 	else
2600 		subpage = 0;
2601 
2602 	if (nand_standard_page_accessors(&chip->ecc))
2603 		chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
2604 
2605 	if (unlikely(raw))
2606 		status = chip->ecc.write_page_raw(mtd, chip, buf,
2607 						  oob_required, page);
2608 	else if (subpage)
2609 		status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
2610 						 buf, oob_required, page);
2611 	else
2612 		status = chip->ecc.write_page(mtd, chip, buf, oob_required,
2613 					      page);
2614 
2615 	if (status < 0)
2616 		return status;
2617 
2618 	if (nand_standard_page_accessors(&chip->ecc)) {
2619 		chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2620 
2621 		status = chip->waitfunc(mtd, chip);
2622 		if (status & NAND_STATUS_FAIL)
2623 			return -EIO;
2624 	}
2625 
2626 	return 0;
2627 }
2628 
2629 /**
2630  * nand_fill_oob - [INTERN] Transfer client buffer to oob
2631  * @mtd: MTD device structure
2632  * @oob: oob data buffer
2633  * @len: oob data write length
2634  * @ops: oob ops structure
2635  */
2636 static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
2637 			      struct mtd_oob_ops *ops)
2638 {
2639 	struct nand_chip *chip = mtd_to_nand(mtd);
2640 	int ret;
2641 
2642 	/*
2643 	 * Initialise to all 0xFF, to avoid the possibility of left over OOB
2644 	 * data from a previous OOB read.
2645 	 */
2646 	memset(chip->oob_poi, 0xff, mtd->oobsize);
2647 
2648 	switch (ops->mode) {
2649 
2650 	case MTD_OPS_PLACE_OOB:
2651 	case MTD_OPS_RAW:
2652 		memcpy(chip->oob_poi + ops->ooboffs, oob, len);
2653 		return oob + len;
2654 
2655 	case MTD_OPS_AUTO_OOB:
2656 		ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
2657 						  ops->ooboffs, len);
2658 		BUG_ON(ret);
2659 		return oob + len;
2660 
2661 	default:
2662 		BUG();
2663 	}
2664 	return NULL;
2665 }
2666 
2667 #define NOTALIGNED(x)	((x & (chip->subpagesize - 1)) != 0)
2668 
2669 /**
2670  * nand_do_write_ops - [INTERN] NAND write with ECC
2671  * @mtd: MTD device structure
2672  * @to: offset to write to
2673  * @ops: oob operations description structure
2674  *
2675  * NAND write with ECC.
2676  */
2677 static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2678 			     struct mtd_oob_ops *ops)
2679 {
2680 	int chipnr, realpage, page, column;
2681 	struct nand_chip *chip = mtd_to_nand(mtd);
2682 	uint32_t writelen = ops->len;
2683 
2684 	uint32_t oobwritelen = ops->ooblen;
2685 	uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
2686 
2687 	uint8_t *oob = ops->oobbuf;
2688 	uint8_t *buf = ops->datbuf;
2689 	int ret;
2690 	int oob_required = oob ? 1 : 0;
2691 
2692 	ops->retlen = 0;
2693 	if (!writelen)
2694 		return 0;
2695 
2696 	/* Reject writes that are not page aligned */
2697 	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
2698 		pr_notice("%s: attempt to write non page aligned data\n",
2699 			   __func__);
2700 		return -EINVAL;
2701 	}
2702 
2703 	column = to & (mtd->writesize - 1);
2704 
2705 	chipnr = (int)(to >> chip->chip_shift);
2706 	chip->select_chip(mtd, chipnr);
2707 
2708 	/* Check if it is write protected */
2709 	if (nand_check_wp(mtd)) {
2710 		ret = -EIO;
2711 		goto err_out;
2712 	}
2713 
2714 	realpage = (int)(to >> chip->page_shift);
2715 	page = realpage & chip->pagemask;
2716 
2717 	/* Invalidate the page cache, when we write to the cached page */
2718 	if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
2719 	    ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
2720 		chip->pagebuf = -1;
2721 
2722 	/* Don't allow multipage oob writes with offset */
2723 	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
2724 		ret = -EINVAL;
2725 		goto err_out;
2726 	}
2727 
2728 	while (1) {
2729 		int bytes = mtd->writesize;
2730 		uint8_t *wbuf = buf;
2731 		int use_bufpoi;
2732 		int part_pagewr = (column || writelen < mtd->writesize);
2733 
2734 		if (part_pagewr)
2735 			use_bufpoi = 1;
2736 		else if (chip->options & NAND_USE_BOUNCE_BUFFER)
2737 			use_bufpoi = !virt_addr_valid(buf) ||
2738 				     !IS_ALIGNED((unsigned long)buf,
2739 						 chip->buf_align);
2740 		else
2741 			use_bufpoi = 0;
2742 
2743 		/* Partial page write, or need to use the bounce buffer? */
2744 		if (use_bufpoi) {
2745 			pr_debug("%s: using write bounce buffer for buf@%p\n",
2746 					 __func__, buf);
2747 			if (part_pagewr)
2748 				bytes = min_t(int, bytes - column, writelen);
2749 			chip->pagebuf = -1;
2750 			memset(chip->buffers->databuf, 0xff, mtd->writesize);
2751 			memcpy(&chip->buffers->databuf[column], buf, bytes);
2752 			wbuf = chip->buffers->databuf;
2753 		}
2754 
2755 		if (unlikely(oob)) {
2756 			size_t len = min(oobwritelen, oobmaxlen);
2757 			oob = nand_fill_oob(mtd, oob, len, ops);
2758 			oobwritelen -= len;
2759 		} else {
2760 			/* We still need to erase leftover OOB data */
2761 			memset(chip->oob_poi, 0xff, mtd->oobsize);
2762 		}
2763 
2764 		ret = nand_write_page(mtd, chip, column, bytes, wbuf,
2765 				      oob_required, page,
2766 				      (ops->mode == MTD_OPS_RAW));
2767 		if (ret)
2768 			break;
2769 
2770 		writelen -= bytes;
2771 		if (!writelen)
2772 			break;
2773 
2774 		column = 0;
2775 		buf += bytes;
2776 		realpage++;
2777 
2778 		page = realpage & chip->pagemask;
2779 		/* Check if we cross a chip boundary */
2780 		if (!page) {
2781 			chipnr++;
2782 			chip->select_chip(mtd, -1);
2783 			chip->select_chip(mtd, chipnr);
2784 		}
2785 	}
2786 
2787 	ops->retlen = ops->len - writelen;
2788 	if (unlikely(oob))
2789 		ops->oobretlen = ops->ooblen;
2790 
2791 err_out:
2792 	chip->select_chip(mtd, -1);
2793 	return ret;
2794 }
2795 
2796 /**
2797  * panic_nand_write - [MTD Interface] NAND write with ECC
2798  * @mtd: MTD device structure
2799  * @to: offset to write to
2800  * @len: number of bytes to write
2801  * @retlen: pointer to variable to store the number of written bytes
2802  * @buf: the data to write
2803  *
2804  * NAND write with ECC. Used when performing writes in interrupt context, this
2805  * may for example be called by mtdoops when writing an oops while in panic.
2806  */
2807 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2808 			    size_t *retlen, const uint8_t *buf)
2809 {
2810 	struct nand_chip *chip = mtd_to_nand(mtd);
2811 	int chipnr = (int)(to >> chip->chip_shift);
2812 	struct mtd_oob_ops ops;
2813 	int ret;
2814 
2815 	/* Grab the device */
2816 	panic_nand_get_device(chip, mtd, FL_WRITING);
2817 
2818 	chip->select_chip(mtd, chipnr);
2819 
2820 	/* Wait for the device to get ready */
2821 	panic_nand_wait(mtd, chip, 400);
2822 
2823 	memset(&ops, 0, sizeof(ops));
2824 	ops.len = len;
2825 	ops.datbuf = (uint8_t *)buf;
2826 	ops.mode = MTD_OPS_PLACE_OOB;
2827 
2828 	ret = nand_do_write_ops(mtd, to, &ops);
2829 
2830 	*retlen = ops.retlen;
2831 	return ret;
2832 }
2833 
2834 /**
2835  * nand_write - [MTD Interface] NAND write with ECC
2836  * @mtd: MTD device structure
2837  * @to: offset to write to
2838  * @len: number of bytes to write
2839  * @retlen: pointer to variable to store the number of written bytes
2840  * @buf: the data to write
2841  *
2842  * NAND write with ECC.
2843  */
2844 static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2845 			  size_t *retlen, const uint8_t *buf)
2846 {
2847 	struct mtd_oob_ops ops;
2848 	int ret;
2849 
2850 	nand_get_device(mtd, FL_WRITING);
2851 	memset(&ops, 0, sizeof(ops));
2852 	ops.len = len;
2853 	ops.datbuf = (uint8_t *)buf;
2854 	ops.mode = MTD_OPS_PLACE_OOB;
2855 	ret = nand_do_write_ops(mtd, to, &ops);
2856 	*retlen = ops.retlen;
2857 	nand_release_device(mtd);
2858 	return ret;
2859 }
2860 
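/*
 * Illustrative sketch (not from the original driver): a page-aligned write
 * through mtd_write(), which lands in nand_write() above. nand_do_write_ops()
 * rejects writes that are not (sub)page aligned, so offset and length are
 * kept multiples of writesize here.
 */
static int __maybe_unused example_write_one_page(struct mtd_info *example_mtd,
						 loff_t to, const u8 *data)
{
	size_t retlen;

	if (to & (example_mtd->writesize - 1))
		return -EINVAL;		/* keep the write page aligned */

	return mtd_write(example_mtd, to, example_mtd->writesize, &retlen, data);
}
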
2861 /**
2862  * nand_do_write_oob - [MTD Interface] NAND write out-of-band
2863  * @mtd: MTD device structure
2864  * @to: offset to write to
2865  * @ops: oob operation description structure
2866  *
2867  * NAND write out-of-band.
2868  */
2869 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2870 			     struct mtd_oob_ops *ops)
2871 {
2872 	int chipnr, page, status, len;
2873 	struct nand_chip *chip = mtd_to_nand(mtd);
2874 
2875 	pr_debug("%s: to = 0x%08x, len = %i\n",
2876 			 __func__, (unsigned int)to, (int)ops->ooblen);
2877 
2878 	len = mtd_oobavail(mtd, ops);
2879 
2880 	/* Do not allow write past end of page */
2881 	if ((ops->ooboffs + ops->ooblen) > len) {
2882 		pr_debug("%s: attempt to write past end of page\n",
2883 				__func__);
2884 		return -EINVAL;
2885 	}
2886 
2887 	if (unlikely(ops->ooboffs >= len)) {
2888 		pr_debug("%s: attempt to start write outside oob\n",
2889 				__func__);
2890 		return -EINVAL;
2891 	}
2892 
2893 	/* Do not allow write past end of device */
2894 	if (unlikely(to >= mtd->size ||
2895 		     ops->ooboffs + ops->ooblen >
2896 			((mtd->size >> chip->page_shift) -
2897 			 (to >> chip->page_shift)) * len)) {
2898 		pr_debug("%s: attempt to write beyond end of device\n",
2899 				__func__);
2900 		return -EINVAL;
2901 	}
2902 
2903 	chipnr = (int)(to >> chip->chip_shift);
2904 
2905 	/*
2906 	 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
2907 	 * of my DiskOnChip 2000 test units) will clear the whole data page too
2908 	 * if we don't do this. I have no clue why, but I seem to have 'fixed'
2909 	 * it in the doc2000 driver in August 1999.  dwmw2.
2910 	 */
2911 	nand_reset(chip, chipnr);
2912 
2913 	chip->select_chip(mtd, chipnr);
2914 
2915 	/* Shift to get page */
2916 	page = (int)(to >> chip->page_shift);
2917 
2918 	/* Check if it is write protected */
2919 	if (nand_check_wp(mtd)) {
2920 		chip->select_chip(mtd, -1);
2921 		return -EROFS;
2922 	}
2923 
2924 	/* Invalidate the page cache, if we write to the cached page */
2925 	if (page == chip->pagebuf)
2926 		chip->pagebuf = -1;
2927 
2928 	nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
2929 
2930 	if (ops->mode == MTD_OPS_RAW)
2931 		status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
2932 	else
2933 		status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
2934 
2935 	chip->select_chip(mtd, -1);
2936 
2937 	if (status)
2938 		return status;
2939 
2940 	ops->oobretlen = ops->ooblen;
2941 
2942 	return 0;
2943 }
2944 
2945 /**
2946  * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
2947  * @mtd: MTD device structure
2948  * @to: offset to write to
2949  * @ops: oob operation description structure
2950  */
2951 static int nand_write_oob(struct mtd_info *mtd, loff_t to,
2952 			  struct mtd_oob_ops *ops)
2953 {
2954 	int ret = -ENOTSUPP;
2955 
2956 	ops->retlen = 0;
2957 
2958 	/* Do not allow writes past end of device */
2959 	if (ops->datbuf && (to + ops->len) > mtd->size) {
2960 		pr_debug("%s: attempt to write beyond end of device\n",
2961 				__func__);
2962 		return -EINVAL;
2963 	}
2964 
2965 	nand_get_device(mtd, FL_WRITING);
2966 
2967 	switch (ops->mode) {
2968 	case MTD_OPS_PLACE_OOB:
2969 	case MTD_OPS_AUTO_OOB:
2970 	case MTD_OPS_RAW:
2971 		break;
2972 
2973 	default:
2974 		goto out;
2975 	}
2976 
2977 	if (!ops->datbuf)
2978 		ret = nand_do_write_oob(mtd, to, ops);
2979 	else
2980 		ret = nand_do_write_ops(mtd, to, ops);
2981 
2982 out:
2983 	nand_release_device(mtd);
2984 	return ret;
2985 }
2986 
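/*
 * Illustrative sketch (not from the original driver): writing only the free
 * OOB bytes of one page through the ops interface served by nand_write_oob()
 * above. With MTD_OPS_AUTO_OOB, nand_fill_oob() places the bytes according
 * to the ooblayout and pads the rest of the OOB area with 0xff.
 */
static int __maybe_unused example_write_free_oob(struct mtd_info *example_mtd,
						 loff_t to, u8 *oob, size_t ooblen)
{
	struct mtd_oob_ops ops = {
		.mode   = MTD_OPS_AUTO_OOB,
		.ooblen = ooblen,
		.oobbuf = oob,
		/* .datbuf left NULL: OOB-only path via nand_do_write_oob() */
	};

	return mtd_write_oob(example_mtd, to, &ops);
}
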
2987 /**
2988  * single_erase - [GENERIC] NAND standard block erase command function
2989  * @mtd: MTD device structure
2990  * @page: the page address of the block which will be erased
2991  *
2992  * Standard erase command for NAND chips. Returns NAND status.
2993  */
2994 static int single_erase(struct mtd_info *mtd, int page)
2995 {
2996 	struct nand_chip *chip = mtd_to_nand(mtd);
2997 	/* Send commands to erase a block */
2998 	chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
2999 	chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
3000 
3001 	return chip->waitfunc(mtd, chip);
3002 }
3003 
3004 /**
3005  * nand_erase - [MTD Interface] erase block(s)
3006  * @mtd: MTD device structure
3007  * @instr: erase instruction
3008  *
3009  * Erase one or more blocks.
3010  */
3011 static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
3012 {
3013 	return nand_erase_nand(mtd, instr, 0);
3014 }
3015 
3016 /**
3017  * nand_erase_nand - [INTERN] erase block(s)
3018  * @mtd: MTD device structure
3019  * @instr: erase instruction
3020  * @allowbbt: allow erasing the bbt area
3021  *
3022  * Erase one or more blocks.
3023  */
3024 int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
3025 		    int allowbbt)
3026 {
3027 	int page, status, pages_per_block, ret, chipnr;
3028 	struct nand_chip *chip = mtd_to_nand(mtd);
3029 	loff_t len;
3030 
3031 	pr_debug("%s: start = 0x%012llx, len = %llu\n",
3032 			__func__, (unsigned long long)instr->addr,
3033 			(unsigned long long)instr->len);
3034 
3035 	if (check_offs_len(mtd, instr->addr, instr->len))
3036 		return -EINVAL;
3037 
3038 	/* Grab the lock and see if the device is available */
3039 	nand_get_device(mtd, FL_ERASING);
3040 
3041 	/* Shift to get first page */
3042 	page = (int)(instr->addr >> chip->page_shift);
3043 	chipnr = (int)(instr->addr >> chip->chip_shift);
3044 
3045 	/* Calculate pages in each block */
3046 	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
3047 
3048 	/* Select the NAND device */
3049 	chip->select_chip(mtd, chipnr);
3050 
3051 	/* Check if it is write protected */
3052 	if (nand_check_wp(mtd)) {
3053 		pr_debug("%s: device is write protected!\n",
3054 				__func__);
3055 		instr->state = MTD_ERASE_FAILED;
3056 		goto erase_exit;
3057 	}
3058 
3059 	/* Loop through the pages */
3060 	len = instr->len;
3061 
3062 	instr->state = MTD_ERASING;
3063 
3064 	while (len) {
3065 		/* Check if we have a bad block, we do not erase bad blocks! */
3066 		if (nand_block_checkbad(mtd, ((loff_t) page) <<
3067 					chip->page_shift, allowbbt)) {
3068 			pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
3069 				    __func__, page);
3070 			instr->state = MTD_ERASE_FAILED;
3071 			goto erase_exit;
3072 		}
3073 
3074 		/*
3075 		 * Invalidate the page cache, if we erase the block which
3076 		 * contains the current cached page.
3077 		 */
3078 		if (page <= chip->pagebuf && chip->pagebuf <
3079 		    (page + pages_per_block))
3080 			chip->pagebuf = -1;
3081 
3082 		status = chip->erase(mtd, page & chip->pagemask);
3083 
3084 		/* See if block erase succeeded */
3085 		if (status & NAND_STATUS_FAIL) {
3086 			pr_debug("%s: failed erase, page 0x%08x\n",
3087 					__func__, page);
3088 			instr->state = MTD_ERASE_FAILED;
3089 			instr->fail_addr =
3090 				((loff_t)page << chip->page_shift);
3091 			goto erase_exit;
3092 		}
3093 
3094 		/* Increment page address and decrement length */
3095 		len -= (1ULL << chip->phys_erase_shift);
3096 		page += pages_per_block;
3097 
3098 		/* Check if we cross a chip boundary */
3099 		if (len && !(page & chip->pagemask)) {
3100 			chipnr++;
3101 			chip->select_chip(mtd, -1);
3102 			chip->select_chip(mtd, chipnr);
3103 		}
3104 	}
3105 	instr->state = MTD_ERASE_DONE;
3106 
3107 erase_exit:
3108 
3109 	ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
3110 
3111 	/* Deselect and wake up anyone waiting on the device */
3112 	chip->select_chip(mtd, -1);
3113 	nand_release_device(mtd);
3114 
3115 	/* Call the completion callback, if any */
3116 	if (!ret)
3117 		mtd_erase_callback(instr);
3118 
3119 	/* Return more or less happy */
3120 	return ret;
3121 }
3122 
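/*
 * Illustrative sketch (not from the original driver): erasing the first
 * eraseblock through mtd_erase(), which ends up in nand_erase() ->
 * nand_erase_nand() above. This assumes the erase_info layout used by this
 * file; the callback is left NULL, so the call completes synchronously.
 */
static int __maybe_unused example_erase_first_block(struct mtd_info *example_mtd)
{
	struct erase_info instr = {
		.addr = 0,
		.len  = example_mtd->erasesize,
	};

	return mtd_erase(example_mtd, &instr);
}
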
3123 /**
3124  * nand_sync - [MTD Interface] sync
3125  * @mtd: MTD device structure
3126  *
3127  * Sync is actually a wait for chip ready function.
3128  */
3129 static void nand_sync(struct mtd_info *mtd)
3130 {
3131 	pr_debug("%s: called\n", __func__);
3132 
3133 	/* Grab the lock and see if the device is available */
3134 	nand_get_device(mtd, FL_SYNCING);
3135 	/* Release it and go back */
3136 	nand_release_device(mtd);
3137 }
3138 
3139 /**
3140  * nand_block_isbad - [MTD Interface] Check if block at offset is bad
3141  * @mtd: MTD device structure
3142  * @offs: offset relative to mtd start
3143  */
3144 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
3145 {
3146 	struct nand_chip *chip = mtd_to_nand(mtd);
3147 	int chipnr = (int)(offs >> chip->chip_shift);
3148 	int ret;
3149 
3150 	/* Select the NAND device */
3151 	nand_get_device(mtd, FL_READING);
3152 	chip->select_chip(mtd, chipnr);
3153 
3154 	ret = nand_block_checkbad(mtd, offs, 0);
3155 
3156 	chip->select_chip(mtd, -1);
3157 	nand_release_device(mtd);
3158 
3159 	return ret;
3160 }
3161 
3162 /**
3163  * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
3164  * @mtd: MTD device structure
3165  * @ofs: offset relative to mtd start
3166  */
3167 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
3168 {
3169 	int ret;
3170 
3171 	ret = nand_block_isbad(mtd, ofs);
3172 	if (ret) {
3173 		/* If it was bad already, return success and do nothing */
3174 		if (ret > 0)
3175 			return 0;
3176 		return ret;
3177 	}
3178 
3179 	return nand_block_markbad_lowlevel(mtd, ofs);
3180 }
3181 
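/*
 * Illustrative sketch (not from the original driver): a client-side helper
 * built on the interfaces backed by nand_block_isbad() and
 * nand_block_markbad() above. 'ofs' is assumed to be eraseblock aligned and
 * 'io_failed' to reflect a failed erase or write on that block.
 */
static int __maybe_unused example_handle_bad_block(struct mtd_info *example_mtd,
						   loff_t ofs, bool io_failed)
{
	int ret = mtd_block_isbad(example_mtd, ofs);

	if (ret < 0)
		return ret;		/* error while checking */
	if (ret)
		return -EIO;		/* already marked bad: skip this block */

	/* typical policy: retire the block after a failed erase/write */
	if (io_failed)
		return mtd_block_markbad(example_mtd, ofs);

	return 0;
}
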
3182 /**
3183  * nand_max_bad_blocks - [MTD Interface] Max number of bad blocks for an mtd
3184  * @mtd: MTD device structure
3185  * @ofs: offset relative to mtd start
3186  * @len: length of mtd
3187  */
3188 static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
3189 {
3190 	struct nand_chip *chip = mtd_to_nand(mtd);
3191 	u32 part_start_block;
3192 	u32 part_end_block;
3193 	u32 part_start_die;
3194 	u32 part_end_die;
3195 
3196 	/*
3197 	 * max_bb_per_die and blocks_per_die are used to determine
3198 	 * the maximum bad block count.
3199 	 */
3200 	if (!chip->max_bb_per_die || !chip->blocks_per_die)
3201 		return -ENOTSUPP;
3202 
3203 	/* Get the start and end of the partition in erase blocks. */
3204 	part_start_block = mtd_div_by_eb(ofs, mtd);
3205 	part_end_block = mtd_div_by_eb(len, mtd) + part_start_block - 1;
3206 
3207 	/* Get the start and end LUNs of the partition. */
3208 	part_start_die = part_start_block / chip->blocks_per_die;
3209 	part_end_die = part_end_block / chip->blocks_per_die;
3210 
3211 	/*
3212 	 * Look up the bad blocks per unit and multiply by the number of units
3213 	 * that the partition spans.
3214 	 */
3215 	return chip->max_bb_per_die * (part_end_die - part_start_die + 1);
3216 }
3217 
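/*
 * Illustrative sketch (not from the original driver): using the bound
 * computed above from a client. For example, a partition spanning a single
 * LUN on a chip reporting max_bb_per_die == 40 yields a worst case of 40 bad
 * blocks, which an FTL-like user might reserve as spares.
 */
static int __maybe_unused example_worst_case_bad_blocks(struct mtd_info *example_mtd)
{
	/* mtd_max_bad_blocks() ends up in nand_max_bad_blocks() here */
	return mtd_max_bad_blocks(example_mtd, 0, example_mtd->size);
}
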
3218 /**
3219  * nand_onfi_set_features - [REPLACEABLE] set features for ONFI nand
3220  * @mtd: MTD device structure
3221  * @chip: nand chip info structure
3222  * @addr: feature address.
3223  * @subfeature_param: the subfeature parameters, a four bytes array.
3224  */
3225 static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
3226 			int addr, uint8_t *subfeature_param)
3227 {
3228 	int status;
3229 	int i;
3230 
3231 	if (!chip->onfi_version ||
3232 	    !(le16_to_cpu(chip->onfi_params.opt_cmd)
3233 	      & ONFI_OPT_CMD_SET_GET_FEATURES))
3234 		return -EINVAL;
3235 
3236 	chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
3237 	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
3238 		chip->write_byte(mtd, subfeature_param[i]);
3239 
3240 	status = chip->waitfunc(mtd, chip);
3241 	if (status & NAND_STATUS_FAIL)
3242 		return -EIO;
3243 	return 0;
3244 }
3245 
3246 /**
3247  * nand_onfi_get_features - [REPLACEABLE] get features for ONFI nand
3248  * @mtd: MTD device structure
3249  * @chip: nand chip info structure
3250  * @addr: feature address.
3251  * @subfeature_param: the subfeature parameters, a four bytes array.
3252  */
3253 static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
3254 			int addr, uint8_t *subfeature_param)
3255 {
3256 	int i;
3257 
3258 	if (!chip->onfi_version ||
3259 	    !(le16_to_cpu(chip->onfi_params.opt_cmd)
3260 	      & ONFI_OPT_CMD_SET_GET_FEATURES))
3261 		return -EINVAL;
3262 
3263 	chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
3264 	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
3265 		*subfeature_param++ = chip->read_byte(mtd);
3266 	return 0;
3267 }
3268 
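/*
 * Illustrative sketch (not from the original driver): querying the ONFI
 * timing mode feature (feature address 0x01) through the hook installed by
 * nand_set_defaults() below. The meaning of the four subfeature bytes
 * follows the ONFI specification; only the first byte is used here.
 */
static int __maybe_unused example_get_timing_mode(struct mtd_info *example_mtd)
{
	struct nand_chip *chip = mtd_to_nand(example_mtd);
	u8 subfeature[ONFI_SUBFEATURE_PARAM_LEN] = { 0 };
	int ret;

	ret = chip->onfi_get_features(example_mtd, chip,
				      ONFI_FEATURE_ADDR_TIMING_MODE, subfeature);
	if (ret)
		return ret;

	return subfeature[0];	/* currently selected timing mode */
}
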
3269 /**
3270  * nand_onfi_get_set_features_notsupp - set/get features stub returning
3271  *					-ENOTSUPP
3272  * @mtd: MTD device structure
3273  * @chip: nand chip info structure
3274  * @addr: feature address.
3275  * @subfeature_param: the subfeature parameters, a four bytes array.
3276  *
3277  * Should be used by NAND controller drivers that do not support the SET/GET
3278  * FEATURES operations.
3279  */
3280 int nand_onfi_get_set_features_notsupp(struct mtd_info *mtd,
3281 				       struct nand_chip *chip, int addr,
3282 				       u8 *subfeature_param)
3283 {
3284 	return -ENOTSUPP;
3285 }
3286 EXPORT_SYMBOL(nand_onfi_get_set_features_notsupp);
3287 
3288 /**
3289  * nand_suspend - [MTD Interface] Suspend the NAND flash
3290  * @mtd: MTD device structure
3291  */
3292 static int nand_suspend(struct mtd_info *mtd)
3293 {
3294 	return nand_get_device(mtd, FL_PM_SUSPENDED);
3295 }
3296 
3297 /**
3298  * nand_resume - [MTD Interface] Resume the NAND flash
3299  * @mtd: MTD device structure
3300  */
3301 static void nand_resume(struct mtd_info *mtd)
3302 {
3303 	struct nand_chip *chip = mtd_to_nand(mtd);
3304 
3305 	if (chip->state == FL_PM_SUSPENDED)
3306 		nand_release_device(mtd);
3307 	else
3308 		pr_err("%s called for a chip which is not in suspended state\n",
3309 			__func__);
3310 }
3311 
3312 /**
3313  * nand_shutdown - [MTD Interface] Finish the current NAND operation and
3314  *                 prevent further operations
3315  * @mtd: MTD device structure
3316  */
3317 static void nand_shutdown(struct mtd_info *mtd)
3318 {
3319 	nand_get_device(mtd, FL_PM_SUSPENDED);
3320 }
3321 
3322 /* Set default functions */
3323 static void nand_set_defaults(struct nand_chip *chip)
3324 {
3325 	unsigned int busw = chip->options & NAND_BUSWIDTH_16;
3326 
3327 	/* check for proper chip_delay setup, set 20us if not */
3328 	if (!chip->chip_delay)
3329 		chip->chip_delay = 20;
3330 
3331 	/* check if a user-supplied command function was given */
3332 	if (chip->cmdfunc == NULL)
3333 		chip->cmdfunc = nand_command;
3334 
3335 	/* check if a user-supplied wait function was given */
3336 	if (chip->waitfunc == NULL)
3337 		chip->waitfunc = nand_wait;
3338 
3339 	if (!chip->select_chip)
3340 		chip->select_chip = nand_select_chip;
3341 
3342 	/* set for ONFI nand */
3343 	if (!chip->onfi_set_features)
3344 		chip->onfi_set_features = nand_onfi_set_features;
3345 	if (!chip->onfi_get_features)
3346 		chip->onfi_get_features = nand_onfi_get_features;
3347 
3348 	/* If called twice, pointers that depend on busw may need to be reset */
3349 	if (!chip->read_byte || chip->read_byte == nand_read_byte)
3350 		chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
3351 	if (!chip->read_word)
3352 		chip->read_word = nand_read_word;
3353 	if (!chip->block_bad)
3354 		chip->block_bad = nand_block_bad;
3355 	if (!chip->block_markbad)
3356 		chip->block_markbad = nand_default_block_markbad;
3357 	if (!chip->write_buf || chip->write_buf == nand_write_buf)
3358 		chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
3359 	if (!chip->write_byte || chip->write_byte == nand_write_byte)
3360 		chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;
3361 	if (!chip->read_buf || chip->read_buf == nand_read_buf)
3362 		chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
3363 	if (!chip->scan_bbt)
3364 		chip->scan_bbt = nand_default_bbt;
3365 
3366 	if (!chip->controller) {
3367 		chip->controller = &chip->hwcontrol;
3368 		nand_hw_control_init(chip->controller);
3369 	}
3370 
3371 	if (!chip->buf_align)
3372 		chip->buf_align = 1;
3373 }
3374 
3375 /* Sanitize ONFI strings so we can safely print them */
3376 static void sanitize_string(uint8_t *s, size_t len)
3377 {
3378 	ssize_t i;
3379 
3380 	/* Null terminate */
3381 	s[len - 1] = 0;
3382 
3383 	/* Remove non printable chars */
3384 	for (i = 0; i < len - 1; i++) {
3385 		if (s[i] < ' ' || s[i] > 127)
3386 			s[i] = '?';
3387 	}
3388 
3389 	/* Remove trailing spaces */
3390 	strim(s);
3391 }
3392 
3393 static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
3394 {
3395 	int i;
3396 	while (len--) {
3397 		crc ^= *p++ << 8;
3398 		for (i = 0; i < 8; i++)
3399 			crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
3400 	}
3401 
3402 	return crc;
3403 }
3404 
3405 /* Parse the Extended Parameter Page. */
3406 static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
3407 					    struct nand_onfi_params *p)
3408 {
3409 	struct mtd_info *mtd = nand_to_mtd(chip);
3410 	struct onfi_ext_param_page *ep;
3411 	struct onfi_ext_section *s;
3412 	struct onfi_ext_ecc_info *ecc;
3413 	uint8_t *cursor;
3414 	int ret = -EINVAL;
3415 	int len;
3416 	int i;
3417 
3418 	len = le16_to_cpu(p->ext_param_page_length) * 16;
3419 	ep = kmalloc(len, GFP_KERNEL);
3420 	if (!ep)
3421 		return -ENOMEM;
3422 
3423 	/* Send our own NAND_CMD_PARAM. */
3424 	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
3425 
3426 	/* Use the Change Read Column command to skip the ONFI param pages. */
3427 	chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
3428 			sizeof(*p) * p->num_of_param_pages, -1);
3429 
3430 	/* Read out the Extended Parameter Page. */
3431 	chip->read_buf(mtd, (uint8_t *)ep, len);
3432 	if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
3433 		!= le16_to_cpu(ep->crc))) {
3434 		pr_debug("fail in the CRC.\n");
3435 		goto ext_out;
3436 	}
3437 
3438 	/*
3439 	 * Check the signature.
3440 	 * This does not strictly follow the ONFI spec and may change in the future.
3441 	 */
3442 	if (strncmp(ep->sig, "EPPS", 4)) {
3443 		pr_debug("The signature is invalid.\n");
3444 		goto ext_out;
3445 	}
3446 
3447 	/* find the ECC section. */
3448 	cursor = (uint8_t *)(ep + 1);
3449 	for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
3450 		s = ep->sections + i;
3451 		if (s->type == ONFI_SECTION_TYPE_2)
3452 			break;
3453 		cursor += s->length * 16;
3454 	}
3455 	if (i == ONFI_EXT_SECTION_MAX) {
3456 		pr_debug("We can not find the ECC section.\n");
3457 		goto ext_out;
3458 	}
3459 
3460 	/* get the info we want. */
3461 	ecc = (struct onfi_ext_ecc_info *)cursor;
3462 
3463 	if (!ecc->codeword_size) {
3464 		pr_debug("Invalid codeword size\n");
3465 		goto ext_out;
3466 	}
3467 
3468 	chip->ecc_strength_ds = ecc->ecc_bits;
3469 	chip->ecc_step_ds = 1 << ecc->codeword_size;
3470 	ret = 0;
3471 
3472 ext_out:
3473 	kfree(ep);
3474 	return ret;
3475 }
3476 
3477 /*
3478  * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
3479  */
3480 static int nand_flash_detect_onfi(struct nand_chip *chip)
3481 {
3482 	struct mtd_info *mtd = nand_to_mtd(chip);
3483 	struct nand_onfi_params *p = &chip->onfi_params;
3484 	int i, j;
3485 	int val;
3486 
3487 	/* Try ONFI for unknown chip or LP */
3488 	chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
3489 	if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
3490 		chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
3491 		return 0;
3492 
3493 	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
3494 	for (i = 0; i < 3; i++) {
3495 		for (j = 0; j < sizeof(*p); j++)
3496 			((uint8_t *)p)[j] = chip->read_byte(mtd);
3497 		if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
3498 				le16_to_cpu(p->crc)) {
3499 			break;
3500 		}
3501 	}
3502 
3503 	if (i == 3) {
3504 		pr_err("Could not find valid ONFI parameter page; aborting\n");
3505 		return 0;
3506 	}
3507 
3508 	/* Check version */
3509 	val = le16_to_cpu(p->revision);
3510 	if (val & (1 << 5))
3511 		chip->onfi_version = 23;
3512 	else if (val & (1 << 4))
3513 		chip->onfi_version = 22;
3514 	else if (val & (1 << 3))
3515 		chip->onfi_version = 21;
3516 	else if (val & (1 << 2))
3517 		chip->onfi_version = 20;
3518 	else if (val & (1 << 1))
3519 		chip->onfi_version = 10;
3520 
3521 	if (!chip->onfi_version) {
3522 		pr_info("unsupported ONFI version: %d\n", val);
3523 		return 0;
3524 	}
3525 
3526 	sanitize_string(p->manufacturer, sizeof(p->manufacturer));
3527 	sanitize_string(p->model, sizeof(p->model));
3528 	if (!mtd->name)
3529 		mtd->name = p->model;
3530 
3531 	mtd->writesize = le32_to_cpu(p->byte_per_page);
3532 
3533 	/*
3534 	 * pages_per_block and blocks_per_lun may not be a power-of-2 size
3535 	 * (don't ask me who thought of this...). MTD assumes that these
3536 	 * dimensions will be power-of-2, so just truncate the remaining area.
3537 	 */
3538 	mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
3539 	mtd->erasesize *= mtd->writesize;
3540 
3541 	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
3542 
3543 	/* See erasesize comment */
3544 	chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
3545 	chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
3546 	chip->bits_per_cell = p->bits_per_cell;
3547 
3548 	chip->max_bb_per_die = le16_to_cpu(p->bb_per_lun);
3549 	chip->blocks_per_die = le32_to_cpu(p->blocks_per_lun);
3550 
3551 	if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
3552 		chip->options |= NAND_BUSWIDTH_16;
3553 
3554 	if (p->ecc_bits != 0xff) {
3555 		chip->ecc_strength_ds = p->ecc_bits;
3556 		chip->ecc_step_ds = 512;
3557 	} else if (chip->onfi_version >= 21 &&
3558 		(onfi_feature(chip) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
3559 
3560 		/*
3561 		 * nand_flash_detect_ext_param_page() uses the Change Read
3562 		 * Column command, which may not be supported by chip->cmdfunc.
3563 		 * So try to update chip->cmdfunc now. We do not replace a
3564 		 * user-supplied command function.
3565 		 */
3566 		if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
3567 			chip->cmdfunc = nand_command_lp;
3568 
3569 		/* The Extended Parameter Page is supported since ONFI 2.1. */
3570 		if (nand_flash_detect_ext_param_page(chip, p))
3571 			pr_warn("Failed to detect ONFI extended param page\n");
3572 	} else {
3573 		pr_warn("Could not retrieve ONFI ECC requirements\n");
3574 	}
3575 
3576 	return 1;
3577 }
3578 
3579 /*
3580  * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise.
3581  */
3582 static int nand_flash_detect_jedec(struct nand_chip *chip)
3583 {
3584 	struct mtd_info *mtd = nand_to_mtd(chip);
3585 	struct nand_jedec_params *p = &chip->jedec_params;
3586 	struct jedec_ecc_info *ecc;
3587 	int val;
3588 	int i, j;
3589 
3590 	/* Try JEDEC for unknown chip or LP */
3591 	chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
3592 	if (chip->read_byte(mtd) != 'J' || chip->read_byte(mtd) != 'E' ||
3593 		chip->read_byte(mtd) != 'D' || chip->read_byte(mtd) != 'E' ||
3594 		chip->read_byte(mtd) != 'C')
3595 		return 0;
3596 
3597 	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0x40, -1);
3598 	for (i = 0; i < 3; i++) {
3599 		for (j = 0; j < sizeof(*p); j++)
3600 			((uint8_t *)p)[j] = chip->read_byte(mtd);
3601 
3602 		if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
3603 				le16_to_cpu(p->crc))
3604 			break;
3605 	}
3606 
3607 	if (i == 3) {
3608 		pr_err("Could not find valid JEDEC parameter page; aborting\n");
3609 		return 0;
3610 	}
3611 
3612 	/* Check version */
3613 	val = le16_to_cpu(p->revision);
3614 	if (val & (1 << 2))
3615 		chip->jedec_version = 10;
3616 	else if (val & (1 << 1))
3617 		chip->jedec_version = 1; /* vendor specific version */
3618 
3619 	if (!chip->jedec_version) {
3620 		pr_info("unsupported JEDEC version: %d\n", val);
3621 		return 0;
3622 	}
3623 
3624 	sanitize_string(p->manufacturer, sizeof(p->manufacturer));
3625 	sanitize_string(p->model, sizeof(p->model));
3626 	if (!mtd->name)
3627 		mtd->name = p->model;
3628 
3629 	mtd->writesize = le32_to_cpu(p->byte_per_page);
3630 
3631 	/* Please refer to the comment in nand_flash_detect_onfi(). */
3632 	mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
3633 	mtd->erasesize *= mtd->writesize;
3634 
3635 	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
3636 
3637 	/* Please refer to the comment in nand_flash_detect_onfi(). */
3638 	chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
3639 	chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
3640 	chip->bits_per_cell = p->bits_per_cell;
3641 
3642 	if (jedec_feature(chip) & JEDEC_FEATURE_16_BIT_BUS)
3643 		chip->options |= NAND_BUSWIDTH_16;
3644 
3645 	/* ECC info */
3646 	ecc = &p->ecc_info[0];
3647 
3648 	if (ecc->codeword_size >= 9) {
3649 		chip->ecc_strength_ds = ecc->ecc_bits;
3650 		chip->ecc_step_ds = 1 << ecc->codeword_size;
3651 	} else {
3652 		pr_warn("Invalid codeword size\n");
3653 	}
3654 
3655 	return 1;
3656 }
3657 
3658 /*
3659  * nand_id_has_period - Check if an ID string has a given wraparound period
3660  * @id_data: the ID string
3661  * @arrlen: the length of the @id_data array
3662  * @period: the period of repetition
3663  *
3664  * Check if an ID string is repeated within a given sequence of bytes at a
3665  * specific repetition interval, or period (e.g., {0x20,0x01,0x7F,0x20} has a
3666  * period of 3). This is a helper function for nand_id_len(). Returns non-zero
3667  * if the repetition has a period of @period; otherwise, returns zero.
3668  */
3669 static int nand_id_has_period(u8 *id_data, int arrlen, int period)
3670 {
3671 	int i, j;
3672 	for (i = 0; i < period; i++)
3673 		for (j = i + period; j < arrlen; j += period)
3674 			if (id_data[i] != id_data[j])
3675 				return 0;
3676 	return 1;
3677 }
3678 
3679 /*
3680  * nand_id_len - Get the length of an ID string returned by CMD_READID
3681  * @id_data: the ID string
3682  * @arrlen: the length of the @id_data array
3683  *
3684  * Returns the length of the ID string, according to known wraparound/trailing
3685  * zero patterns. If no pattern exists, returns the length of the array.
3686  */
3687 static int nand_id_len(u8 *id_data, int arrlen)
3688 {
3689 	int last_nonzero, period;
3690 
3691 	/* Find last non-zero byte */
3692 	for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
3693 		if (id_data[last_nonzero])
3694 			break;
3695 
3696 	/* All zeros */
3697 	if (last_nonzero < 0)
3698 		return 0;
3699 
3700 	/* Calculate wraparound period */
3701 	for (period = 1; period < arrlen; period++)
3702 		if (nand_id_has_period(id_data, arrlen, period))
3703 			break;
3704 
3705 	/* There's a repeated pattern */
3706 	if (period < arrlen)
3707 		return period;
3708 
3709 	/* There are trailing zeros */
3710 	if (last_nonzero < arrlen - 1)
3711 		return last_nonzero + 1;
3712 
3713 	/* No pattern detected */
3714 	return arrlen;
3715 }
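
/*
 * Illustrative sketch (not part of the driver), using hypothetical ID bytes:
 * a string that wraps around with period 4 yields an ID length of 4, while a
 * string with only trailing zeros yields the index of the last non-zero byte
 * plus one.
 */
static inline void nand_id_len_example(void)
{
	u8 wrap[8] = { 0x2c, 0xda, 0x90, 0x95, 0x2c, 0xda, 0x90, 0x95 };
	u8 zeros[8] = { 0x98, 0xd1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	WARN_ON(nand_id_len(wrap, 8) != 4);	/* repeated pattern */
	WARN_ON(nand_id_len(zeros, 8) != 2);	/* trailing zeros */
}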
3716 
3717 /* Extract the number of bits per cell from the 3rd byte of the extended ID */
3718 static int nand_get_bits_per_cell(u8 cellinfo)
3719 {
3720 	int bits;
3721 
3722 	bits = cellinfo & NAND_CI_CELLTYPE_MSK;
3723 	bits >>= NAND_CI_CELLTYPE_SHIFT;
3724 	return bits + 1;
3725 }
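
/*
 * Illustration (not part of the driver): the cell-type field of the 3rd ID
 * byte encodes bits-per-cell minus one, so a field value of 0 means SLC
 * (1 bit per cell) and a field value of 1 means MLC (2 bits per cell),
 * which is what nand_is_slc() later relies on.
 */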
3726 
3727 /*
3728  * Many newer NAND chips share similar device ID codes, which represent the size
3729  * of the chip. The rest of the parameters must be decoded according to generic or
3730  * manufacturer-specific "extended ID" decoding patterns.
3731  */
3732 void nand_decode_ext_id(struct nand_chip *chip)
3733 {
3734 	struct mtd_info *mtd = nand_to_mtd(chip);
3735 	int extid;
3736 	u8 *id_data = chip->id.data;
3737 	/* The 3rd id byte holds MLC / multichip data */
3738 	chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
3739 	/* The 4th id byte is the important one */
3740 	extid = id_data[3];
3741 
3742 	/* Calc pagesize */
3743 	mtd->writesize = 1024 << (extid & 0x03);
3744 	extid >>= 2;
3745 	/* Calc oobsize */
3746 	mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
3747 	extid >>= 2;
3748 	/* Calc blocksize. Blocksize is a multiple of 64KiB */
3749 	mtd->erasesize = (64 * 1024) << (extid & 0x03);
3750 	extid >>= 2;
3751 	/* Get buswidth information */
3752 	if (extid & 0x1)
3753 		chip->options |= NAND_BUSWIDTH_16;
3754 }
3755 EXPORT_SYMBOL_GPL(nand_decode_ext_id);
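
/*
 * Worked example (illustrative, hypothetical ID): with a 4th ID byte of 0x15
 * the decoding above proceeds as follows:
 *
 *	pagesize  = 1024 << (0x15 & 0x03)              = 2048 bytes
 *	oobsize   = (8 << (0x05 & 0x01)) * (2048 >> 9) = 16 * 4 = 64 bytes
 *	erasesize = (64 * 1024) << (0x01 & 0x03)       = 128 KiB
 *	buswidth  = 8 bits (the remaining bit is 0)
 *
 * where 0x05 and 0x01 are the values of extid after each 2-bit shift.
 */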
3756 
3757 /*
3758  * Old devices have chip data hardcoded in the device ID table. nand_decode_id
3759  * decodes a matching ID table entry and assigns the MTD size parameters for
3760  * the chip.
3761  */
3762 static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
3763 {
3764 	struct mtd_info *mtd = nand_to_mtd(chip);
3765 
3766 	mtd->erasesize = type->erasesize;
3767 	mtd->writesize = type->pagesize;
3768 	mtd->oobsize = mtd->writesize / 32;
3769 
3770 	/* All legacy ID NAND are small-page, SLC */
3771 	chip->bits_per_cell = 1;
3772 }
3773 
3774 /*
3775  * Set the bad block marker/indicator (BBM/BBI) patterns according to some
3776  * heuristic patterns using various detected parameters (e.g., manufacturer,
3777  * page size, cell-type information).
3778  */
3779 static void nand_decode_bbm_options(struct nand_chip *chip)
3780 {
3781 	struct mtd_info *mtd = nand_to_mtd(chip);
3782 
3783 	/* Set the bad block position */
3784 	if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
3785 		chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
3786 	else
3787 		chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
3788 }
3789 
3790 static inline bool is_full_id_nand(struct nand_flash_dev *type)
3791 {
3792 	return type->id_len;
3793 }
3794 
3795 static bool find_full_id_nand(struct nand_chip *chip,
3796 			      struct nand_flash_dev *type)
3797 {
3798 	struct mtd_info *mtd = nand_to_mtd(chip);
3799 	u8 *id_data = chip->id.data;
3800 
3801 	if (!strncmp(type->id, id_data, type->id_len)) {
3802 		mtd->writesize = type->pagesize;
3803 		mtd->erasesize = type->erasesize;
3804 		mtd->oobsize = type->oobsize;
3805 
3806 		chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
3807 		chip->chipsize = (uint64_t)type->chipsize << 20;
3808 		chip->options |= type->options;
3809 		chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
3810 		chip->ecc_step_ds = NAND_ECC_STEP(type);
3811 		chip->onfi_timing_mode_default =
3812 					type->onfi_timing_mode_default;
3813 
3814 		if (!mtd->name)
3815 			mtd->name = type->name;
3816 
3817 		return true;
3818 	}
3819 	return false;
3820 }
3821 
3822 /*
3823  * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
3824  * compliant and does not have a full-id or legacy-id entry in the nand_ids
3825  * table.
3826  */
3827 static void nand_manufacturer_detect(struct nand_chip *chip)
3828 {
3829 	/*
3830 	 * Try manufacturer detection if available and use
3831 	 * nand_decode_ext_id() otherwise.
3832 	 */
3833 	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
3834 	    chip->manufacturer.desc->ops->detect) {
3835 		/* The 3rd id byte holds MLC / multichip data */
3836 		chip->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
3837 		chip->manufacturer.desc->ops->detect(chip);
3838 	} else {
3839 		nand_decode_ext_id(chip);
3840 	}
3841 }
3842 
3843 /*
3844  * Manufacturer initialization. This function is called for all NANDs including
3845  * ONFI and JEDEC compliant ones.
3846  * Manufacturer drivers should put all their specific initialization code in
3847  * their ->init() hook.
3848  */
3849 static int nand_manufacturer_init(struct nand_chip *chip)
3850 {
3851 	if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
3852 	    !chip->manufacturer.desc->ops->init)
3853 		return 0;
3854 
3855 	return chip->manufacturer.desc->ops->init(chip);
3856 }
3857 
3858 /*
3859  * Manufacturer cleanup. This function is called for all NANDs including
3860  * ONFI and JEDEC compliant ones.
3861  * Manufacturer drivers should put all their specific cleanup code in their
3862  * ->cleanup() hook.
3863  */
3864 static void nand_manufacturer_cleanup(struct nand_chip *chip)
3865 {
3866 	/* Release manufacturer private data */
3867 	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
3868 	    chip->manufacturer.desc->ops->cleanup)
3869 		chip->manufacturer.desc->ops->cleanup(chip);
3870 }
3871 
3872 /*
3873  * Get the flash and manufacturer id and lookup if the type is supported.
3874  */
3875 static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
3876 {
3877 	const struct nand_manufacturer *manufacturer;
3878 	struct mtd_info *mtd = nand_to_mtd(chip);
3879 	int busw;
3880 	int i;
3881 	u8 *id_data = chip->id.data;
3882 	u8 maf_id, dev_id;
3883 
3884 	/*
3885 	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
3886 	 * after power-up.
3887 	 */
3888 	nand_reset(chip, 0);
3889 
3890 	/* Select the device */
3891 	chip->select_chip(mtd, 0);
3892 
3893 	/* Send the command for reading device ID */
3894 	chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
3895 
3896 	/* Read manufacturer and device IDs */
3897 	maf_id = chip->read_byte(mtd);
3898 	dev_id = chip->read_byte(mtd);
3899 
3900 	/*
3901 	 * Try again to make sure, as on some systems bus-hold or other
3902 	 * interface concerns can cause random data to appear and look like
3903 	 * a possibly credible NAND flash. If the two results do
3904 	 * not match, ignore the device completely.
3905 	 */
3906 
3907 	chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
3908 
3909 	/* Read entire ID string */
3910 	for (i = 0; i < ARRAY_SIZE(chip->id.data); i++)
3911 		id_data[i] = chip->read_byte(mtd);
3912 
3913 	if (id_data[0] != maf_id || id_data[1] != dev_id) {
3914 		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
3915 			maf_id, dev_id, id_data[0], id_data[1]);
3916 		return -ENODEV;
3917 	}
3918 
3919 	chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));
3920 
3921 	/* Try to identify manufacturer */
3922 	manufacturer = nand_get_manufacturer(maf_id);
3923 	chip->manufacturer.desc = manufacturer;
3924 
3925 	if (!type)
3926 		type = nand_flash_ids;
3927 
3928 	/*
3929 	 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
3930 	 * override it.
3931 	 * This is required to make sure initial NAND bus width set by the
3932 	 * NAND controller driver is coherent with the real NAND bus width
3933 	 * (extracted by auto-detection code).
3934 	 */
3935 	busw = chip->options & NAND_BUSWIDTH_16;
3936 
3937 	/*
3938 	 * The flag is only set (never cleared), reset it to its default value
3939 	 * before starting auto-detection.
3940 	 */
3941 	chip->options &= ~NAND_BUSWIDTH_16;
3942 
3943 	for (; type->name != NULL; type++) {
3944 		if (is_full_id_nand(type)) {
3945 			if (find_full_id_nand(chip, type))
3946 				goto ident_done;
3947 		} else if (dev_id == type->dev_id) {
3948 			break;
3949 		}
3950 	}
3951 
3952 	chip->onfi_version = 0;
3953 	if (!type->name || !type->pagesize) {
3954 		/* Check if the chip is ONFI compliant */
3955 		if (nand_flash_detect_onfi(chip))
3956 			goto ident_done;
3957 
3958 		/* Check if the chip is JEDEC compliant */
3959 		if (nand_flash_detect_jedec(chip))
3960 			goto ident_done;
3961 	}
3962 
3963 	if (!type->name)
3964 		return -ENODEV;
3965 
3966 	if (!mtd->name)
3967 		mtd->name = type->name;
3968 
3969 	chip->chipsize = (uint64_t)type->chipsize << 20;
3970 
3971 	if (!type->pagesize)
3972 		nand_manufacturer_detect(chip);
3973 	else
3974 		nand_decode_id(chip, type);
3975 
3976 	/* Get chip options */
3977 	chip->options |= type->options;
3978 
3979 ident_done:
3980 
3981 	if (chip->options & NAND_BUSWIDTH_AUTO) {
3982 		WARN_ON(busw & NAND_BUSWIDTH_16);
3983 		nand_set_defaults(chip);
3984 	} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
3985 		/*
3986 		 * Check if the bus width is correct. Hardware drivers should
3987 		 * set up the chip correctly!
3988 		 */
3989 		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
3990 			maf_id, dev_id);
3991 		pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
3992 			mtd->name);
3993 		pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
3994 			(chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
3995 		return -EINVAL;
3996 	}
3997 
3998 	nand_decode_bbm_options(chip);
3999 
4000 	/* Calculate the address shift from the page size */
4001 	chip->page_shift = ffs(mtd->writesize) - 1;
4002 	/* Convert chipsize to number of pages per chip -1 */
4003 	chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
4004 
4005 	chip->bbt_erase_shift = chip->phys_erase_shift =
4006 		ffs(mtd->erasesize) - 1;
4007 	if (chip->chipsize & 0xffffffff)
4008 		chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
4009 	else {
4010 		chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
4011 		chip->chip_shift += 32 - 1;
4012 	}
4013 
4014 	chip->badblockbits = 8;
4015 	chip->erase = single_erase;
4016 
4017 	/* Do not replace user supplied command function! */
4018 	if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
4019 		chip->cmdfunc = nand_command_lp;
4020 
4021 	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4022 		maf_id, dev_id);
4023 
4024 	if (chip->onfi_version)
4025 		pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4026 			chip->onfi_params.model);
4027 	else if (chip->jedec_version)
4028 		pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4029 			chip->jedec_params.model);
4030 	else
4031 		pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4032 			type->name);
4033 
4034 	pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
4035 		(int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
4036 		mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
4037 	return 0;
4038 }
4039 
4040 static const char * const nand_ecc_modes[] = {
4041 	[NAND_ECC_NONE]		= "none",
4042 	[NAND_ECC_SOFT]		= "soft",
4043 	[NAND_ECC_HW]		= "hw",
4044 	[NAND_ECC_HW_SYNDROME]	= "hw_syndrome",
4045 	[NAND_ECC_HW_OOB_FIRST]	= "hw_oob_first",
4046 	[NAND_ECC_ON_DIE]	= "on-die",
4047 };
4048 
4049 static int of_get_nand_ecc_mode(struct device_node *np)
4050 {
4051 	const char *pm;
4052 	int err, i;
4053 
4054 	err = of_property_read_string(np, "nand-ecc-mode", &pm);
4055 	if (err < 0)
4056 		return err;
4057 
4058 	for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
4059 		if (!strcasecmp(pm, nand_ecc_modes[i]))
4060 			return i;
4061 
4062 	/*
4063 	 * For backward compatibility we support a few obsolete values that no
4064 	 * longer have mappings in nand_ecc_modes_t (they were merged
4065 	 * with other enums).
4066 	 */
4067 	if (!strcasecmp(pm, "soft_bch"))
4068 		return NAND_ECC_SOFT;
4069 
4070 	return -ENODEV;
4071 }
4072 
4073 static const char * const nand_ecc_algos[] = {
4074 	[NAND_ECC_HAMMING]	= "hamming",
4075 	[NAND_ECC_BCH]		= "bch",
4076 };
4077 
4078 static int of_get_nand_ecc_algo(struct device_node *np)
4079 {
4080 	const char *pm;
4081 	int err, i;
4082 
4083 	err = of_property_read_string(np, "nand-ecc-algo", &pm);
4084 	if (!err) {
4085 		for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
4086 			if (!strcasecmp(pm, nand_ecc_algos[i]))
4087 				return i;
4088 		return -ENODEV;
4089 	}
4090 
4091 	/*
4092 	 * For backward compatibility we also read "nand-ecc-mode" checking
4093 	 * for some obsolete values that used to specify the ECC algorithm.
4094 	 */
4095 	err = of_property_read_string(np, "nand-ecc-mode", &pm);
4096 	if (err < 0)
4097 		return err;
4098 
4099 	if (!strcasecmp(pm, "soft"))
4100 		return NAND_ECC_HAMMING;
4101 	else if (!strcasecmp(pm, "soft_bch"))
4102 		return NAND_ECC_BCH;
4103 
4104 	return -ENODEV;
4105 }
4106 
4107 static int of_get_nand_ecc_step_size(struct device_node *np)
4108 {
4109 	int ret;
4110 	u32 val;
4111 
4112 	ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
4113 	return ret ? ret : val;
4114 }
4115 
4116 static int of_get_nand_ecc_strength(struct device_node *np)
4117 {
4118 	int ret;
4119 	u32 val;
4120 
4121 	ret = of_property_read_u32(np, "nand-ecc-strength", &val);
4122 	return ret ? ret : val;
4123 }
4124 
4125 static int of_get_nand_bus_width(struct device_node *np)
4126 {
4127 	u32 val;
4128 
4129 	if (of_property_read_u32(np, "nand-bus-width", &val))
4130 		return 8;
4131 
4132 	switch (val) {
4133 	case 8:
4134 	case 16:
4135 		return val;
4136 	default:
4137 		return -EIO;
4138 	}
4139 }
4140 
4141 static bool of_get_nand_on_flash_bbt(struct device_node *np)
4142 {
4143 	return of_property_read_bool(np, "nand-on-flash-bbt");
4144 }
4145 
4146 static int nand_dt_init(struct nand_chip *chip)
4147 {
4148 	struct device_node *dn = nand_get_flash_node(chip);
4149 	int ecc_mode, ecc_algo, ecc_strength, ecc_step;
4150 
4151 	if (!dn)
4152 		return 0;
4153 
4154 	if (of_get_nand_bus_width(dn) == 16)
4155 		chip->options |= NAND_BUSWIDTH_16;
4156 
4157 	if (of_get_nand_on_flash_bbt(dn))
4158 		chip->bbt_options |= NAND_BBT_USE_FLASH;
4159 
4160 	ecc_mode = of_get_nand_ecc_mode(dn);
4161 	ecc_algo = of_get_nand_ecc_algo(dn);
4162 	ecc_strength = of_get_nand_ecc_strength(dn);
4163 	ecc_step = of_get_nand_ecc_step_size(dn);
4164 
4165 	if (ecc_mode >= 0)
4166 		chip->ecc.mode = ecc_mode;
4167 
4168 	if (ecc_algo >= 0)
4169 		chip->ecc.algo = ecc_algo;
4170 
4171 	if (ecc_strength >= 0)
4172 		chip->ecc.strength = ecc_strength;
4173 
4174 	if (ecc_step > 0)
4175 		chip->ecc.size = ecc_step;
4176 
4177 	if (of_property_read_bool(dn, "nand-ecc-maximize"))
4178 		chip->ecc.options |= NAND_ECC_MAXIMIZE;
4179 
4180 	return 0;
4181 }
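
/*
 * Illustrative device tree fragment (hypothetical node) for the properties
 * parsed above; nand_dt_init() only interprets "nand-ecc-mode",
 * "nand-ecc-algo", "nand-ecc-strength", "nand-ecc-step-size",
 * "nand-bus-width", "nand-on-flash-bbt" and "nand-ecc-maximize":
 *
 *	nand@0 {
 *		nand-ecc-mode = "hw";
 *		nand-ecc-strength = <8>;
 *		nand-ecc-step-size = <512>;
 *		nand-bus-width = <8>;
 *		nand-on-flash-bbt;
 *	};
 */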
4182 
4183 /**
4184  * nand_scan_ident - [NAND Interface] Scan for the NAND device
4185  * @mtd: MTD device structure
4186  * @maxchips: number of chips to scan for
4187  * @table: alternative NAND ID table
4188  *
4189  * This is the first phase of the normal nand_scan() function. It reads the
4190  * flash ID and sets up MTD fields accordingly.
4191  *
4192  */
4193 int nand_scan_ident(struct mtd_info *mtd, int maxchips,
4194 		    struct nand_flash_dev *table)
4195 {
4196 	int i, nand_maf_id, nand_dev_id;
4197 	struct nand_chip *chip = mtd_to_nand(mtd);
4198 	int ret;
4199 
4200 	ret = nand_dt_init(chip);
4201 	if (ret)
4202 		return ret;
4203 
4204 	if (!mtd->name && mtd->dev.parent)
4205 		mtd->name = dev_name(mtd->dev.parent);
4206 
4207 	if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
4208 		/*
4209 		 * Default functions assigned for chip_select() and
4210 		 * cmdfunc() both expect cmd_ctrl() to be populated,
4211 		 * so we need to check that that's the case
4212 		 */
4213 		pr_err("chip.cmd_ctrl() callback is not provided\n");
4214 		return -EINVAL;
4215 	}
4216 	/* Set the default functions */
4217 	nand_set_defaults(chip);
4218 
4219 	/* Read the flash type */
4220 	ret = nand_detect(chip, table);
4221 	if (ret) {
4222 		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
4223 			pr_warn("No NAND device found\n");
4224 		chip->select_chip(mtd, -1);
4225 		return ret;
4226 	}
4227 
4228 	nand_maf_id = chip->id.data[0];
4229 	nand_dev_id = chip->id.data[1];
4230 
4231 	chip->select_chip(mtd, -1);
4232 
4233 	/* Check for a chip array */
4234 	for (i = 1; i < maxchips; i++) {
4235 		/* See comment in nand_detect() for reset */
4236 		nand_reset(chip, i);
4237 
4238 		chip->select_chip(mtd, i);
4239 		/* Send the command for reading device ID */
4240 		chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
4241 		/* Read manufacturer and device IDs */
4242 		if (nand_maf_id != chip->read_byte(mtd) ||
4243 		    nand_dev_id != chip->read_byte(mtd)) {
4244 			chip->select_chip(mtd, -1);
4245 			break;
4246 		}
4247 		chip->select_chip(mtd, -1);
4248 	}
4249 	if (i > 1)
4250 		pr_info("%d chips detected\n", i);
4251 
4252 	/* Store the number of chips and calc total size for mtd */
4253 	chip->numchips = i;
4254 	mtd->size = i * chip->chipsize;
4255 
4256 	return 0;
4257 }
4258 EXPORT_SYMBOL(nand_scan_ident);
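
/*
 * Minimal sketch (hypothetical "foo" controller, not part of this file) of
 * the two-phase scan that nand_scan_ident()/nand_scan_tail() enable: detect
 * the chip first, tune the ECC configuration based on what was found, then
 * finish the scan. A real driver must also provide its ECC hooks.
 */
static int __maybe_unused foo_nand_attach(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int ret;

	ret = nand_scan_ident(mtd, 1, NULL);
	if (ret)
		return ret;

	/* writesize/oobsize are now known, pick an ECC configuration */
	chip->ecc.mode = NAND_ECC_HW;
	chip->ecc.size = 512;
	chip->ecc.strength = 8;
	/* ... controller-specific ecc.hwctl/calculate/correct set here ... */

	return nand_scan_tail(mtd);
}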
4259 
4260 static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
4261 {
4262 	struct nand_chip *chip = mtd_to_nand(mtd);
4263 	struct nand_ecc_ctrl *ecc = &chip->ecc;
4264 
4265 	if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
4266 		return -EINVAL;
4267 
4268 	switch (ecc->algo) {
4269 	case NAND_ECC_HAMMING:
4270 		ecc->calculate = nand_calculate_ecc;
4271 		ecc->correct = nand_correct_data;
4272 		ecc->read_page = nand_read_page_swecc;
4273 		ecc->read_subpage = nand_read_subpage;
4274 		ecc->write_page = nand_write_page_swecc;
4275 		ecc->read_page_raw = nand_read_page_raw;
4276 		ecc->write_page_raw = nand_write_page_raw;
4277 		ecc->read_oob = nand_read_oob_std;
4278 		ecc->write_oob = nand_write_oob_std;
4279 		if (!ecc->size)
4280 			ecc->size = 256;
4281 		ecc->bytes = 3;
4282 		ecc->strength = 1;
4283 		return 0;
4284 	case NAND_ECC_BCH:
4285 		if (!mtd_nand_has_bch()) {
4286 			WARN(1, "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
4287 			return -EINVAL;
4288 		}
4289 		ecc->calculate = nand_bch_calculate_ecc;
4290 		ecc->correct = nand_bch_correct_data;
4291 		ecc->read_page = nand_read_page_swecc;
4292 		ecc->read_subpage = nand_read_subpage;
4293 		ecc->write_page = nand_write_page_swecc;
4294 		ecc->read_page_raw = nand_read_page_raw;
4295 		ecc->write_page_raw = nand_write_page_raw;
4296 		ecc->read_oob = nand_read_oob_std;
4297 		ecc->write_oob = nand_write_oob_std;
4298 
4299 		/*
4300 		 * Board driver should supply ecc.size and ecc.strength
4301 		 * values to select how many bits are correctable.
4302 		 * Otherwise, default to 4 bits for large page devices.
4303 		 */
4304 		if (!ecc->size && (mtd->oobsize >= 64)) {
4305 			ecc->size = 512;
4306 			ecc->strength = 4;
4307 		}
4308 
4309 		/*
4310 		 * If no ECC placement scheme was provided, pick up the default
4311 		 * large page one.
4312 		 */
4313 		if (!mtd->ooblayout) {
4314 			/* handle large page devices only */
4315 			if (mtd->oobsize < 64) {
4316 				WARN(1, "OOB layout is required when using software BCH on small pages\n");
4317 				return -EINVAL;
4318 			}
4319 
4320 			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
4321 
4322 		}
4323 
4324 		/*
4325 		 * We can only maximize ECC config when the default layout is
4326 		 * used, otherwise we don't know how many bytes can really be
4327 		 * used.
4328 		 */
4329 		if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
4330 		    ecc->options & NAND_ECC_MAXIMIZE) {
4331 			int steps, bytes;
4332 
4333 			/* Always prefer 1k blocks over 512-byte ones */
4334 			ecc->size = 1024;
4335 			steps = mtd->writesize / ecc->size;
4336 
4337 			/* Reserve 2 bytes for the BBM */
4338 			bytes = (mtd->oobsize - 2) / steps;
4339 			ecc->strength = bytes * 8 / fls(8 * ecc->size);
4340 		}
4341 
4342 		/* See nand_bch_init() for details. */
4343 		ecc->bytes = 0;
4344 		ecc->priv = nand_bch_init(mtd);
4345 		if (!ecc->priv) {
4346 			WARN(1, "BCH ECC initialization failed!\n");
4347 			return -EINVAL;
4348 		}
4349 		return 0;
4350 	default:
4351 		WARN(1, "Unsupported ECC algorithm!\n");
4352 		return -EINVAL;
4353 	}
4354 }
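
/*
 * Worked example for the NAND_ECC_MAXIMIZE branch above (illustrative,
 * hypothetical geometry): with a 2048-byte page and 64-byte OOB,
 * steps = 2048 / 1024 = 2, bytes = (64 - 2) / 2 = 31 and
 * strength = 31 * 8 / fls(8 * 1024) = 248 / 14 = 17 bits per 1024-byte step,
 * which nand_bch_init() then has to fit into the available OOB bytes.
 */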
4355 
4356 /**
4357  * nand_check_ecc_caps - check the sanity of preset ECC settings
4358  * @chip: nand chip info structure
4359  * @caps: ECC caps info structure
4360  * @oobavail: OOB size that the ECC engine can use
4361  *
4362  * When ECC step size and strength are already set, check if they are supported
4363  * by the controller and the calculated ECC bytes fit within the chip's OOB.
4364  * On success, the calculated ECC bytes is set.
4365  */
4366 int nand_check_ecc_caps(struct nand_chip *chip,
4367 			const struct nand_ecc_caps *caps, int oobavail)
4368 {
4369 	struct mtd_info *mtd = nand_to_mtd(chip);
4370 	const struct nand_ecc_step_info *stepinfo;
4371 	int preset_step = chip->ecc.size;
4372 	int preset_strength = chip->ecc.strength;
4373 	int nsteps, ecc_bytes;
4374 	int i, j;
4375 
4376 	if (WARN_ON(oobavail < 0))
4377 		return -EINVAL;
4378 
4379 	if (!preset_step || !preset_strength)
4380 		return -ENODATA;
4381 
4382 	nsteps = mtd->writesize / preset_step;
4383 
4384 	for (i = 0; i < caps->nstepinfos; i++) {
4385 		stepinfo = &caps->stepinfos[i];
4386 
4387 		if (stepinfo->stepsize != preset_step)
4388 			continue;
4389 
4390 		for (j = 0; j < stepinfo->nstrengths; j++) {
4391 			if (stepinfo->strengths[j] != preset_strength)
4392 				continue;
4393 
4394 			ecc_bytes = caps->calc_ecc_bytes(preset_step,
4395 							 preset_strength);
4396 			if (WARN_ON_ONCE(ecc_bytes < 0))
4397 				return ecc_bytes;
4398 
4399 			if (ecc_bytes * nsteps > oobavail) {
4400 				pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
4401 				       preset_step, preset_strength);
4402 				return -ENOSPC;
4403 			}
4404 
4405 			chip->ecc.bytes = ecc_bytes;
4406 
4407 			return 0;
4408 		}
4409 	}
4410 
4411 	pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
4412 	       preset_step, preset_strength);
4413 
4414 	return -ENOTSUPP;
4415 }
4416 EXPORT_SYMBOL_GPL(nand_check_ecc_caps);
4417 
4418 /**
4419  * nand_match_ecc_req - meet the chip's requirement with the least ECC bytes
4420  * @chip: nand chip info structure
4421  * @caps: ECC engine caps info structure
4422  * @oobavail: OOB size that the ECC engine can use
4423  *
4424  * If a chip's ECC requirement is provided, try to meet it with the least
4425  * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
4426  * On success, the chosen ECC settings are set.
4427  */
4428 int nand_match_ecc_req(struct nand_chip *chip,
4429 		       const struct nand_ecc_caps *caps, int oobavail)
4430 {
4431 	struct mtd_info *mtd = nand_to_mtd(chip);
4432 	const struct nand_ecc_step_info *stepinfo;
4433 	int req_step = chip->ecc_step_ds;
4434 	int req_strength = chip->ecc_strength_ds;
4435 	int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
4436 	int best_step, best_strength, best_ecc_bytes;
4437 	int best_ecc_bytes_total = INT_MAX;
4438 	int i, j;
4439 
4440 	if (WARN_ON(oobavail < 0))
4441 		return -EINVAL;
4442 
4443 	/* No information provided by the NAND chip */
4444 	if (!req_step || !req_strength)
4445 		return -ENOTSUPP;
4446 
4447 	/* number of correctable bits the chip requires in a page */
4448 	req_corr = mtd->writesize / req_step * req_strength;
4449 
4450 	for (i = 0; i < caps->nstepinfos; i++) {
4451 		stepinfo = &caps->stepinfos[i];
4452 		step_size = stepinfo->stepsize;
4453 
4454 		for (j = 0; j < stepinfo->nstrengths; j++) {
4455 			strength = stepinfo->strengths[j];
4456 
4457 			/*
4458 			 * If both step size and strength are smaller than the
4459 			 * chip's requirement, it is not easy to compare the
4460 			 * resulting reliability.
4461 			 */
4462 			if (step_size < req_step && strength < req_strength)
4463 				continue;
4464 
4465 			if (mtd->writesize % step_size)
4466 				continue;
4467 
4468 			nsteps = mtd->writesize / step_size;
4469 
4470 			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
4471 			if (WARN_ON_ONCE(ecc_bytes < 0))
4472 				continue;
4473 			ecc_bytes_total = ecc_bytes * nsteps;
4474 
4475 			if (ecc_bytes_total > oobavail ||
4476 			    strength * nsteps < req_corr)
4477 				continue;
4478 
4479 			/*
4480 			 * We assume the best is to meet the chip's requirement
4481 			 * with the least number of ECC bytes.
4482 			 */
4483 			if (ecc_bytes_total < best_ecc_bytes_total) {
4484 				best_ecc_bytes_total = ecc_bytes_total;
4485 				best_step = step_size;
4486 				best_strength = strength;
4487 				best_ecc_bytes = ecc_bytes;
4488 			}
4489 		}
4490 	}
4491 
4492 	if (best_ecc_bytes_total == INT_MAX)
4493 		return -ENOTSUPP;
4494 
4495 	chip->ecc.size = best_step;
4496 	chip->ecc.strength = best_strength;
4497 	chip->ecc.bytes = best_ecc_bytes;
4498 
4499 	return 0;
4500 }
4501 EXPORT_SYMBOL_GPL(nand_match_ecc_req);
4502 
4503 /**
4504  * nand_maximize_ecc - choose the max ECC strength available
4505  * @chip: nand chip info structure
4506  * @caps: ECC engine caps info structure
4507  * @oobavail: OOB size that the ECC engine can use
4508  *
4509  * Choose the max ECC strength that is supported on the controller, and can fit
4510  * within the chip's OOB.  On success, the chosen ECC settings are set.
4511  */
4512 int nand_maximize_ecc(struct nand_chip *chip,
4513 		      const struct nand_ecc_caps *caps, int oobavail)
4514 {
4515 	struct mtd_info *mtd = nand_to_mtd(chip);
4516 	const struct nand_ecc_step_info *stepinfo;
4517 	int step_size, strength, nsteps, ecc_bytes, corr;
4518 	int best_corr = 0;
4519 	int best_step = 0;
4520 	int best_strength, best_ecc_bytes;
4521 	int i, j;
4522 
4523 	if (WARN_ON(oobavail < 0))
4524 		return -EINVAL;
4525 
4526 	for (i = 0; i < caps->nstepinfos; i++) {
4527 		stepinfo = &caps->stepinfos[i];
4528 		step_size = stepinfo->stepsize;
4529 
4530 		/* If chip->ecc.size is already set, respect it */
4531 		if (chip->ecc.size && step_size != chip->ecc.size)
4532 			continue;
4533 
4534 		for (j = 0; j < stepinfo->nstrengths; j++) {
4535 			strength = stepinfo->strengths[j];
4536 
4537 			if (mtd->writesize % step_size)
4538 				continue;
4539 
4540 			nsteps = mtd->writesize / step_size;
4541 
4542 			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
4543 			if (WARN_ON_ONCE(ecc_bytes < 0))
4544 				continue;
4545 
4546 			if (ecc_bytes * nsteps > oobavail)
4547 				continue;
4548 
4549 			corr = strength * nsteps;
4550 
4551 			/*
4552 			 * If the number of correctable bits is the same,
4553 			 * a bigger step_size gives more reliability.
4554 			 */
4555 			if (corr > best_corr ||
4556 			    (corr == best_corr && step_size > best_step)) {
4557 				best_corr = corr;
4558 				best_step = step_size;
4559 				best_strength = strength;
4560 				best_ecc_bytes = ecc_bytes;
4561 			}
4562 		}
4563 	}
4564 
4565 	if (!best_corr)
4566 		return -ENOTSUPP;
4567 
4568 	chip->ecc.size = best_step;
4569 	chip->ecc.strength = best_strength;
4570 	chip->ecc.bytes = best_ecc_bytes;
4571 
4572 	return 0;
4573 }
4574 EXPORT_SYMBOL_GPL(nand_maximize_ecc);
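
/*
 * Minimal usage sketch (hypothetical "foo" controller, not part of this
 * file) for the three helpers above. This controller supports 512-byte
 * steps with strengths 4 and 8, spending 7 or 13 ECC bytes per step; a real
 * driver would plug in its own numbers and calc_ecc_bytes() rule, and may
 * want to distinguish error codes instead of simply falling through.
 */
static int foo_calc_ecc_bytes(int step_size, int strength)
{
	return strength == 4 ? 7 : 13;
}

static const int foo_strengths[] = { 4, 8 };

static const struct nand_ecc_step_info foo_stepinfo = {
	.stepsize = 512,
	.strengths = foo_strengths,
	.nstrengths = ARRAY_SIZE(foo_strengths),
};

static const struct nand_ecc_caps foo_ecc_caps = {
	.stepinfos = &foo_stepinfo,
	.nstepinfos = 1,
	.calc_ecc_bytes = foo_calc_ecc_bytes,
};

static int __maybe_unused foo_choose_ecc(struct nand_chip *chip, int oobavail)
{
	/* Honour preset settings, then the chip's requirement, then max out */
	if (!nand_check_ecc_caps(chip, &foo_ecc_caps, oobavail))
		return 0;
	if (!nand_match_ecc_req(chip, &foo_ecc_caps, oobavail))
		return 0;
	return nand_maximize_ecc(chip, &foo_ecc_caps, oobavail);
}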
4575 
4576 /*
4577  * Check if the chip configuration meets the datasheet requirements.
4578  *
4579  * If our configuration corrects A bits per B bytes and the minimum
4580  * required correction level is X bits per Y bytes, then we must ensure
4581  * both of the following are true:
4582  *
4583  * (1) A / B >= X / Y
4584  * (2) A >= X
4585  *
4586  * Requirement (1) ensures we can correct for the required bitflip density.
4587  * Requirement (2) ensures we can correct even when all bitflips are clumped
4588  * in the same sector.
4589  */
4590 static bool nand_ecc_strength_good(struct mtd_info *mtd)
4591 {
4592 	struct nand_chip *chip = mtd_to_nand(mtd);
4593 	struct nand_ecc_ctrl *ecc = &chip->ecc;
4594 	int corr, ds_corr;
4595 
4596 	if (ecc->size == 0 || chip->ecc_step_ds == 0)
4597 		/* Not enough information */
4598 		return true;
4599 
4600 	/*
4601 	 * We get the number of corrected bits per page to compare
4602 	 * the correction density.
4603 	 */
4604 	corr = (mtd->writesize * ecc->strength) / ecc->size;
4605 	ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;
4606 
4607 	return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
4608 }
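
/*
 * Example with illustrative numbers: a chip requiring 8 bits per 512 bytes
 * (ecc_strength_ds = 8, ecc_step_ds = 512) paired with a configuration that
 * corrects only 4 bits per 512 bytes fails both checks above, so the
 * "ECC ... is too weak" warning in nand_scan_tail() will be printed.
 */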
4609 
4610 static bool invalid_ecc_page_accessors(struct nand_chip *chip)
4611 {
4612 	struct nand_ecc_ctrl *ecc = &chip->ecc;
4613 
4614 	if (nand_standard_page_accessors(ecc))
4615 		return false;
4616 
4617 	/*
4618 	 * NAND_ECC_CUSTOM_PAGE_ACCESS flag is set, make sure the NAND
4619 	 * controller driver implements all the page accessors because
4620 	 * default helpers are not suitable when the core does not
4621 	 * send the READ0/PAGEPROG commands.
4622 	 */
4623 	return (!ecc->read_page || !ecc->write_page ||
4624 		!ecc->read_page_raw || !ecc->write_page_raw ||
4625 		(NAND_HAS_SUBPAGE_READ(chip) && !ecc->read_subpage) ||
4626 		(NAND_HAS_SUBPAGE_WRITE(chip) && !ecc->write_subpage &&
4627 		 ecc->hwctl && ecc->calculate));
4628 }
4629 
4630 /**
4631  * nand_scan_tail - [NAND Interface] Scan for the NAND device
4632  * @mtd: MTD device structure
4633  *
4634  * This is the second phase of the normal nand_scan() function. It fills out
4635  * all the uninitialized function pointers with the defaults and scans for a
4636  * bad block table if appropriate.
4637  */
4638 int nand_scan_tail(struct mtd_info *mtd)
4639 {
4640 	struct nand_chip *chip = mtd_to_nand(mtd);
4641 	struct nand_ecc_ctrl *ecc = &chip->ecc;
4642 	struct nand_buffers *nbuf = NULL;
4643 	int ret, i;
4644 
4645 	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
4646 	if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
4647 		   !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
4648 		return -EINVAL;
4649 	}
4650 
4651 	if (invalid_ecc_page_accessors(chip)) {
4652 		pr_err("Invalid ECC page accessors setup\n");
4653 		return -EINVAL;
4654 	}
4655 
4656 	if (!(chip->options & NAND_OWN_BUFFERS)) {
4657 		nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL);
4658 		if (!nbuf)
4659 			return -ENOMEM;
4660 
4661 		nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL);
4662 		if (!nbuf->ecccalc) {
4663 			ret = -ENOMEM;
4664 			goto err_free_nbuf;
4665 		}
4666 
4667 		nbuf->ecccode = kmalloc(mtd->oobsize, GFP_KERNEL);
4668 		if (!nbuf->ecccode) {
4669 			ret = -ENOMEM;
4670 			goto err_free_nbuf;
4671 		}
4672 
4673 		nbuf->databuf = kmalloc(mtd->writesize + mtd->oobsize,
4674 					GFP_KERNEL);
4675 		if (!nbuf->databuf) {
4676 			ret = -ENOMEM;
4677 			goto err_free_nbuf;
4678 		}
4679 
4680 		chip->buffers = nbuf;
4681 	} else if (!chip->buffers) {
4682 		return -ENOMEM;
4683 	}
4684 
4685 	/*
4686 	 * FIXME: some NAND manufacturer drivers expect the first die to be
4687 	 * selected when manufacturer->init() is called. They should be fixed
4688 	 * to explicitly select the relevant die when interacting with the NAND
4689 	 * chip.
4690 	 */
4691 	chip->select_chip(mtd, 0);
4692 	ret = nand_manufacturer_init(chip);
4693 	chip->select_chip(mtd, -1);
4694 	if (ret)
4695 		goto err_free_nbuf;
4696 
4697 	/* Set the internal oob buffer location, just after the page data */
4698 	chip->oob_poi = chip->buffers->databuf + mtd->writesize;
4699 
4700 	/*
4701 	 * If no default placement scheme is given, select an appropriate one.
4702 	 */
4703 	if (!mtd->ooblayout &&
4704 	    !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
4705 		switch (mtd->oobsize) {
4706 		case 8:
4707 		case 16:
4708 			mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
4709 			break;
4710 		case 64:
4711 		case 128:
4712 			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
4713 			break;
4714 		default:
4715 			WARN(1, "No oob scheme defined for oobsize %d\n",
4716 				mtd->oobsize);
4717 			ret = -EINVAL;
4718 			goto err_nand_manuf_cleanup;
4719 		}
4720 	}
4721 
4722 	/*
4723 	 * Check ECC mode, default to software if 3byte/512byte hardware ECC is
4724 	 * Check the ECC mode; if 3-byte/512-byte hardware ECC is selected
4725 	 * but we have a 256-byte page size, fall back to software ECC.
4726 
4727 	switch (ecc->mode) {
4728 	case NAND_ECC_HW_OOB_FIRST:
4729 		/* Similar to NAND_ECC_HW, but a separate read_page handle */
4730 		if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
4731 			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
4732 			ret = -EINVAL;
4733 			goto err_nand_manuf_cleanup;
4734 		}
4735 		if (!ecc->read_page)
4736 			ecc->read_page = nand_read_page_hwecc_oob_first;
4737 
4738 	case NAND_ECC_HW:
4739 		/* Use standard hwecc read page function? */
4740 		if (!ecc->read_page)
4741 			ecc->read_page = nand_read_page_hwecc;
4742 		if (!ecc->write_page)
4743 			ecc->write_page = nand_write_page_hwecc;
4744 		if (!ecc->read_page_raw)
4745 			ecc->read_page_raw = nand_read_page_raw;
4746 		if (!ecc->write_page_raw)
4747 			ecc->write_page_raw = nand_write_page_raw;
4748 		if (!ecc->read_oob)
4749 			ecc->read_oob = nand_read_oob_std;
4750 		if (!ecc->write_oob)
4751 			ecc->write_oob = nand_write_oob_std;
4752 		if (!ecc->read_subpage)
4753 			ecc->read_subpage = nand_read_subpage;
4754 		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
4755 			ecc->write_subpage = nand_write_subpage_hwecc;
4756 
4757 	case NAND_ECC_HW_SYNDROME:
4758 		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
4759 		    (!ecc->read_page ||
4760 		     ecc->read_page == nand_read_page_hwecc ||
4761 		     !ecc->write_page ||
4762 		     ecc->write_page == nand_write_page_hwecc)) {
4763 			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
4764 			ret = -EINVAL;
4765 			goto err_nand_manuf_cleanup;
4766 		}
4767 		/* Use standard syndrome read/write page function? */
4768 		if (!ecc->read_page)
4769 			ecc->read_page = nand_read_page_syndrome;
4770 		if (!ecc->write_page)
4771 			ecc->write_page = nand_write_page_syndrome;
4772 		if (!ecc->read_page_raw)
4773 			ecc->read_page_raw = nand_read_page_raw_syndrome;
4774 		if (!ecc->write_page_raw)
4775 			ecc->write_page_raw = nand_write_page_raw_syndrome;
4776 		if (!ecc->read_oob)
4777 			ecc->read_oob = nand_read_oob_syndrome;
4778 		if (!ecc->write_oob)
4779 			ecc->write_oob = nand_write_oob_syndrome;
4780 
4781 		if (mtd->writesize >= ecc->size) {
4782 			if (!ecc->strength) {
4783 				WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
4784 				ret = -EINVAL;
4785 				goto err_nand_manuf_cleanup;
4786 			}
4787 			break;
4788 		}
4789 		pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
4790 			ecc->size, mtd->writesize);
4791 		ecc->mode = NAND_ECC_SOFT;
4792 		ecc->algo = NAND_ECC_HAMMING;
4793 
4794 	case NAND_ECC_SOFT:
4795 		ret = nand_set_ecc_soft_ops(mtd);
4796 		if (ret) {
4797 			ret = -EINVAL;
4798 			goto err_nand_manuf_cleanup;
4799 		}
4800 		break;
4801 
4802 	case NAND_ECC_ON_DIE:
4803 		if (!ecc->read_page || !ecc->write_page) {
4804 			WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
4805 			ret = -EINVAL;
4806 			goto err_nand_manuf_cleanup;
4807 		}
4808 		if (!ecc->read_oob)
4809 			ecc->read_oob = nand_read_oob_std;
4810 		if (!ecc->write_oob)
4811 			ecc->write_oob = nand_write_oob_std;
4812 		break;
4813 
4814 	case NAND_ECC_NONE:
4815 		pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
4816 		ecc->read_page = nand_read_page_raw;
4817 		ecc->write_page = nand_write_page_raw;
4818 		ecc->read_oob = nand_read_oob_std;
4819 		ecc->read_page_raw = nand_read_page_raw;
4820 		ecc->write_page_raw = nand_write_page_raw;
4821 		ecc->write_oob = nand_write_oob_std;
4822 		ecc->size = mtd->writesize;
4823 		ecc->bytes = 0;
4824 		ecc->strength = 0;
4825 		break;
4826 
4827 	default:
4828 		WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
4829 		ret = -EINVAL;
4830 		goto err_nand_manuf_cleanup;
4831 	}
4832 
4833 	/* For many systems, the standard OOB write also works for raw */
4834 	if (!ecc->read_oob_raw)
4835 		ecc->read_oob_raw = ecc->read_oob;
4836 	if (!ecc->write_oob_raw)
4837 		ecc->write_oob_raw = ecc->write_oob;
4838 
4839 	/* propagate ecc info to mtd_info */
4840 	mtd->ecc_strength = ecc->strength;
4841 	mtd->ecc_step_size = ecc->size;
4842 
4843 	/*
4844 	 * Set the number of read / write steps for one page depending on ECC
4845 	 * mode.
4846 	 */
4847 	ecc->steps = mtd->writesize / ecc->size;
4848 	if (ecc->steps * ecc->size != mtd->writesize) {
4849 		WARN(1, "Invalid ECC parameters\n");
4850 		ret = -EINVAL;
4851 		goto err_nand_manuf_cleanup;
4852 	}
4853 	ecc->total = ecc->steps * ecc->bytes;
4854 	if (ecc->total > mtd->oobsize) {
4855 		WARN(1, "Total number of ECC bytes exceeded oobsize\n");
4856 		ret = -EINVAL;
4857 		goto err_nand_manuf_cleanup;
4858 	}
4859 
4860 	/*
4861 	 * The number of bytes available for a client to place data into
4862 	 * the out of band area.
4863 	 */
4864 	ret = mtd_ooblayout_count_freebytes(mtd);
4865 	if (ret < 0)
4866 		ret = 0;
4867 
4868 	mtd->oobavail = ret;
4869 
4870 	/* ECC sanity check: warn if it's too weak */
4871 	if (!nand_ecc_strength_good(mtd))
4872 		pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
4873 			mtd->name);
4874 
4875 	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
4876 	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
4877 		switch (ecc->steps) {
4878 		case 2:
4879 			mtd->subpage_sft = 1;
4880 			break;
4881 		case 4:
4882 		case 8:
4883 		case 16:
4884 			mtd->subpage_sft = 2;
4885 			break;
4886 		}
4887 	}
4888 	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
4889 
4890 	/* Initialize state */
4891 	chip->state = FL_READY;
4892 
4893 	/* Invalidate the pagebuffer reference */
4894 	chip->pagebuf = -1;
4895 
4896 	/* Large page NAND with SOFT_ECC should support subpage reads */
4897 	switch (ecc->mode) {
4898 	case NAND_ECC_SOFT:
4899 		if (chip->page_shift > 9)
4900 			chip->options |= NAND_SUBPAGE_READ;
4901 		break;
4902 
4903 	default:
4904 		break;
4905 	}
4906 
4907 	/* Fill in remaining MTD driver data */
4908 	mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
4909 	mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
4910 						MTD_CAP_NANDFLASH;
4911 	mtd->_erase = nand_erase;
4912 	mtd->_point = NULL;
4913 	mtd->_unpoint = NULL;
4914 	mtd->_read = nand_read;
4915 	mtd->_write = nand_write;
4916 	mtd->_panic_write = panic_nand_write;
4917 	mtd->_read_oob = nand_read_oob;
4918 	mtd->_write_oob = nand_write_oob;
4919 	mtd->_sync = nand_sync;
4920 	mtd->_lock = NULL;
4921 	mtd->_unlock = NULL;
4922 	mtd->_suspend = nand_suspend;
4923 	mtd->_resume = nand_resume;
4924 	mtd->_reboot = nand_shutdown;
4925 	mtd->_block_isreserved = nand_block_isreserved;
4926 	mtd->_block_isbad = nand_block_isbad;
4927 	mtd->_block_markbad = nand_block_markbad;
4928 	mtd->_max_bad_blocks = nand_max_bad_blocks;
4929 	mtd->writebufsize = mtd->writesize;
4930 
4931 	/*
4932 	 * Initialize bitflip_threshold to its default prior to the scan_bbt() call.
4933 	 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
4934 	 * properly set.
4935 	 */
4936 	if (!mtd->bitflip_threshold)
4937 		mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
4938 
4939 	/* Initialize the ->data_interface field. */
4940 	ret = nand_init_data_interface(chip);
4941 	if (ret)
4942 		goto err_nand_manuf_cleanup;
4943 
4944 	/* Enter fastest possible mode on all dies. */
4945 	for (i = 0; i < chip->numchips; i++) {
4946 		chip->select_chip(mtd, i);
4947 		ret = nand_setup_data_interface(chip, i);
4948 		chip->select_chip(mtd, -1);
4949 
4950 		if (ret)
4951 			goto err_nand_data_iface_cleanup;
4952 	}
4953 
4954 	/* Check, if we should skip the bad block table scan */
4955 	if (chip->options & NAND_SKIP_BBTSCAN)
4956 		return 0;
4957 
4958 	/* Build bad block table */
4959 	ret = chip->scan_bbt(mtd);
4960 	if (ret)
4961 		goto err_nand_data_iface_cleanup;
4962 
4963 	return 0;
4964 
4965 err_nand_data_iface_cleanup:
4966 	nand_release_data_interface(chip);
4967 
4968 err_nand_manuf_cleanup:
4969 	nand_manufacturer_cleanup(chip);
4970 
4971 err_free_nbuf:
4972 	if (nbuf) {
4973 		kfree(nbuf->databuf);
4974 		kfree(nbuf->ecccode);
4975 		kfree(nbuf->ecccalc);
4976 		kfree(nbuf);
4977 	}
4978 
4979 	return ret;
4980 }
4981 EXPORT_SYMBOL(nand_scan_tail);
4982 
4983 /*
4984  * is_module_text_address() isn't exported, and it's mostly a pointless
4985  * test if this is a module _anyway_ -- they'd have to try _really_ hard
4986  * to call us from in-kernel code if the core NAND support is modular.
4987  */
4988 #ifdef MODULE
4989 #define caller_is_module() (1)
4990 #else
4991 #define caller_is_module() \
4992 	is_module_text_address((unsigned long)__builtin_return_address(0))
4993 #endif
4994 
4995 /**
4996  * nand_scan - [NAND Interface] Scan for the NAND device
4997  * @mtd: MTD device structure
4998  * @maxchips: number of chips to scan for
4999  *
5000  * This fills out all the uninitialized function pointers with the defaults.
5001  * The flash ID is read and the mtd/chip structures are filled with the
5002  * appropriate values.
5003  */
5004 int nand_scan(struct mtd_info *mtd, int maxchips)
5005 {
5006 	int ret;
5007 
5008 	ret = nand_scan_ident(mtd, maxchips, NULL);
5009 	if (!ret)
5010 		ret = nand_scan_tail(mtd);
5011 	return ret;
5012 }
5013 EXPORT_SYMBOL(nand_scan);
5014 
5015 /**
5016  * nand_cleanup - [NAND Interface] Free resources held by the NAND device
5017  * @chip: NAND chip object
5018  */
5019 void nand_cleanup(struct nand_chip *chip)
5020 {
5021 	if (chip->ecc.mode == NAND_ECC_SOFT &&
5022 	    chip->ecc.algo == NAND_ECC_BCH)
5023 		nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
5024 
5025 	nand_release_data_interface(chip);
5026 
5027 	/* Free bad block table memory */
5028 	kfree(chip->bbt);
5029 	if (!(chip->options & NAND_OWN_BUFFERS) && chip->buffers) {
5030 		kfree(chip->buffers->databuf);
5031 		kfree(chip->buffers->ecccode);
5032 		kfree(chip->buffers->ecccalc);
5033 		kfree(chip->buffers);
5034 	}
5035 
5036 	/* Free bad block descriptor memory */
5037 	if (chip->badblock_pattern && chip->badblock_pattern->options
5038 			& NAND_BBT_DYNAMICSTRUCT)
5039 		kfree(chip->badblock_pattern);
5040 
5041 	/* Free manufacturer priv data. */
5042 	nand_manufacturer_cleanup(chip);
5043 }
5044 EXPORT_SYMBOL_GPL(nand_cleanup);
5045 
5046 /**
5047  * nand_release - [NAND Interface] Unregister the MTD device and free resources
5048  *		  held by the NAND device
5049  * @mtd: MTD device structure
5050  */
5051 void nand_release(struct mtd_info *mtd)
5052 {
5053 	mtd_device_unregister(mtd);
5054 	nand_cleanup(mtd_to_nand(mtd));
5055 }
5056 EXPORT_SYMBOL_GPL(nand_release);
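
/*
 * Minimal sketch (hypothetical board code, not part of this driver) of the
 * usual bring-up around nand_scan(); the matching teardown path would call
 * nand_release() on the mtd device. Controller wiring (cmd_ctrl, I/O
 * addresses, chip select) is assumed to be done by the board code.
 */
static int __maybe_unused foo_board_nand_init(struct nand_chip *chip,
					      struct device *dev)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	mtd->dev.parent = dev;
	chip->ecc.mode = NAND_ECC_SOFT;
	chip->ecc.algo = NAND_ECC_HAMMING;

	ret = nand_scan(mtd, 1);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		nand_cleanup(chip);

	return ret;
}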
5057 
5058 MODULE_LICENSE("GPL");
5059 MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
5060 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
5061 MODULE_DESCRIPTION("Generic NAND flash driver code");
5062