1 /*
2  * Freescale GPMI NAND Flash Driver
3  *
4  * Copyright (C) 2010-2015 Freescale Semiconductor, Inc.
5  * Copyright (C) 2008 Embedded Alley Solutions, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License along
18  * with this program; if not, write to the Free Software Foundation, Inc.,
19  * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20  */
21 #include <linux/clk.h>
22 #include <linux/slab.h>
23 #include <linux/sched/task_stack.h>
24 #include <linux/interrupt.h>
25 #include <linux/module.h>
26 #include <linux/mtd/partitions.h>
27 #include <linux/of.h>
28 #include <linux/of_device.h>
29 #include "gpmi-nand.h"
30 #include "bch-regs.h"
31 
32 /* Resource names for the GPMI NAND driver. */
33 #define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME  "gpmi-nand"
34 #define GPMI_NAND_BCH_REGS_ADDR_RES_NAME   "bch"
35 #define GPMI_NAND_BCH_INTERRUPT_RES_NAME   "bch"
36 
37 /* add our own bbt descriptor */
38 static uint8_t scan_ff_pattern[] = { 0xff };
39 static struct nand_bbt_descr gpmi_bbt_descr = {
40 	.options	= 0,
41 	.offs		= 0,
42 	.len		= 1,
43 	.pattern	= scan_ff_pattern
44 };
45 
46 /*
47  * We may change the layout if we can get the ECC info from the datasheet,
48  * otherwise we will use all of the (page + OOB) area.
49  */
50 static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section,
51 			      struct mtd_oob_region *oobregion)
52 {
53 	struct nand_chip *chip = mtd_to_nand(mtd);
54 	struct gpmi_nand_data *this = nand_get_controller_data(chip);
55 	struct bch_geometry *geo = &this->bch_geometry;
56 
57 	if (section)
58 		return -ERANGE;
59 
60 	oobregion->offset = 0;
61 	oobregion->length = geo->page_size - mtd->writesize;
62 
63 	return 0;
64 }
65 
66 static int gpmi_ooblayout_free(struct mtd_info *mtd, int section,
67 			       struct mtd_oob_region *oobregion)
68 {
69 	struct nand_chip *chip = mtd_to_nand(mtd);
70 	struct gpmi_nand_data *this = nand_get_controller_data(chip);
71 	struct bch_geometry *geo = &this->bch_geometry;
72 
73 	if (section)
74 		return -ERANGE;
75 
76 	/* The available oob size we have. */
77 	if (geo->page_size < mtd->writesize + mtd->oobsize) {
78 		oobregion->offset = geo->page_size - mtd->writesize;
79 		oobregion->length = mtd->oobsize - oobregion->offset;
80 	}
81 
82 	return 0;
83 }
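/*
 * Illustrative example (hypothetical chip, not from this driver): with
 * writesize = 2048, oobsize = 64 and a BCH page_size of 2110 bytes, the
 * two callbacks above would report:
 *
 *	ECC region  : offset 0,  length = 2110 - 2048 = 62 bytes of OOB
 *	free region : offset 62, length =   64 -   62 =  2 bytes of OOB
 *
 * i.e. almost all of the OOB is consumed by the interleaved BCH parity
 * and metadata, and only the tail is exposed as free OOB space.
 */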
84 
85 static const char * const gpmi_clks_for_mx2x[] = {
86 	"gpmi_io",
87 };
88 
89 static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = {
90 	.ecc = gpmi_ooblayout_ecc,
91 	.free = gpmi_ooblayout_free,
92 };
93 
94 static const struct gpmi_devdata gpmi_devdata_imx23 = {
95 	.type = IS_MX23,
96 	.bch_max_ecc_strength = 20,
97 	.max_chain_delay = 16,
98 	.clks = gpmi_clks_for_mx2x,
99 	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
100 };
101 
102 static const struct gpmi_devdata gpmi_devdata_imx28 = {
103 	.type = IS_MX28,
104 	.bch_max_ecc_strength = 20,
105 	.max_chain_delay = 16,
106 	.clks = gpmi_clks_for_mx2x,
107 	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
108 };
109 
110 static const char * const gpmi_clks_for_mx6[] = {
111 	"gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
112 };
113 
114 static const struct gpmi_devdata gpmi_devdata_imx6q = {
115 	.type = IS_MX6Q,
116 	.bch_max_ecc_strength = 40,
117 	.max_chain_delay = 12,
118 	.clks = gpmi_clks_for_mx6,
119 	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
120 };
121 
122 static const struct gpmi_devdata gpmi_devdata_imx6sx = {
123 	.type = IS_MX6SX,
124 	.bch_max_ecc_strength = 62,
125 	.max_chain_delay = 12,
126 	.clks = gpmi_clks_for_mx6,
127 	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
128 };
129 
130 static const char * const gpmi_clks_for_mx7d[] = {
131 	"gpmi_io", "gpmi_bch_apb",
132 };
133 
134 static const struct gpmi_devdata gpmi_devdata_imx7d = {
135 	.type = IS_MX7D,
136 	.bch_max_ecc_strength = 62,
137 	.max_chain_delay = 12,
138 	.clks = gpmi_clks_for_mx7d,
139 	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d),
140 };
141 
142 static irqreturn_t bch_irq(int irq, void *cookie)
143 {
144 	struct gpmi_nand_data *this = cookie;
145 
146 	gpmi_clear_bch(this);
147 	complete(&this->bch_done);
148 	return IRQ_HANDLED;
149 }
150 
151 /*
152  *  Calculate the ECC strength by hand:
153  *	E : The ECC strength.
154  *	G : the length of Galois Field.
155  *	N : the number of ECC chunks per page.
156  *	O : the oobsize of the NAND chip.
157  *	M : the metadata size per page.
158  *
159  *	The formula is :
160  *		E * G * N
161  *	      ------------ <= (O - M)
162  *                  8
163  *
164  *      So, we get E by:
165  *                    (O - M) * 8
166  *              E <= -------------
167  *                       G * N
168  */
169 static inline int get_ecc_strength(struct gpmi_nand_data *this)
170 {
171 	struct bch_geometry *geo = &this->bch_geometry;
172 	struct mtd_info	*mtd = nand_to_mtd(&this->nand);
173 	int ecc_strength;
174 
175 	ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
176 			/ (geo->gf_len * geo->ecc_chunk_count);
177 
178 	/* We round down to the nearest even number. */
179 	return round_down(ecc_strength, 2);
180 }
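/*
 * Worked example of the formula above (hypothetical values, not taken
 * from a real chip): with O = 64, M = 10, G = 13 and N = 4 (a 2K page
 * split into 512-byte chunks):
 *
 *	E <= (64 - 10) * 8 / (13 * 4) = 432 / 52 ~= 8.3
 *
 * The integer division yields 8 and round_down(8, 2) keeps it at 8, so
 * get_ecc_strength() would return 8 bits of correction per chunk.
 */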
181 
182 static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
183 {
184 	struct bch_geometry *geo = &this->bch_geometry;
185 
186 	/* Do the sanity check. */
187 	if (GPMI_IS_MX23(this) || GPMI_IS_MX28(this)) {
188 		/* The mx23/mx28 only support GF13. */
189 		if (geo->gf_len == 14)
190 			return false;
191 	}
192 	return geo->ecc_strength <= this->devdata->bch_max_ecc_strength;
193 }
194 
195 /*
196  * If we can get the ECC information from the nand chip, we do not
197  * need to calculate it ourselves.
198  *
199  * We may have available oob space in this case.
200  */
201 static int set_geometry_by_ecc_info(struct gpmi_nand_data *this)
202 {
203 	struct bch_geometry *geo = &this->bch_geometry;
204 	struct nand_chip *chip = &this->nand;
205 	struct mtd_info *mtd = nand_to_mtd(chip);
206 	unsigned int block_mark_bit_offset;
207 
208 	if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
209 		return -EINVAL;
210 
211 	switch (chip->ecc_step_ds) {
212 	case SZ_512:
213 		geo->gf_len = 13;
214 		break;
215 	case SZ_1K:
216 		geo->gf_len = 14;
217 		break;
218 	default:
219 		dev_err(this->dev,
220 			"unsupported nand chip. ecc bits : %d, ecc size : %d\n",
221 			chip->ecc_strength_ds, chip->ecc_step_ds);
222 		return -EINVAL;
223 	}
224 	geo->ecc_chunk_size = chip->ecc_step_ds;
225 	geo->ecc_strength = round_up(chip->ecc_strength_ds, 2);
226 	if (!gpmi_check_ecc(this))
227 		return -EINVAL;
228 
229 	/* Keep the C >= O */
230 	if (geo->ecc_chunk_size < mtd->oobsize) {
231 		dev_err(this->dev,
232 			"unsupported nand chip. ecc size: %d, oob size : %d\n",
233 			chip->ecc_step_ds, mtd->oobsize);
234 		return -EINVAL;
235 	}
236 
237 	/* The default value, see comment in the legacy_set_geometry(). */
238 	geo->metadata_size = 10;
239 
240 	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
241 
242 	/*
243 	 * Now, a NAND chip with a 2K page (512-byte data chunks) is shown below:
244 	 *
245 	 *    |                          P                            |
246 	 *    |<----------------------------------------------------->|
247 	 *    |                                                       |
248 	 *    |                                        (Block Mark)   |
249 	 *    |                      P'                      |      | |     |
250 	 *    |<-------------------------------------------->|  D   | |  O' |
251 	 *    |                                              |<---->| |<--->|
252 	 *    V                                              V      V V     V
253 	 *    +---+----------+-+----------+-+----------+-+----------+-+-----+
254 	 *    | M |   data   |E|   data   |E|   data   |E|   data   |E|     |
255 	 *    +---+----------+-+----------+-+----------+-+----------+-+-----+
256 	 *                                                   ^              ^
257 	 *                                                   |      O       |
258 	 *                                                   |<------------>|
259 	 *                                                   |              |
260 	 *
261 	 *	P : the page size for BCH module.
262 	 *	E : The ECC strength.
263 	 *	G : the length of Galois Field.
264 	 *	N : the number of ECC chunks per page.
265 	 *	M : the metadata size per page.
266 	 *	C : the ecc chunk size, aka the "data" above.
267 	 *	P': the nand chip's page size.
268 	 *	O : the nand chip's oob size.
269 	 *	O': the free oob.
270 	 *
271 	 *	The formula for P is :
272 	 *
273 	 *	            E * G * N
274 	 *	       P = ------------ + P' + M
275 	 *                      8
276 	 *
277 	 * The position of the block mark moves forward in the ECC-based view
278 	 * of the page, and the delta is:
279 	 *
280 	 *                   E * G * (N - 1)
281 	 *             D = (---------------- + M)
282 	 *                          8
283 	 *
284 	 * Please see the comment in legacy_set_geometry().
285 	 * With the condition C >= O, we can still get the same result.
286 	 * So the bit position of the physical block mark within the ECC-based
287 	 * view of the page is :
288 	 *             (P' - D) * 8
289 	 */
290 	geo->page_size = mtd->writesize + geo->metadata_size +
291 		(geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
292 
293 	geo->payload_size = mtd->writesize;
294 
295 	geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
296 	geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
297 				+ ALIGN(geo->ecc_chunk_count, 4);
298 
299 	if (!this->swap_block_mark)
300 		return 0;
301 
302 	/* For bit swap. */
303 	block_mark_bit_offset = mtd->writesize * 8 -
304 		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
305 				+ geo->metadata_size * 8);
306 
307 	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
308 	geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
309 	return 0;
310 }
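/*
 * Worked example (hypothetical chip, for illustration only): with
 * writesize = 2048, oobsize = 64, ecc_step_ds = 512 (so gf_len = 13,
 * ecc_chunk_count = 4) and ecc_strength rounded up to 8:
 *
 *	page_size               = 2048 + 10 + (13 * 8 * 4) / 8 = 2110
 *	auxiliary_status_offset = ALIGN(10, 4)                  = 12
 *	auxiliary_size          = ALIGN(10, 4) + ALIGN(4, 4)    = 16
 *
 * The BCH view of the page (2110 bytes) then fits inside the physical
 * page plus OOB (2048 + 64 = 2112 bytes) with two bytes to spare.
 */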
311 
312 static int legacy_set_geometry(struct gpmi_nand_data *this)
313 {
314 	struct bch_geometry *geo = &this->bch_geometry;
315 	struct mtd_info *mtd = nand_to_mtd(&this->nand);
316 	unsigned int metadata_size;
317 	unsigned int status_size;
318 	unsigned int block_mark_bit_offset;
319 
320 	/*
321 	 * The size of the metadata can be changed, though we set it to 10
322 	 * bytes now. But it can't be too large, because we have to save
323 	 * enough space for BCH.
324 	 */
325 	geo->metadata_size = 10;
326 
327 	/* The default for the length of Galois Field. */
328 	geo->gf_len = 13;
329 
330 	/* The default for chunk size. */
331 	geo->ecc_chunk_size = 512;
332 	while (geo->ecc_chunk_size < mtd->oobsize) {
333 		geo->ecc_chunk_size *= 2; /* keep C >= O */
334 		geo->gf_len = 14;
335 	}
336 
337 	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
338 
339 	/* We use the same ECC strength for all chunks. */
340 	geo->ecc_strength = get_ecc_strength(this);
341 	if (!gpmi_check_ecc(this)) {
342 		dev_err(this->dev,
343 			"ecc strength: %d cannot be supported by the controller (%d)\n"
344 			"try to use the minimum ecc strength that the NAND chip requires\n",
345 			geo->ecc_strength,
346 			this->devdata->bch_max_ecc_strength);
347 		return -EINVAL;
348 	}
349 
350 	geo->page_size = mtd->writesize + geo->metadata_size +
351 		(geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
352 	geo->payload_size = mtd->writesize;
353 
354 	/*
355 	 * The auxiliary buffer contains the metadata and the ECC status. The
356 	 * metadata is padded to the nearest 32-bit boundary. The ECC status
357 	 * contains one byte for every ECC chunk, and is also padded to the
358 	 * nearest 32-bit boundary.
359 	 */
360 	metadata_size = ALIGN(geo->metadata_size, 4);
361 	status_size   = ALIGN(geo->ecc_chunk_count, 4);
362 
363 	geo->auxiliary_size = metadata_size + status_size;
364 	geo->auxiliary_status_offset = metadata_size;
365 
366 	if (!this->swap_block_mark)
367 		return 0;
368 
369 	/*
370 	 * We need to compute the byte and bit offsets of
371 	 * the physical block mark within the ECC-based view of the page.
372 	 *
373 	 * NAND chip with 2K page shows below:
374 	 *                                             (Block Mark)
375 	 *                                                   |      |
376 	 *                                                   |  D   |
377 	 *                                                   |<---->|
378 	 *                                                   V      V
379 	 *    +---+----------+-+----------+-+----------+-+----------+-+
380 	 *    | M |   data   |E|   data   |E|   data   |E|   data   |E|
381 	 *    +---+----------+-+----------+-+----------+-+----------+-+
382 	 *
383 	 * The position of the block mark moves forward in the ECC-based view
384 	 * of the page, and the delta is:
385 	 *
386 	 *                   E * G * (N - 1)
387 	 *             D = (---------------- + M)
388 	 *                          8
389 	 *
390 	 * With the formula to compute the ECC strength, and the condition
391 	 *       : C >= O         (C is the ecc chunk size)
392 	 *
393 	 * It's easy to deduce the following result:
394 	 *
395 	 *         E * G       (O - M)      C - M         C - M
396 	 *      ----------- <= ------- <=  --------  <  ---------
397 	 *           8            N           N          (N - 1)
398 	 *
399 	 *  So, we get:
400 	 *
401 	 *                   E * G * (N - 1)
402 	 *             D = (---------------- + M) < C
403 	 *                          8
404 	 *
405 	 *  The above inequality means the position of block mark
406 	 *  within the ECC-based view of the page is still in the data chunk,
407 	 *  and it's NOT in the ECC bits of the chunk.
408 	 *
409 	 *  Use the following to compute the bit position of the
410 	 *  physical block mark within the ECC-based view of the page:
411 	 *          (page_size - D) * 8
412 	 *
413 	 *  --Huang Shijie
414 	 */
415 	block_mark_bit_offset = mtd->writesize * 8 -
416 		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
417 				+ geo->metadata_size * 8);
418 
419 	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
420 	geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
421 	return 0;
422 }
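/*
 * Worked example of the block mark math above (hypothetical 2K/64 chip
 * with E = 8, G = 13, N = 4, M = 10, C = 512):
 *
 *	D = (8 * 13 * 3) / 8 + 10 = 39 + 10 = 49 bytes   (indeed < C)
 *
 *	block_mark_bit_offset (local) = 2048 * 8 - (8 * 13 * 3 + 10 * 8)
 *	                              = 16384 - 392 = 15992
 *	geo->block_mark_byte_offset   = 15992 / 8 = 1999
 *	geo->block_mark_bit_offset    = 15992 % 8 = 0
 *
 * So the physical block mark overlays byte 1999 of the ECC-based view,
 * i.e. it lands D = 49 bytes before the end of the 2048-byte payload,
 * inside the last data chunk rather than in its ECC bits.
 */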
423 
424 int common_nfc_set_geometry(struct gpmi_nand_data *this)
425 {
426 	if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc"))
427 				|| legacy_set_geometry(this))
428 		return set_geometry_by_ecc_info(this);
429 
430 	return 0;
431 }
432 
433 struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
434 {
435 	/* We use DMA channel 0 to access all the nand chips. */
436 	return this->dma_chans[0];
437 }
438 
439 /* Can we use the upper layer's buffer directly for DMA? */
440 void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
441 {
442 	struct scatterlist *sgl = &this->data_sgl;
443 	int ret;
444 
445 	/* first try to map the upper buffer directly */
446 	if (virt_addr_valid(this->upper_buf) &&
447 		!object_is_on_stack(this->upper_buf)) {
448 		sg_init_one(sgl, this->upper_buf, this->upper_len);
449 		ret = dma_map_sg(this->dev, sgl, 1, dr);
450 		if (ret == 0)
451 			goto map_fail;
452 
453 		this->direct_dma_map_ok = true;
454 		return;
455 	}
456 
457 map_fail:
458 	/* We have to use our own DMA buffer. */
459 	sg_init_one(sgl, this->data_buffer_dma, this->upper_len);
460 
461 	if (dr == DMA_TO_DEVICE)
462 		memcpy(this->data_buffer_dma, this->upper_buf, this->upper_len);
463 
464 	dma_map_sg(this->dev, sgl, 1, dr);
465 
466 	this->direct_dma_map_ok = false;
467 }
468 
469 /* This will be called after the DMA operation is finished. */
470 static void dma_irq_callback(void *param)
471 {
472 	struct gpmi_nand_data *this = param;
473 	struct completion *dma_c = &this->dma_done;
474 
475 	switch (this->dma_type) {
476 	case DMA_FOR_COMMAND:
477 		dma_unmap_sg(this->dev, &this->cmd_sgl, 1, DMA_TO_DEVICE);
478 		break;
479 
480 	case DMA_FOR_READ_DATA:
481 		dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE);
482 		if (this->direct_dma_map_ok == false)
483 			memcpy(this->upper_buf, this->data_buffer_dma,
484 				this->upper_len);
485 		break;
486 
487 	case DMA_FOR_WRITE_DATA:
488 		dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE);
489 		break;
490 
491 	case DMA_FOR_READ_ECC_PAGE:
492 	case DMA_FOR_WRITE_ECC_PAGE:
493 		/* We have to wait for the BCH interrupt to finish. */
494 		break;
495 
496 	default:
497 		dev_err(this->dev, "in wrong DMA operation.\n");
498 	}
499 
500 	complete(dma_c);
501 }
502 
503 int start_dma_without_bch_irq(struct gpmi_nand_data *this,
504 				struct dma_async_tx_descriptor *desc)
505 {
506 	struct completion *dma_c = &this->dma_done;
507 	unsigned long timeout;
508 
509 	init_completion(dma_c);
510 
511 	desc->callback		= dma_irq_callback;
512 	desc->callback_param	= this;
513 	dmaengine_submit(desc);
514 	dma_async_issue_pending(get_dma_chan(this));
515 
516 	/* Wait for the interrupt from the DMA block. */
517 	timeout = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
518 	if (!timeout) {
519 		dev_err(this->dev, "DMA timeout, last DMA :%d\n",
520 			this->last_dma_type);
521 		gpmi_dump_info(this);
522 		return -ETIMEDOUT;
523 	}
524 	return 0;
525 }
526 
527 /*
528  * This function is used when reading or writing pages with BCH.
529  * It will wait for the BCH interrupt for at most ONE second.
530  * Actually, we must wait for two interrupts :
531  *	[1] first the DMA interrupt, and
532  *	[2] then the BCH interrupt.
533  */
534 int start_dma_with_bch_irq(struct gpmi_nand_data *this,
535 			struct dma_async_tx_descriptor *desc)
536 {
537 	struct completion *bch_c = &this->bch_done;
538 	unsigned long timeout;
539 
540 	/* Prepare to receive an interrupt from the BCH block. */
541 	init_completion(bch_c);
542 
543 	/* start the DMA */
544 	start_dma_without_bch_irq(this, desc);
545 
546 	/* Wait for the interrupt from the BCH block. */
547 	timeout = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000));
548 	if (!timeout) {
549 		dev_err(this->dev, "BCH timeout, last DMA :%d\n",
550 			this->last_dma_type);
551 		gpmi_dump_info(this);
552 		return -ETIMEDOUT;
553 	}
554 	return 0;
555 }
556 
557 static int acquire_register_block(struct gpmi_nand_data *this,
558 				  const char *res_name)
559 {
560 	struct platform_device *pdev = this->pdev;
561 	struct resources *res = &this->resources;
562 	struct resource *r;
563 	void __iomem *p;
564 
565 	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
566 	p = devm_ioremap_resource(&pdev->dev, r);
567 	if (IS_ERR(p))
568 		return PTR_ERR(p);
569 
570 	if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
571 		res->gpmi_regs = p;
572 	else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
573 		res->bch_regs = p;
574 	else
575 		dev_err(this->dev, "unknown resource name : %s\n", res_name);
576 
577 	return 0;
578 }
579 
580 static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
581 {
582 	struct platform_device *pdev = this->pdev;
583 	const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
584 	struct resource *r;
585 	int err;
586 
587 	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
588 	if (!r) {
589 		dev_err(this->dev, "Can't get resource for %s\n", res_name);
590 		return -ENODEV;
591 	}
592 
593 	err = devm_request_irq(this->dev, r->start, irq_h, 0, res_name, this);
594 	if (err)
595 		dev_err(this->dev, "error requesting BCH IRQ\n");
596 
597 	return err;
598 }
599 
600 static void release_dma_channels(struct gpmi_nand_data *this)
601 {
602 	unsigned int i;
603 	for (i = 0; i < DMA_CHANS; i++)
604 		if (this->dma_chans[i]) {
605 			dma_release_channel(this->dma_chans[i]);
606 			this->dma_chans[i] = NULL;
607 		}
608 }
609 
610 static int acquire_dma_channels(struct gpmi_nand_data *this)
611 {
612 	struct platform_device *pdev = this->pdev;
613 	struct dma_chan *dma_chan;
614 
615 	/* request dma channel */
616 	dma_chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
617 	if (!dma_chan) {
618 		dev_err(this->dev, "Failed to request DMA channel.\n");
619 		goto acquire_err;
620 	}
621 
622 	this->dma_chans[0] = dma_chan;
623 	return 0;
624 
625 acquire_err:
626 	release_dma_channels(this);
627 	return -EINVAL;
628 }
629 
630 static int gpmi_get_clks(struct gpmi_nand_data *this)
631 {
632 	struct resources *r = &this->resources;
633 	struct clk *clk;
634 	int err, i;
635 
636 	for (i = 0; i < this->devdata->clks_count; i++) {
637 		clk = devm_clk_get(this->dev, this->devdata->clks[i]);
638 		if (IS_ERR(clk)) {
639 			err = PTR_ERR(clk);
640 			goto err_clock;
641 		}
642 
643 		r->clock[i] = clk;
644 	}
645 
646 	if (GPMI_IS_MX6(this))
647 		/*
648 		 * Set the default value for the gpmi clock.
649 		 *
650 		 * If you want to use an ONFI NAND in synchronous mode,
651 		 * you should change the clock rate as needed.
652 		 */
653 		clk_set_rate(r->clock[0], 22000000);
654 
655 	return 0;
656 
657 err_clock:
658 	dev_dbg(this->dev, "failed in finding the clocks.\n");
659 	return err;
660 }
661 
662 static int acquire_resources(struct gpmi_nand_data *this)
663 {
664 	int ret;
665 
666 	ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
667 	if (ret)
668 		goto exit_regs;
669 
670 	ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
671 	if (ret)
672 		goto exit_regs;
673 
674 	ret = acquire_bch_irq(this, bch_irq);
675 	if (ret)
676 		goto exit_regs;
677 
678 	ret = acquire_dma_channels(this);
679 	if (ret)
680 		goto exit_regs;
681 
682 	ret = gpmi_get_clks(this);
683 	if (ret)
684 		goto exit_clock;
685 	return 0;
686 
687 exit_clock:
688 	release_dma_channels(this);
689 exit_regs:
690 	return ret;
691 }
692 
693 static void release_resources(struct gpmi_nand_data *this)
694 {
695 	release_dma_channels(this);
696 }
697 
698 static int init_hardware(struct gpmi_nand_data *this)
699 {
700 	int ret;
701 
702 	/*
703 	 * This structure contains the "safe" GPMI timing that should succeed
704 	 * with any NAND Flash device
705 	 * (although, with less-than-optimal performance).
706 	 */
707 	struct nand_timing  safe_timing = {
708 		.data_setup_in_ns        = 80,
709 		.data_hold_in_ns         = 60,
710 		.address_setup_in_ns     = 25,
711 		.gpmi_sample_delay_in_ns =  6,
712 		.tREA_in_ns              = -1,
713 		.tRLOH_in_ns             = -1,
714 		.tRHOH_in_ns             = -1,
715 	};
716 
717 	/* Initialize the hardware. */
718 	ret = gpmi_init(this);
719 	if (ret)
720 		return ret;
721 
722 	this->timing = safe_timing;
723 	return 0;
724 }
725 
726 static int read_page_prepare(struct gpmi_nand_data *this,
727 			void *destination, unsigned length,
728 			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
729 			void **use_virt, dma_addr_t *use_phys)
730 {
731 	struct device *dev = this->dev;
732 
733 	if (virt_addr_valid(destination)) {
734 		dma_addr_t dest_phys;
735 
736 		dest_phys = dma_map_single(dev, destination,
737 						length, DMA_FROM_DEVICE);
738 		if (dma_mapping_error(dev, dest_phys)) {
739 			if (alt_size < length) {
740 				dev_err(dev, "Alternate buffer is too small\n");
741 				return -ENOMEM;
742 			}
743 			goto map_failed;
744 		}
745 		*use_virt = destination;
746 		*use_phys = dest_phys;
747 		this->direct_dma_map_ok = true;
748 		return 0;
749 	}
750 
751 map_failed:
752 	*use_virt = alt_virt;
753 	*use_phys = alt_phys;
754 	this->direct_dma_map_ok = false;
755 	return 0;
756 }
757 
758 static inline void read_page_end(struct gpmi_nand_data *this,
759 			void *destination, unsigned length,
760 			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
761 			void *used_virt, dma_addr_t used_phys)
762 {
763 	if (this->direct_dma_map_ok)
764 		dma_unmap_single(this->dev, used_phys, length, DMA_FROM_DEVICE);
765 }
766 
767 static inline void read_page_swap_end(struct gpmi_nand_data *this,
768 			void *destination, unsigned length,
769 			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
770 			void *used_virt, dma_addr_t used_phys)
771 {
772 	if (!this->direct_dma_map_ok)
773 		memcpy(destination, alt_virt, length);
774 }
775 
776 static int send_page_prepare(struct gpmi_nand_data *this,
777 			const void *source, unsigned length,
778 			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
779 			const void **use_virt, dma_addr_t *use_phys)
780 {
781 	struct device *dev = this->dev;
782 
783 	if (virt_addr_valid(source)) {
784 		dma_addr_t source_phys;
785 
786 		source_phys = dma_map_single(dev, (void *)source, length,
787 						DMA_TO_DEVICE);
788 		if (dma_mapping_error(dev, source_phys)) {
789 			if (alt_size < length) {
790 				dev_err(dev, "Alternate buffer is too small\n");
791 				return -ENOMEM;
792 			}
793 			goto map_failed;
794 		}
795 		*use_virt = source;
796 		*use_phys = source_phys;
797 		return 0;
798 	}
799 map_failed:
800 	/*
801 	 * Copy the content of the source buffer into the alternate
802 	 * buffer and set up the return values accordingly.
803 	 */
804 	memcpy(alt_virt, source, length);
805 
806 	*use_virt = alt_virt;
807 	*use_phys = alt_phys;
808 	return 0;
809 }
810 
811 static void send_page_end(struct gpmi_nand_data *this,
812 			const void *source, unsigned length,
813 			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
814 			const void *used_virt, dma_addr_t used_phys)
815 {
816 	struct device *dev = this->dev;
817 	if (used_virt == source)
818 		dma_unmap_single(dev, used_phys, length, DMA_TO_DEVICE);
819 }
820 
821 static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
822 {
823 	struct device *dev = this->dev;
824 
825 	if (this->page_buffer_virt && virt_addr_valid(this->page_buffer_virt))
826 		dma_free_coherent(dev, this->page_buffer_size,
827 					this->page_buffer_virt,
828 					this->page_buffer_phys);
829 	kfree(this->cmd_buffer);
830 	kfree(this->data_buffer_dma);
831 	kfree(this->raw_buffer);
832 
833 	this->cmd_buffer	= NULL;
834 	this->data_buffer_dma	= NULL;
835 	this->raw_buffer	= NULL;
836 	this->page_buffer_virt	= NULL;
837 	this->page_buffer_size	=  0;
838 }
839 
840 /* Allocate the DMA buffers */
841 static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
842 {
843 	struct bch_geometry *geo = &this->bch_geometry;
844 	struct device *dev = this->dev;
845 	struct mtd_info *mtd = nand_to_mtd(&this->nand);
846 
847 	/* [1] Allocate a command buffer. PAGE_SIZE is enough. */
848 	this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL);
849 	if (this->cmd_buffer == NULL)
850 		goto error_alloc;
851 
852 	/*
853 	 * [2] Allocate a read/write data buffer.
854 	 *     The gpmi_alloc_dma_buffer can be called twice.
855 	 *     We allocate a PAGE_SIZE length buffer if gpmi_alloc_dma_buffer
856 	 *     is called before the nand_scan_ident; and we allocate a buffer
857 	 *     of the real NAND page size when the gpmi_alloc_dma_buffer is
858 	 *     called after the nand_scan_ident.
859 	 */
860 	this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
861 					GFP_DMA | GFP_KERNEL);
862 	if (this->data_buffer_dma == NULL)
863 		goto error_alloc;
864 
865 	/*
866 	 * [3] Allocate the page buffer.
867 	 *
868 	 * Both the payload buffer and the auxiliary buffer must appear on
869 	 * 32-bit boundaries. We presume the size of the payload buffer is a
870 	 * power of two and is much larger than four, which guarantees the
871 	 * auxiliary buffer will appear on a 32-bit boundary.
872 	 */
873 	this->page_buffer_size = geo->payload_size + geo->auxiliary_size;
874 	this->page_buffer_virt = dma_alloc_coherent(dev, this->page_buffer_size,
875 					&this->page_buffer_phys, GFP_DMA);
876 	if (!this->page_buffer_virt)
877 		goto error_alloc;
878 
879 	this->raw_buffer = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
880 	if (!this->raw_buffer)
881 		goto error_alloc;
882 
883 	/* Slice up the page buffer. */
884 	this->payload_virt = this->page_buffer_virt;
885 	this->payload_phys = this->page_buffer_phys;
886 	this->auxiliary_virt = this->payload_virt + geo->payload_size;
887 	this->auxiliary_phys = this->payload_phys + geo->payload_size;
888 	return 0;
889 
890 error_alloc:
891 	gpmi_free_dma_buffer(this);
892 	return -ENOMEM;
893 }
894 
895 static void gpmi_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
896 {
897 	struct nand_chip *chip = mtd_to_nand(mtd);
898 	struct gpmi_nand_data *this = nand_get_controller_data(chip);
899 	int ret;
900 
901 	/*
902 	 * Every operation begins with a command byte and a series of zero or
903 	 * more address bytes. These are distinguished by either the Address
904 	 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
905 	 * asserted. When MTD is ready to execute the command, it will deassert
906 	 * both latch enables.
907 	 *
908 	 * Rather than run a separate DMA operation for every single byte, we
909 	 * queue them up and run a single DMA operation for the entire series
910 	 * of command and data bytes. NAND_CMD_NONE means the END of the queue.
911 	 */
912 	if ((ctrl & (NAND_ALE | NAND_CLE))) {
913 		if (data != NAND_CMD_NONE)
914 			this->cmd_buffer[this->command_length++] = data;
915 		return;
916 	}
917 
918 	if (!this->command_length)
919 		return;
920 
921 	ret = gpmi_send_command(this);
922 	if (ret)
923 		dev_err(this->dev, "Chip: %u, Error %d\n",
924 			this->current_chip, ret);
925 
926 	this->command_length = 0;
927 }
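/*
 * Illustrative sketch of the queueing described above (an assumed,
 * typical large-page READ0 sequence; the exact bytes come from the NAND
 * core, not from this driver):
 *
 *	gpmi_cmd_ctrl(mtd, 0x00, NAND_CLE | ...)      -> cmd_buffer[0]
 *	gpmi_cmd_ctrl(mtd, col/row bytes, NAND_ALE)   -> cmd_buffer[1..5]
 *	gpmi_cmd_ctrl(mtd, NAND_CMD_NONE, ALE/CLE deasserted)
 *	                                              -> one DMA, 6 bytes
 *
 * followed by a second, separate one-byte queue for READSTART (0x30).
 */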
928 
929 static int gpmi_dev_ready(struct mtd_info *mtd)
930 {
931 	struct nand_chip *chip = mtd_to_nand(mtd);
932 	struct gpmi_nand_data *this = nand_get_controller_data(chip);
933 
934 	return gpmi_is_ready(this, this->current_chip);
935 }
936 
937 static void gpmi_select_chip(struct mtd_info *mtd, int chipnr)
938 {
939 	struct nand_chip *chip = mtd_to_nand(mtd);
940 	struct gpmi_nand_data *this = nand_get_controller_data(chip);
941 
942 	if ((this->current_chip < 0) && (chipnr >= 0))
943 		gpmi_begin(this);
944 	else if ((this->current_chip >= 0) && (chipnr < 0))
945 		gpmi_end(this);
946 
947 	this->current_chip = chipnr;
948 }
949 
950 static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
951 {
952 	struct nand_chip *chip = mtd_to_nand(mtd);
953 	struct gpmi_nand_data *this = nand_get_controller_data(chip);
954 
955 	dev_dbg(this->dev, "len is %d\n", len);
956 	this->upper_buf	= buf;
957 	this->upper_len	= len;
958 
959 	gpmi_read_data(this);
960 }
961 
962 static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
963 {
964 	struct nand_chip *chip = mtd_to_nand(mtd);
965 	struct gpmi_nand_data *this = nand_get_controller_data(chip);
966 
967 	dev_dbg(this->dev, "len is %d\n", len);
968 	this->upper_buf	= (uint8_t *)buf;
969 	this->upper_len	= len;
970 
971 	gpmi_send_data(this);
972 }
973 
974 static uint8_t gpmi_read_byte(struct mtd_info *mtd)
975 {
976 	struct nand_chip *chip = mtd_to_nand(mtd);
977 	struct gpmi_nand_data *this = nand_get_controller_data(chip);
978 	uint8_t *buf = this->data_buffer_dma;
979 
980 	gpmi_read_buf(mtd, buf, 1);
981 	return buf[0];
982 }
983 
984 /*
985  * Handles block mark swapping.
986  * It can be called either to swap the block mark or to swap it back,
987  * because the operations are the same.
988  */
989 static void block_mark_swapping(struct gpmi_nand_data *this,
990 				void *payload, void *auxiliary)
991 {
992 	struct bch_geometry *nfc_geo = &this->bch_geometry;
993 	unsigned char *p;
994 	unsigned char *a;
995 	unsigned int  bit;
996 	unsigned char mask;
997 	unsigned char from_data;
998 	unsigned char from_oob;
999 
1000 	if (!this->swap_block_mark)
1001 		return;
1002 
1003 	/*
1004 	 * If control arrives here, we're swapping. Make some convenience
1005 	 * variables.
1006 	 */
1007 	bit = nfc_geo->block_mark_bit_offset;
1008 	p   = payload + nfc_geo->block_mark_byte_offset;
1009 	a   = auxiliary;
1010 
1011 	/*
1012 	 * Get the byte from the data area that overlays the block mark. Since
1013 	 * the ECC engine applies its own view to the bits in the page, the
1014 	 * physical block mark won't (in general) appear on a byte boundary in
1015 	 * the data.
1016 	 */
1017 	from_data = (p[0] >> bit) | (p[1] << (8 - bit));
1018 
1019 	/* Get the byte from the OOB. */
1020 	from_oob = a[0];
1021 
1022 	/* Swap them. */
1023 	a[0] = from_data;
1024 
1025 	mask = (0x1 << bit) - 1;
1026 	p[0] = (p[0] & mask) | (from_oob << bit);
1027 
1028 	mask = ~0 << bit;
1029 	p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
1030 }
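/*
 * Bit-splice example (purely illustrative): suppose bit = 5. Then
 *
 *	from_data = (p[0] >> 5) | (p[1] << 3);
 *
 * takes bits 7..5 of p[0] as the low three bits and bits 4..0 of p[1]
 * as the high five bits. Writing the OOB byte back is the mirror image:
 *
 *	p[0] = (p[0] & 0x1f) | (from_oob << 5);    mask = (1 << 5) - 1
 *	p[1] = (p[1] & 0xe0) | (from_oob >> 3);    mask = ~0 << 5
 *
 * so the byte that overlays the physical block mark is reassembled from
 * (and written back across) two partially overlapping data bytes.
 */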
1031 
1032 static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1033 				uint8_t *buf, int oob_required, int page)
1034 {
1035 	struct gpmi_nand_data *this = nand_get_controller_data(chip);
1036 	struct bch_geometry *nfc_geo = &this->bch_geometry;
1037 	void          *payload_virt;
1038 	dma_addr_t    payload_phys;
1039 	void          *auxiliary_virt;
1040 	dma_addr_t    auxiliary_phys;
1041 	unsigned int  i;
1042 	unsigned char *status;
1043 	unsigned int  max_bitflips = 0;
1044 	int           ret;
1045 
1046 	dev_dbg(this->dev, "page number is : %d\n", page);
1047 	ret = read_page_prepare(this, buf, nfc_geo->payload_size,
1048 					this->payload_virt, this->payload_phys,
1049 					nfc_geo->payload_size,
1050 					&payload_virt, &payload_phys);
1051 	if (ret) {
1052 		dev_err(this->dev, "Inadequate DMA buffer\n");
1053 		ret = -ENOMEM;
1054 		return ret;
1055 	}
1056 	auxiliary_virt = this->auxiliary_virt;
1057 	auxiliary_phys = this->auxiliary_phys;
1058 
1059 	/* go! */
1060 	ret = gpmi_read_page(this, payload_phys, auxiliary_phys);
1061 	read_page_end(this, buf, nfc_geo->payload_size,
1062 			this->payload_virt, this->payload_phys,
1063 			nfc_geo->payload_size,
1064 			payload_virt, payload_phys);
1065 	if (ret) {
1066 		dev_err(this->dev, "Error in ECC-based read: %d\n", ret);
1067 		return ret;
1068 	}
1069 
1070 	/* Loop over status bytes, accumulating ECC status. */
1071 	status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
1072 
1073 	read_page_swap_end(this, buf, nfc_geo->payload_size,
1074 			   this->payload_virt, this->payload_phys,
1075 			   nfc_geo->payload_size,
1076 			   payload_virt, payload_phys);
1077 
1078 	for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
1079 		if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
1080 			continue;
1081 
1082 		if (*status == STATUS_UNCORRECTABLE) {
1083 			int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
1084 			u8 *eccbuf = this->raw_buffer;
1085 			int offset, bitoffset;
1086 			int eccbytes;
1087 			int flips;
1088 
1089 			/* Read ECC bytes into our internal raw_buffer */
1090 			offset = nfc_geo->metadata_size * 8;
1091 			offset += ((8 * nfc_geo->ecc_chunk_size) + eccbits) * (i + 1);
1092 			offset -= eccbits;
1093 			bitoffset = offset % 8;
1094 			eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
1095 			offset /= 8;
1096 			eccbytes -= offset;
1097 			chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
1098 			chip->read_buf(mtd, eccbuf, eccbytes);
1099 
1100 			/*
1101 			 * ECC data are not byte aligned and we may have
1102 			 * in-band data in the first and last byte of
1103 			 * eccbuf. Set non-eccbits to one so that
1104 			 * nand_check_erased_ecc_chunk() does not count them
1105 			 * as bitflips.
1106 			 */
1107 			if (bitoffset)
1108 				eccbuf[0] |= GENMASK(bitoffset - 1, 0);
1109 
1110 			bitoffset = (bitoffset + eccbits) % 8;
1111 			if (bitoffset)
1112 				eccbuf[eccbytes - 1] |= GENMASK(7, bitoffset);
1113 
1114 			/*
1115 			 * The ECC hardware has an uncorrectable ECC status
1116 			 * code in case we have bitflips in an erased page. As
1117 			 * nothing was written into this subpage the ECC is
1118 			 * obviously wrong and we cannot trust it. We assume
1119 			 * at this point that we are reading an erased page and
1120 			 * try to correct the bitflips in the buffer, up to
1121 			 * ecc_strength bitflips. If this is a page with random
1122 			 * data, we exceed this number of bitflips and have an
1123 			 * ECC failure. Otherwise we use the corrected buffer.
1124 			 */
1125 			if (i == 0) {
1126 				/* The first block includes metadata */
1127 				flips = nand_check_erased_ecc_chunk(
1128 						buf + i * nfc_geo->ecc_chunk_size,
1129 						nfc_geo->ecc_chunk_size,
1130 						eccbuf, eccbytes,
1131 						auxiliary_virt,
1132 						nfc_geo->metadata_size,
1133 						nfc_geo->ecc_strength);
1134 			} else {
1135 				flips = nand_check_erased_ecc_chunk(
1136 						buf + i * nfc_geo->ecc_chunk_size,
1137 						nfc_geo->ecc_chunk_size,
1138 						eccbuf, eccbytes,
1139 						NULL, 0,
1140 						nfc_geo->ecc_strength);
1141 			}
1142 
1143 			if (flips > 0) {
1144 				max_bitflips = max_t(unsigned int, max_bitflips,
1145 						     flips);
1146 				mtd->ecc_stats.corrected += flips;
1147 				continue;
1148 			}
1149 
1150 			mtd->ecc_stats.failed++;
1151 			continue;
1152 		}
1153 
1154 		mtd->ecc_stats.corrected += *status;
1155 		max_bitflips = max_t(unsigned int, max_bitflips, *status);
1156 	}
1157 
1158 	/* handle the block mark swapping */
1159 	block_mark_swapping(this, buf, auxiliary_virt);
1160 
1161 	if (oob_required) {
1162 		/*
1163 		 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
1164 		 * for details about our policy for delivering the OOB.
1165 		 *
1166 		 * We fill the caller's buffer with set bits, and then copy the
1167 		 * block mark to the caller's buffer. Note that, if block mark
1168 		 * swapping was necessary, it has already been done, so we can
1169 		 * rely on the first byte of the auxiliary buffer to contain
1170 		 * the block mark.
1171 		 */
1172 		memset(chip->oob_poi, ~0, mtd->oobsize);
1173 		chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
1174 	}
1175 
1176 	return max_bitflips;
1177 }
1178 
1179 /* Fake a virtual small page for the subpage read */
1180 static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1181 			uint32_t offs, uint32_t len, uint8_t *buf, int page)
1182 {
1183 	struct gpmi_nand_data *this = nand_get_controller_data(chip);
1184 	void __iomem *bch_regs = this->resources.bch_regs;
1185 	struct bch_geometry old_geo = this->bch_geometry;
1186 	struct bch_geometry *geo = &this->bch_geometry;
1187 	int size = chip->ecc.size; /* ECC chunk size */
1188 	int meta, n, page_size;
1189 	u32 r1_old, r2_old, r1_new, r2_new;
1190 	unsigned int max_bitflips;
1191 	int first, last, marker_pos;
1192 	int ecc_parity_size;
1193 	int col = 0;
1194 	int old_swap_block_mark = this->swap_block_mark;
1195 
1196 	/* The size of ECC parity */
1197 	ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
1198 
1199 	/* Align it with the chunk size */
1200 	first = offs / size;
1201 	last = (offs + len - 1) / size;
1202 
1203 	if (this->swap_block_mark) {
1204 		/*
1205 		 * Find the chunk which contains the Block Marker.
1206 		 * If this chunk is in the range of [first, last],
1207 		 * we have to read out the whole page.
1208 		 * Why? Because we swapped the data at the Block Marker
1209 		 * position into the metadata, which is bound to chunk 0.
1210 		 */
1211 		marker_pos = geo->block_mark_byte_offset / size;
1212 		if (last >= marker_pos && first <= marker_pos) {
1213 			dev_dbg(this->dev,
1214 				"page:%d, first:%d, last:%d, marker at:%d\n",
1215 				page, first, last, marker_pos);
1216 			return gpmi_ecc_read_page(mtd, chip, buf, 0, page);
1217 		}
1218 	}
1219 
1220 	meta = geo->metadata_size;
1221 	if (first) {
1222 		col = meta + (size + ecc_parity_size) * first;
1223 		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, col, -1);
1224 
1225 		meta = 0;
1226 		buf = buf + first * size;
1227 	}
1228 
1229 	/* Save the old environment */
1230 	r1_old = r1_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT0);
1231 	r2_old = r2_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT1);
1232 
1233 	/* change the BCH registers and bch_geometry{} */
1234 	n = last - first + 1;
1235 	page_size = meta + (size + ecc_parity_size) * n;
1236 
1237 	r1_new &= ~(BM_BCH_FLASH0LAYOUT0_NBLOCKS |
1238 			BM_BCH_FLASH0LAYOUT0_META_SIZE);
1239 	r1_new |= BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1)
1240 			| BF_BCH_FLASH0LAYOUT0_META_SIZE(meta);
1241 	writel(r1_new, bch_regs + HW_BCH_FLASH0LAYOUT0);
1242 
1243 	r2_new &= ~BM_BCH_FLASH0LAYOUT1_PAGE_SIZE;
1244 	r2_new |= BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size);
1245 	writel(r2_new, bch_regs + HW_BCH_FLASH0LAYOUT1);
1246 
1247 	geo->ecc_chunk_count = n;
1248 	geo->payload_size = n * size;
1249 	geo->page_size = page_size;
1250 	geo->auxiliary_status_offset = ALIGN(meta, 4);
1251 
1252 	dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
1253 		page, offs, len, col, first, n, page_size);
1254 
1255 	/* Read the subpage now */
1256 	this->swap_block_mark = false;
1257 	max_bitflips = gpmi_ecc_read_page(mtd, chip, buf, 0, page);
1258 
1259 	/* Restore */
1260 	writel(r1_old, bch_regs + HW_BCH_FLASH0LAYOUT0);
1261 	writel(r2_old, bch_regs + HW_BCH_FLASH0LAYOUT1);
1262 	this->bch_geometry = old_geo;
1263 	this->swap_block_mark = old_swap_block_mark;
1264 
1265 	return max_bitflips;
1266 }
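/*
 * Worked example of the subpage re-layout above (same hypothetical 2K
 * chip: 512-byte chunks, gf_len = 13, ecc_strength = 8, metadata = 10):
 * reading bytes 512..1023 of a page gives
 *
 *	ecc_parity_size = 13 * 8 / 8 = 13 bytes
 *	first = 512 / 512 = 1, last = 1023 / 512 = 1, so n = 1
 *	col   = 10 + (512 + 13) * 1 = 535	(RNDOUT skips chunk 0)
 *	meta  = 0
 *	page_size = 0 + (512 + 13) * 1 = 525 bytes
 *
 * The chunk holding the block mark (chunk 3 in the earlier example) is
 * outside [first, last], so the BCH block can temporarily be programmed
 * to see a one-chunk, zero-metadata page of 525 bytes at column 535.
 */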
1267 
1268 static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1269 				const uint8_t *buf, int oob_required, int page)
1270 {
1271 	struct gpmi_nand_data *this = nand_get_controller_data(chip);
1272 	struct bch_geometry *nfc_geo = &this->bch_geometry;
1273 	const void *payload_virt;
1274 	dma_addr_t payload_phys;
1275 	const void *auxiliary_virt;
1276 	dma_addr_t auxiliary_phys;
1277 	int        ret;
1278 
1279 	dev_dbg(this->dev, "ecc write page.\n");
1280 	if (this->swap_block_mark) {
1281 		/*
1282 		 * If control arrives here, we're doing block mark swapping.
1283 		 * Since we can't modify the caller's buffers, we must copy them
1284 		 * into our own.
1285 		 */
1286 		memcpy(this->payload_virt, buf, mtd->writesize);
1287 		payload_virt = this->payload_virt;
1288 		payload_phys = this->payload_phys;
1289 
1290 		memcpy(this->auxiliary_virt, chip->oob_poi,
1291 				nfc_geo->auxiliary_size);
1292 		auxiliary_virt = this->auxiliary_virt;
1293 		auxiliary_phys = this->auxiliary_phys;
1294 
1295 		/* Handle block mark swapping. */
1296 		block_mark_swapping(this,
1297 				(void *)payload_virt, (void *)auxiliary_virt);
1298 	} else {
1299 		/*
1300 		 * If control arrives here, we're not doing block mark swapping,
1301 		 * so we can try to use the caller's buffers.
1302 		 */
1303 		ret = send_page_prepare(this,
1304 				buf, mtd->writesize,
1305 				this->payload_virt, this->payload_phys,
1306 				nfc_geo->payload_size,
1307 				&payload_virt, &payload_phys);
1308 		if (ret) {
1309 			dev_err(this->dev, "Inadequate payload DMA buffer\n");
1310 			return 0;
1311 		}
1312 
1313 		ret = send_page_prepare(this,
1314 				chip->oob_poi, mtd->oobsize,
1315 				this->auxiliary_virt, this->auxiliary_phys,
1316 				nfc_geo->auxiliary_size,
1317 				&auxiliary_virt, &auxiliary_phys);
1318 		if (ret) {
1319 			dev_err(this->dev, "Inadequate auxiliary DMA buffer\n");
1320 			goto exit_auxiliary;
1321 		}
1322 	}
1323 
1324 	/* Ask the NFC. */
1325 	ret = gpmi_send_page(this, payload_phys, auxiliary_phys);
1326 	if (ret)
1327 		dev_err(this->dev, "Error in ECC-based write: %d\n", ret);
1328 
1329 	if (!this->swap_block_mark) {
1330 		send_page_end(this, chip->oob_poi, mtd->oobsize,
1331 				this->auxiliary_virt, this->auxiliary_phys,
1332 				nfc_geo->auxiliary_size,
1333 				auxiliary_virt, auxiliary_phys);
1334 exit_auxiliary:
1335 		send_page_end(this, buf, mtd->writesize,
1336 				this->payload_virt, this->payload_phys,
1337 				nfc_geo->payload_size,
1338 				payload_virt, payload_phys);
1339 	}
1340 
1341 	return 0;
1342 }
1343 
1344 /*
1345  * There are several places in this driver where we have to handle the OOB and
1346  * block marks. This is the function where things are the most complicated, so
1347  * this is where we try to explain it all. All the other places refer back to
1348  * here.
1349  *
1350  * These are the rules, in order of decreasing importance:
1351  *
1352  * 1) Nothing the caller does can be allowed to imperil the block mark.
1353  *
1354  * 2) In read operations, the first byte of the OOB we return must reflect the
1355  *    true state of the block mark, no matter where that block mark appears in
1356  *    the physical page.
1357  *
1358  * 3) ECC-based read operations return an OOB full of set bits (since we never
1359  *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
1360  *    return).
1361  *
1362  * 4) "Raw" read operations return a direct view of the physical bytes in the
1363  *    page, using the conventional definition of which bytes are data and which
1364  *    are OOB. This gives the caller a way to see the actual, physical bytes
1365  *    in the page, without the distortions applied by our ECC engine.
1366  *
1367  *
1368  * What we do for this specific read operation depends on two questions:
1369  *
1370  * 1) Are we doing a "raw" read, or an ECC-based read?
1371  *
1372  * 2) Are we using block mark swapping or transcription?
1373  *
1374  * There are four cases, illustrated by the following Karnaugh map:
1375  *
1376  *                    |           Raw           |         ECC-based       |
1377  *       -------------+-------------------------+-------------------------+
1378  *                    | Read the conventional   |                         |
1379  *                    | OOB at the end of the   |                         |
1380  *       Swapping     | page and return it. It  |                         |
1381  *                    | contains exactly what   |                         |
1382  *                    | we want.                | Read the block mark and |
1383  *       -------------+-------------------------+ return it in a buffer   |
1384  *                    | Read the conventional   | full of set bits.       |
1385  *                    | OOB at the end of the   |                         |
1386  *                    | page and also the block |                         |
1387  *       Transcribing | mark in the metadata.   |                         |
1388  *                    | Copy the block mark     |                         |
1389  *                    | into the first byte of  |                         |
1390  *                    | the OOB.                |                         |
1391  *       -------------+-------------------------+-------------------------+
1392  *
1393  * Note that we break rule #4 in the Transcribing/Raw case because we're not
1394  * giving an accurate view of the actual, physical bytes in the page (we're
1395  * overwriting the block mark). That's OK because it's more important to follow
1396  * rule #2.
1397  *
1398  * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
1399  * easy. When reading a page, for example, the NAND Flash MTD code calls our
1400  * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
1401  * ECC-based or raw view of the page is implicit in which function it calls
1402  * (there is a similar pair of ECC-based/raw functions for writing).
1403  */
1404 static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1405 				int page)
1406 {
1407 	struct gpmi_nand_data *this = nand_get_controller_data(chip);
1408 
1409 	dev_dbg(this->dev, "page number is %d\n", page);
1410 	/* clear the OOB buffer */
1411 	memset(chip->oob_poi, ~0, mtd->oobsize);
1412 
1413 	/* Read out the conventional OOB. */
1414 	chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
1415 	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1416 
1417 	/*
1418 	 * Now, we want to make sure the block mark is correct. In the
1419 	 * non-transcribing case (!GPMI_IS_MX23()), we already have it.
1420 	 * Otherwise, we need to explicitly read it.
1421 	 */
1422 	if (GPMI_IS_MX23(this)) {
1423 		/* Read the block mark into the first byte of the OOB buffer. */
1424 		chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
1425 		chip->oob_poi[0] = chip->read_byte(mtd);
1426 	}
1427 
1428 	return 0;
1429 }
1430 
1431 static int
1432 gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
1433 {
1434 	struct mtd_oob_region of = { };
1435 	int status = 0;
1436 
1437 	/* Do we have available oob area? */
1438 	mtd_ooblayout_free(mtd, 0, &of);
1439 	if (!of.length)
1440 		return -EPERM;
1441 
1442 	if (!nand_is_slc(chip))
1443 		return -EPERM;
1444 
1445 	chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize + of.offset, page);
1446 	chip->write_buf(mtd, chip->oob_poi + of.offset, of.length);
1447 	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1448 
1449 	status = chip->waitfunc(mtd, chip);
1450 	return status & NAND_STATUS_FAIL ? -EIO : 0;
1451 }
1452 
1453 /*
1454  * This function reads a NAND page without involving the ECC engine (no HW
1455  * ECC correction).
1456  * The tricky part in the GPMI/BCH controller is that it stores ECC bits
1457  * inline (interleaved with the payload DATA), and does not align data chunks on
1458  * byte boundaries.
1459  * We thus need to take care moving the payload data and ECC bits stored in the
1460  * page into the provided buffers, which is why we're using gpmi_copy_bits.
1461  *
1462  * See set_geometry_by_ecc_info inline comments to have a full description
1463  * of the layout used by the GPMI controller.
1464  */
1465 static int gpmi_ecc_read_page_raw(struct mtd_info *mtd,
1466 				  struct nand_chip *chip, uint8_t *buf,
1467 				  int oob_required, int page)
1468 {
1469 	struct gpmi_nand_data *this = nand_get_controller_data(chip);
1470 	struct bch_geometry *nfc_geo = &this->bch_geometry;
1471 	int eccsize = nfc_geo->ecc_chunk_size;
1472 	int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
1473 	u8 *tmp_buf = this->raw_buffer;
1474 	size_t src_bit_off;
1475 	size_t oob_bit_off;
1476 	size_t oob_byte_off;
1477 	uint8_t *oob = chip->oob_poi;
1478 	int step;
1479 
1480 	chip->read_buf(mtd, tmp_buf,
1481 		       mtd->writesize + mtd->oobsize);
1482 
1483 	/*
1484 	 * If required, swap the bad block marker and the data stored in the
1485 	 * metadata section, so that we don't wrongly consider a block as bad.
1486 	 *
1487 	 * See the layout description for a detailed explanation on why this
1488 	 * is needed.
1489 	 */
1490 	if (this->swap_block_mark) {
1491 		u8 swap = tmp_buf[0];
1492 
1493 		tmp_buf[0] = tmp_buf[mtd->writesize];
1494 		tmp_buf[mtd->writesize] = swap;
1495 	}
1496 
1497 	/*
1498 	 * Copy the metadata section into the oob buffer (this section is
1499 	 * guaranteed to be aligned on a byte boundary).
1500 	 */
1501 	if (oob_required)
1502 		memcpy(oob, tmp_buf, nfc_geo->metadata_size);
1503 
1504 	oob_bit_off = nfc_geo->metadata_size * 8;
1505 	src_bit_off = oob_bit_off;
1506 
1507 	/* Extract interleaved payload data and ECC bits */
1508 	for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
1509 		if (buf)
1510 			gpmi_copy_bits(buf, step * eccsize * 8,
1511 				       tmp_buf, src_bit_off,
1512 				       eccsize * 8);
1513 		src_bit_off += eccsize * 8;
1514 
1515 		/* Align the last ECC block to a byte boundary */
1516 		if (step == nfc_geo->ecc_chunk_count - 1 &&
1517 		    (oob_bit_off + eccbits) % 8)
1518 			eccbits += 8 - ((oob_bit_off + eccbits) % 8);
1519 
1520 		if (oob_required)
1521 			gpmi_copy_bits(oob, oob_bit_off,
1522 				       tmp_buf, src_bit_off,
1523 				       eccbits);
1524 
1525 		src_bit_off += eccbits;
1526 		oob_bit_off += eccbits;
1527 	}
1528 
1529 	if (oob_required) {
1530 		oob_byte_off = oob_bit_off / 8;
1531 
1532 		if (oob_byte_off < mtd->oobsize)
1533 			memcpy(oob + oob_byte_off,
1534 			       tmp_buf + mtd->writesize + oob_byte_off,
1535 			       mtd->oobsize - oob_byte_off);
1536 	}
1537 
1538 	return 0;
1539 }
1540 
1541 /*
1542  * This function writes a NAND page without involving the ECC engine (no HW
1543  * ECC generation).
1544  * The tricky part in the GPMI/BCH controller is that it stores ECC bits
1545  * inline (interleaved with the payload DATA), and does not align data chunks on
1546  * byte boundaries.
1547  * We thus need to take care moving the OOB area at the right place in the
1548  * final page, which is why we're using gpmi_copy_bits.
1549  *
1550  * See set_geometry_by_ecc_info inline comments to have a full description
1551  * of the layout used by the GPMI controller.
1552  */
1553 static int gpmi_ecc_write_page_raw(struct mtd_info *mtd,
1554 				   struct nand_chip *chip,
1555 				   const uint8_t *buf,
1556 				   int oob_required, int page)
1557 {
1558 	struct gpmi_nand_data *this = nand_get_controller_data(chip);
1559 	struct bch_geometry *nfc_geo = &this->bch_geometry;
1560 	int eccsize = nfc_geo->ecc_chunk_size;
1561 	int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
1562 	u8 *tmp_buf = this->raw_buffer;
1563 	uint8_t *oob = chip->oob_poi;
1564 	size_t dst_bit_off;
1565 	size_t oob_bit_off;
1566 	size_t oob_byte_off;
1567 	int step;
1568 
1569 	/*
1570 	 * Initialize all bits to 1 in case we don't have a buffer for the
1571 	 * payload or oob data, so that unspecified bits of data are left
1572 	 * in their initial (erased) state.
1573 	 */
1574 	if (!buf || !oob_required)
1575 		memset(tmp_buf, 0xff, mtd->writesize + mtd->oobsize);
1576 
1577 	/*
1578 	 * First copy the metadata section (stored in the oob buffer) to the
1579 	 * beginning of the page, as imposed by the GPMI layout.
1580 	 */
1581 	memcpy(tmp_buf, oob, nfc_geo->metadata_size);
1582 	oob_bit_off = nfc_geo->metadata_size * 8;
1583 	dst_bit_off = oob_bit_off;
1584 
1585 	/* Interleave payload data and ECC bits */
1586 	for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
1587 		if (buf)
1588 			gpmi_copy_bits(tmp_buf, dst_bit_off,
1589 				       buf, step * eccsize * 8, eccsize * 8);
1590 		dst_bit_off += eccsize * 8;
1591 
1592 		/* Align the last ECC block on a byte boundary */
1593 		if (step == nfc_geo->ecc_chunk_count - 1 &&
1594 		    (oob_bit_off + eccbits) % 8)
1595 			eccbits += 8 - ((oob_bit_off + eccbits) % 8);
1596 
1597 		if (oob_required)
1598 			gpmi_copy_bits(tmp_buf, dst_bit_off,
1599 				       oob, oob_bit_off, eccbits);
1600 
1601 		dst_bit_off += eccbits;
1602 		oob_bit_off += eccbits;
1603 	}
1604 
1605 	oob_byte_off = oob_bit_off / 8;
1606 
1607 	if (oob_required && oob_byte_off < mtd->oobsize)
1608 		memcpy(tmp_buf + mtd->writesize + oob_byte_off,
1609 		       oob + oob_byte_off, mtd->oobsize - oob_byte_off);
1610 
1611 	/*
1612 	 * If required, swap the bad block marker and the first byte of the
1613 	 * metadata section, so that we don't modify the bad block marker.
1614 	 *
1615 	 * See the layout description for a detailed explanation on why this
1616 	 * is needed.
1617 	 */
1618 	if (this->swap_block_mark) {
1619 		u8 swap = tmp_buf[0];
1620 
1621 		tmp_buf[0] = tmp_buf[mtd->writesize];
1622 		tmp_buf[mtd->writesize] = swap;
1623 	}
1624 
1625 	chip->write_buf(mtd, tmp_buf, mtd->writesize + mtd->oobsize);
1626 
1627 	return 0;
1628 }
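/*
 * Purely illustrative helper (not part of the driver): it makes the
 * bit-offset arithmetic used by the raw read/write paths above explicit.
 * It intentionally ignores the byte-alignment padding applied to the last
 * ECC chunk, so treat it as a sketch rather than a drop-in function.
 */
static inline size_t __maybe_unused
gpmi_raw_chunk_bit_off(const struct bch_geometry *geo, int chunk)
{
	size_t eccbits = geo->ecc_strength * geo->gf_len;

	/* The metadata comes first, then 'chunk' full (data + parity) groups. */
	return geo->metadata_size * 8 +
	       chunk * (geo->ecc_chunk_size * 8 + eccbits);
}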
1629 
1630 static int gpmi_ecc_read_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
1631 				 int page)
1632 {
1633 	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
1634 
1635 	return gpmi_ecc_read_page_raw(mtd, chip, NULL, 1, page);
1636 }
1637 
1638 static int gpmi_ecc_write_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
1639 				 int page)
1640 {
1641 	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
1642 
1643 	return gpmi_ecc_write_page_raw(mtd, chip, NULL, 1, page);
1644 }
1645 
1646 static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
1647 {
1648 	struct nand_chip *chip = mtd_to_nand(mtd);
1649 	struct gpmi_nand_data *this = nand_get_controller_data(chip);
1650 	int ret = 0;
1651 	uint8_t *block_mark;
1652 	int column, page, status, chipnr;
1653 
1654 	chipnr = (int)(ofs >> chip->chip_shift);
1655 	chip->select_chip(mtd, chipnr);
1656 
1657 	column = !GPMI_IS_MX23(this) ? mtd->writesize : 0;
1658 
1659 	/* Write the block mark. */
1660 	block_mark = this->data_buffer_dma;
1661 	block_mark[0] = 0; /* bad block marker */
1662 
1663 	/* Shift to get page */
1664 	page = (int)(ofs >> chip->page_shift);
1665 
1666 	chip->cmdfunc(mtd, NAND_CMD_SEQIN, column, page);
1667 	chip->write_buf(mtd, block_mark, 1);
1668 	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1669 
1670 	status = chip->waitfunc(mtd, chip);
1671 	if (status & NAND_STATUS_FAIL)
1672 		ret = -EIO;
1673 
1674 	chip->select_chip(mtd, -1);
1675 
1676 	return ret;
1677 }
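/*
 * Offset decoding sketch for the function above, with assumed example
 * values (not from a real chip): chip_shift = 30 (1 GiB per chip) and
 * page_shift = 12 (4 KiB pages).  For ofs = 0x48000000:
 *
 *   chipnr = 0x48000000 >> 30 = 1
 *   page   = 0x48000000 >> 12 = 0x48000
 *
 * The marker byte itself is written at column mtd->writesize (i.e. in the
 * OOB area) on everything but the i.MX23, where it goes to column 0.
 */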
1678 
1679 static int nand_boot_set_geometry(struct gpmi_nand_data *this)
1680 {
1681 	struct boot_rom_geometry *geometry = &this->rom_geometry;
1682 
1683 	/*
1684 	 * Set the boot block stride size.
1685 	 *
1686 	 * In principle, we should be reading this from the OTP bits, since
1687 	 * that's where the ROM is going to get it. In fact, we don't have any
1688 	 * way to read the OTP bits, so we go with the default and hope for the
1689 	 * best.
1690 	 */
1691 	geometry->stride_size_in_pages = 64;
1692 
1693 	/*
1694 	 * Set the search area stride exponent.
1695 	 *
1696 	 * In principle, we should be reading this from the OTP bits, since
1697 	 * that's where the ROM is going to get it. In fact, we don't have any
1698 	 * way to read the OTP bits, so we go with the default and hope for the
1699 	 * best.
1700 	 */
1701 	geometry->search_area_stride_exponent = 2;
1702 	return 0;
1703 }
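/*
 * With the defaults chosen above, the ROM search area works out to
 * (1 << search_area_stride_exponent) = 4 strides of 64 pages each, i.e.
 * 256 pages scanned (or stamped) per search area.  These are the numbers
 * the transcription-stamp helpers below operate on.
 */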
1704 
1705 static const char  *fingerprint = "STMP";
1706 static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
1707 {
1708 	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
1709 	struct device *dev = this->dev;
1710 	struct nand_chip *chip = &this->nand;
1711 	struct mtd_info *mtd = nand_to_mtd(chip);
1712 	unsigned int search_area_size_in_strides;
1713 	unsigned int stride;
1714 	unsigned int page;
1715 	uint8_t *buffer = chip->buffers->databuf;
1716 	int saved_chip_number;
1717 	int found_an_ncb_fingerprint = false;
1718 
1719 	/* Compute the number of strides in a search area. */
1720 	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
1721 
1722 	saved_chip_number = this->current_chip;
1723 	chip->select_chip(mtd, 0);
1724 
1725 	/*
1726 	 * Loop through the first search area, looking for the NCB fingerprint.
1727 	 */
1728 	dev_dbg(dev, "Scanning for an NCB fingerprint...\n");
1729 
1730 	for (stride = 0; stride < search_area_size_in_strides; stride++) {
1731 		/* Compute the page addresses. */
1732 		page = stride * rom_geo->stride_size_in_pages;
1733 
1734 		dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);
1735 
1736 		/*
1737 		 * Read the NCB fingerprint. The fingerprint is four bytes long
1738 		 * and starts at byte offset 12 of the page.
1739 		 */
1740 		chip->cmdfunc(mtd, NAND_CMD_READ0, 12, page);
1741 		chip->read_buf(mtd, buffer, strlen(fingerprint));
1742 
1743 		/* Look for the fingerprint. */
1744 		if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
1745 			found_an_ncb_fingerprint = true;
1746 			break;
1747 		}
1748 
1749 	}
1750 
1751 	chip->select_chip(mtd, saved_chip_number);
1752 
1753 	if (found_an_ncb_fingerprint)
1754 		dev_dbg(dev, "\tFound a fingerprint\n");
1755 	else
1756 		dev_dbg(dev, "\tNo fingerprint found\n");
1757 	return found_an_ncb_fingerprint;
1758 }
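/*
 * With the default boot geometry (4 strides of 64 pages), the loop above
 * probes pages 0, 64, 128 and 192 and compares the 4 bytes found at byte
 * offset 12 of each page against the "STMP" fingerprint.
 */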
1759 
1760 /* Writes a transcription stamp. */
1761 static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
1762 {
1763 	struct device *dev = this->dev;
1764 	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
1765 	struct nand_chip *chip = &this->nand;
1766 	struct mtd_info *mtd = nand_to_mtd(chip);
1767 	unsigned int block_size_in_pages;
1768 	unsigned int search_area_size_in_strides;
1769 	unsigned int search_area_size_in_pages;
1770 	unsigned int search_area_size_in_blocks;
1771 	unsigned int block;
1772 	unsigned int stride;
1773 	unsigned int page;
1774 	uint8_t      *buffer = chip->buffers->databuf;
1775 	int saved_chip_number;
1776 	int status;
1777 
1778 	/* Compute the search area geometry. */
1779 	block_size_in_pages = mtd->erasesize / mtd->writesize;
1780 	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
1781 	search_area_size_in_pages = search_area_size_in_strides *
1782 					rom_geo->stride_size_in_pages;
1783 	search_area_size_in_blocks =
1784 		  (search_area_size_in_pages + (block_size_in_pages - 1)) /
1785 				    block_size_in_pages;
1786 
1787 	dev_dbg(dev, "Search Area Geometry :\n");
1788 	dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
1789 	dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
1790 	dev_dbg(dev, "\tin Pages  : %u\n", search_area_size_in_pages);
1791 
1792 	/* Select chip 0. */
1793 	saved_chip_number = this->current_chip;
1794 	chip->select_chip(mtd, 0);
1795 
1796 	/* Loop over blocks in the first search area, erasing them. */
1797 	dev_dbg(dev, "Erasing the search area...\n");
1798 
1799 	for (block = 0; block < search_area_size_in_blocks; block++) {
1800 		/* Compute the page address. */
1801 		page = block * block_size_in_pages;
1802 
1803 		/* Erase this block. */
1804 		dev_dbg(dev, "\tErasing block 0x%x\n", block);
1805 		chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
1806 		chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
1807 
1808 		/* Wait for the erase to finish. */
1809 		status = chip->waitfunc(mtd, chip);
1810 		if (status & NAND_STATUS_FAIL)
1811 			dev_err(dev, "[%s] Erase failed.\n", __func__);
1812 	}
1813 
1814 	/* Write the NCB fingerprint into the page buffer. */
1815 	memset(buffer, ~0, mtd->writesize);
1816 	memcpy(buffer + 12, fingerprint, strlen(fingerprint));
1817 
1818 	/* Loop through the first search area, writing NCB fingerprints. */
1819 	dev_dbg(dev, "Writing NCB fingerprints...\n");
1820 	for (stride = 0; stride < search_area_size_in_strides; stride++) {
1821 		/* Compute the page addresses. */
1822 		page = stride * rom_geo->stride_size_in_pages;
1823 
1824 		/* Write the first page of the current stride. */
1825 		dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
1826 		chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
1827 		chip->ecc.write_page_raw(mtd, chip, buffer, 0, page);
1828 		chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1829 
1830 		/* Wait for the write to finish. */
1831 		status = chip->waitfunc(mtd, chip);
1832 		if (status & NAND_STATUS_FAIL)
1833 			dev_err(dev, "[%s] Write failed.\n", __func__);
1834 	}
1835 
1836 	/* Deselect chip 0. */
1837 	chip->select_chip(mtd, saved_chip_number);
1838 	return 0;
1839 }
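/*
 * Search-area sizing example for the function above (assumed example
 * geometry, not a specific part): with 128 KiB erase blocks and 2 KiB
 * pages, block_size_in_pages = 64; with 4 strides of 64 pages the search
 * area spans 256 pages, so search_area_size_in_blocks =
 * (256 + 63) / 64 = 4 blocks are erased before the fingerprints are
 * written.
 */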
1840 
1841 static int mx23_boot_init(struct gpmi_nand_data  *this)
1842 {
1843 	struct device *dev = this->dev;
1844 	struct nand_chip *chip = &this->nand;
1845 	struct mtd_info *mtd = nand_to_mtd(chip);
1846 	unsigned int block_count;
1847 	unsigned int block;
1848 	int     chipnr;
1849 	int     page;
1850 	loff_t  byte;
1851 	uint8_t block_mark;
1852 	int     ret = 0;
1853 
1854 	/*
1855 	 * If control arrives here, we can't use block mark swapping, which
1856 	 * means we're forced to use transcription. First, scan for the
1857 	 * transcription stamp. If we find it, then we don't have to do
1858 	 * anything -- the block marks are already transcribed.
1859 	 */
1860 	if (mx23_check_transcription_stamp(this))
1861 		return 0;
1862 
1863 	/*
1864 	 * If control arrives here, we couldn't find a transcription stamp, so
1865 	 * we presume the block marks are in the conventional location.
1866 	 */
1867 	dev_dbg(dev, "Transcribing bad block marks...\n");
1868 
1869 	/* Compute the number of blocks in the entire medium. */
1870 	block_count = chip->chipsize >> chip->phys_erase_shift;
1871 
1872 	/*
1873 	 * Loop over all the blocks in the medium, transcribing block marks as
1874 	 * we go.
1875 	 */
1876 	for (block = 0; block < block_count; block++) {
1877 		/*
1878 		 * Compute the chip, page and byte addresses for this block's
1879 		 * conventional mark.
1880 		 */
1881 		chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
1882 		page = block << (chip->phys_erase_shift - chip->page_shift);
1883 		byte = block <<  chip->phys_erase_shift;
1884 
1885 		/* Send the command to read the conventional block mark. */
1886 		chip->select_chip(mtd, chipnr);
1887 		chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
1888 		block_mark = chip->read_byte(mtd);
1889 		chip->select_chip(mtd, -1);
1890 
1891 		/*
1892 		 * Check if the block is marked bad. If so, we need to mark it
1893 		 * again, but this time the result will be a mark in the
1894 		 * location where we transcribe block marks.
1895 		 */
1896 		if (block_mark != 0xff) {
1897 			dev_dbg(dev, "Transcribing mark in block %u\n", block);
1898 			ret = chip->block_markbad(mtd, byte);
1899 			if (ret)
1900 				dev_err(dev,
1901 					"Failed to mark block bad with ret %d\n",
1902 					ret);
1903 		}
1904 	}
1905 
1906 	/* Write the stamp that indicates we've transcribed the block marks. */
1907 	mx23_write_transcription_stamp(this);
1908 	return 0;
1909 }
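/*
 * Address arithmetic example for the transcription loop above (assumed
 * values, for illustration only): with a 256 MiB chip (chip_shift = 28),
 * 128 KiB blocks (phys_erase_shift = 17) and 2 KiB pages (page_shift = 11),
 * block 100 maps to:
 *
 *   chipnr = 100 >> (28 - 17) = 0
 *   page   = 100 << (17 - 11) = 6400
 *   byte   = 100 << 17        = 0xC80000
 *
 * i.e. the first page of the 101st erase block on chip 0.
 */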
1910 
1911 static int nand_boot_init(struct gpmi_nand_data  *this)
1912 {
1913 	nand_boot_set_geometry(this);
1914 
1915 	/* This is the ROM arch-specific initialization before the BBT scanning. */
1916 	if (GPMI_IS_MX23(this))
1917 		return mx23_boot_init(this);
1918 	return 0;
1919 }
1920 
1921 static int gpmi_set_geometry(struct gpmi_nand_data *this)
1922 {
1923 	int ret;
1924 
1925 	/* Free the temporary DMA memory for reading ID. */
1926 	gpmi_free_dma_buffer(this);
1927 
1928 	/* Set up the NFC geometry which is used by BCH. */
1929 	ret = bch_set_geometry(this);
1930 	if (ret) {
1931 		dev_err(this->dev, "Error setting BCH geometry : %d\n", ret);
1932 		return ret;
1933 	}
1934 
1935 	/* Allocate new DMA buffers according to the page size and OOB size. */
1936 	return gpmi_alloc_dma_buffer(this);
1937 }
1938 
1939 static int gpmi_init_last(struct gpmi_nand_data *this)
1940 {
1941 	struct nand_chip *chip = &this->nand;
1942 	struct mtd_info *mtd = nand_to_mtd(chip);
1943 	struct nand_ecc_ctrl *ecc = &chip->ecc;
1944 	struct bch_geometry *bch_geo = &this->bch_geometry;
1945 	int ret;
1946 
1947 	/* Set up the medium geometry */
1948 	ret = gpmi_set_geometry(this);
1949 	if (ret)
1950 		return ret;
1951 
1952 	/* Init the nand_ecc_ctrl{} */
1953 	ecc->read_page	= gpmi_ecc_read_page;
1954 	ecc->write_page	= gpmi_ecc_write_page;
1955 	ecc->read_oob	= gpmi_ecc_read_oob;
1956 	ecc->write_oob	= gpmi_ecc_write_oob;
1957 	ecc->read_page_raw = gpmi_ecc_read_page_raw;
1958 	ecc->write_page_raw = gpmi_ecc_write_page_raw;
1959 	ecc->read_oob_raw = gpmi_ecc_read_oob_raw;
1960 	ecc->write_oob_raw = gpmi_ecc_write_oob_raw;
1961 	ecc->mode	= NAND_ECC_HW;
1962 	ecc->size	= bch_geo->ecc_chunk_size;
1963 	ecc->strength	= bch_geo->ecc_strength;
1964 	mtd_set_ooblayout(mtd, &gpmi_ooblayout_ops);
1965 
1966 	/*
1967 	 * We only enable the subpage read when:
1968 	 *  (1) the chip is imx6, and
1969 	 *  (2) the size of the ECC parity is byte aligned.
1970 	 */
1971 	if (GPMI_IS_MX6(this) &&
1972 		((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) {
1973 		ecc->read_subpage = gpmi_ecc_read_subpage;
1974 		chip->options |= NAND_SUBPAGE_READ;
1975 	}
1976 
1977 	/*
1978 	 * Try to enable extra features such as EDO or Sync mode.
1979 	 *
1980 	 * We do not check the return value here.  That means that if enabling
1981 	 * the extra features fails, we can still run in the normal way.
1982 	 */
1983 	gpmi_extra_init(this);
1984 
1985 	return 0;
1986 }
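/*
 * Byte-alignment check example for the subpage-read condition above
 * (assumed values): with gf_len = 13, an ecc_strength of 16 gives
 * 13 * 16 = 208 parity bits (208 % 8 == 0), so subpage reads would be
 * enabled on i.MX6; an ecc_strength of 18 gives 234 bits (234 % 8 == 2),
 * so they would stay disabled.
 */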
1987 
1988 static int gpmi_nand_init(struct gpmi_nand_data *this)
1989 {
1990 	struct nand_chip *chip = &this->nand;
1991 	struct mtd_info  *mtd = nand_to_mtd(chip);
1992 	int ret;
1993 
1994 	/* init current chip */
1995 	this->current_chip	= -1;
1996 
1997 	/* init the MTD data structures */
1998 	mtd->name		= "gpmi-nand";
1999 	mtd->dev.parent		= this->dev;
2000 
2001 	/* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */
2002 	nand_set_controller_data(chip, this);
2003 	nand_set_flash_node(chip, this->pdev->dev.of_node);
2004 	chip->select_chip	= gpmi_select_chip;
2005 	chip->cmd_ctrl		= gpmi_cmd_ctrl;
2006 	chip->dev_ready		= gpmi_dev_ready;
2007 	chip->read_byte		= gpmi_read_byte;
2008 	chip->read_buf		= gpmi_read_buf;
2009 	chip->write_buf		= gpmi_write_buf;
2010 	chip->badblock_pattern	= &gpmi_bbt_descr;
2011 	chip->block_markbad	= gpmi_block_markbad;
2012 	chip->options		|= NAND_NO_SUBPAGE_WRITE;
2013 
2014 	/* Set up swap_block_mark; it must be set before gpmi_set_geometry(). */
2015 	this->swap_block_mark = !GPMI_IS_MX23(this);
2016 
2017 	/*
2018 	 * Allocate a temporary DMA buffer for reading ID in the
2019 	 * nand_scan_ident().
2020 	 */
2021 	this->bch_geometry.payload_size = 1024;
2022 	this->bch_geometry.auxiliary_size = 128;
2023 	ret = gpmi_alloc_dma_buffer(this);
2024 	if (ret)
2025 		goto err_out;
2026 
2027 	ret = nand_scan_ident(mtd, GPMI_IS_MX6(this) ? 2 : 1, NULL);
2028 	if (ret)
2029 		goto err_out;
2030 
2031 	if (chip->bbt_options & NAND_BBT_USE_FLASH) {
2032 		chip->bbt_options |= NAND_BBT_NO_OOB;
2033 
2034 		if (of_property_read_bool(this->dev->of_node,
2035 						"fsl,no-blockmark-swap"))
2036 			this->swap_block_mark = false;
2037 	}
2038 	dev_dbg(this->dev, "Blockmark swapping %sabled\n",
2039 		this->swap_block_mark ? "en" : "dis");
2040 
2041 	ret = gpmi_init_last(this);
2042 	if (ret)
2043 		goto err_out;
2044 
2045 	chip->options |= NAND_SKIP_BBTSCAN;
2046 	ret = nand_scan_tail(mtd);
2047 	if (ret)
2048 		goto err_out;
2049 
2050 	ret = nand_boot_init(this);
2051 	if (ret)
2052 		goto err_nand_cleanup;
2053 	ret = chip->scan_bbt(mtd);
2054 	if (ret)
2055 		goto err_nand_cleanup;
2056 
2057 	ret = mtd_device_register(mtd, NULL, 0);
2058 	if (ret)
2059 		goto err_nand_cleanup;
2060 	return 0;
2061 
2062 err_nand_cleanup:
2063 	nand_cleanup(chip);
2064 err_out:
2065 	gpmi_free_dma_buffer(this);
2066 	return ret;
2067 }
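/*
 * Minimal device-tree sketch for the block-mark-swap handling above.  The
 * node name, unit address and property set are illustrative only; the
 * binding document is authoritative:
 *
 *	gpmi-nand@112000 {
 *		compatible = "fsl,imx6q-gpmi-nand";
 *		nand-on-flash-bbt;
 *		fsl,no-blockmark-swap;
 *	};
 *
 * nand-on-flash-bbt sets NAND_BBT_USE_FLASH, which is the only case in
 * which "fsl,no-blockmark-swap" is looked at here.
 */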
2068 
2069 static const struct of_device_id gpmi_nand_id_table[] = {
2070 	{
2071 		.compatible = "fsl,imx23-gpmi-nand",
2072 		.data = &gpmi_devdata_imx23,
2073 	}, {
2074 		.compatible = "fsl,imx28-gpmi-nand",
2075 		.data = &gpmi_devdata_imx28,
2076 	}, {
2077 		.compatible = "fsl,imx6q-gpmi-nand",
2078 		.data = &gpmi_devdata_imx6q,
2079 	}, {
2080 		.compatible = "fsl,imx6sx-gpmi-nand",
2081 		.data = &gpmi_devdata_imx6sx,
2082 	}, {
2083 		.compatible = "fsl,imx7d-gpmi-nand",
2084 		.data = &gpmi_devdata_imx7d,
2085 	}, {}
2086 };
2087 MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
2088 
2089 static int gpmi_nand_probe(struct platform_device *pdev)
2090 {
2091 	struct gpmi_nand_data *this;
2092 	const struct of_device_id *of_id;
2093 	int ret;
2094 
2095 	this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
2096 	if (!this)
2097 		return -ENOMEM;
2098 
2099 	of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
2100 	if (of_id) {
2101 		this->devdata = of_id->data;
2102 	} else {
2103 		dev_err(&pdev->dev, "Failed to find the right device id.\n");
2104 		return -ENODEV;
2105 	}
2106 
2107 	platform_set_drvdata(pdev, this);
2108 	this->pdev  = pdev;
2109 	this->dev   = &pdev->dev;
2110 
2111 	ret = acquire_resources(this);
2112 	if (ret)
2113 		goto exit_acquire_resources;
2114 
2115 	ret = init_hardware(this);
2116 	if (ret)
2117 		goto exit_nfc_init;
2118 
2119 	ret = gpmi_nand_init(this);
2120 	if (ret)
2121 		goto exit_nfc_init;
2122 
2123 	dev_info(this->dev, "driver registered.\n");
2124 
2125 	return 0;
2126 
2127 exit_nfc_init:
2128 	release_resources(this);
2129 exit_acquire_resources:
2130 
2131 	return ret;
2132 }
2133 
2134 static int gpmi_nand_remove(struct platform_device *pdev)
2135 {
2136 	struct gpmi_nand_data *this = platform_get_drvdata(pdev);
2137 
2138 	nand_release(nand_to_mtd(&this->nand));
2139 	gpmi_free_dma_buffer(this);
2140 	release_resources(this);
2141 	return 0;
2142 }
2143 
2144 #ifdef CONFIG_PM_SLEEP
2145 static int gpmi_pm_suspend(struct device *dev)
2146 {
2147 	struct gpmi_nand_data *this = dev_get_drvdata(dev);
2148 
2149 	release_dma_channels(this);
2150 	return 0;
2151 }
2152 
2153 static int gpmi_pm_resume(struct device *dev)
2154 {
2155 	struct gpmi_nand_data *this = dev_get_drvdata(dev);
2156 	int ret;
2157 
2158 	ret = acquire_dma_channels(this);
2159 	if (ret < 0)
2160 		return ret;
2161 
2162 	/* re-init the GPMI registers */
2163 	this->flags &= ~GPMI_TIMING_INIT_OK;
2164 	ret = gpmi_init(this);
2165 	if (ret) {
2166 		dev_err(this->dev, "Error setting GPMI : %d\n", ret);
2167 		return ret;
2168 	}
2169 
2170 	/* re-init the BCH registers */
2171 	ret = bch_set_geometry(this);
2172 	if (ret) {
2173 		dev_err(this->dev, "Error setting BCH : %d\n", ret);
2174 		return ret;
2175 	}
2176 
2177 	/* re-init others */
2178 	gpmi_extra_init(this);
2179 
2180 	return 0;
2181 }
2182 #endif /* CONFIG_PM_SLEEP */
2183 
2184 static const struct dev_pm_ops gpmi_pm_ops = {
2185 	SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
2186 };
2187 
2188 static struct platform_driver gpmi_nand_driver = {
2189 	.driver = {
2190 		.name = "gpmi-nand",
2191 		.pm = &gpmi_pm_ops,
2192 		.of_match_table = gpmi_nand_id_table,
2193 	},
2194 	.probe   = gpmi_nand_probe,
2195 	.remove  = gpmi_nand_remove,
2196 };
2197 module_platform_driver(gpmi_nand_driver);
2198 
2199 MODULE_AUTHOR("Freescale Semiconductor, Inc.");
2200 MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
2201 MODULE_LICENSE("GPL");
2202