1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Freescale GPMI NAND Flash Driver
4 *
5 * Copyright (C) 2010-2015 Freescale Semiconductor, Inc.
6 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
7 */
8 #include <linux/clk.h>
9 #include <linux/delay.h>
10 #include <linux/slab.h>
11 #include <linux/sched/task_stack.h>
12 #include <linux/interrupt.h>
13 #include <linux/module.h>
14 #include <linux/mtd/partitions.h>
15 #include <linux/of.h>
16 #include <linux/of_device.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/dma/mxs-dma.h>
19 #include "gpmi-nand.h"
20 #include "gpmi-regs.h"
21 #include "bch-regs.h"
22
23 /* Resource names for the GPMI NAND driver. */
24 #define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand"
25 #define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch"
26 #define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch"
27
28 /* Converts time to clock cycles */
29 #define TO_CYCLES(duration, period) DIV_ROUND_UP_ULL(duration, period)
30
31 #define MXS_SET_ADDR 0x4
32 #define MXS_CLR_ADDR 0x8
33 /*
34 * Clear the bit and poll it cleared. This is usually called with
35 * a reset address and mask being either SFTRST(bit 31) or CLKGATE
36 * (bit 30).
37 */
/*
 * Clear @mask at @addr (through the MXS "clear" alias register) and
 * busy-wait until the bit reads back as zero.
 *
 * Returns 0 on success, 1 if the bit did not clear before the poll
 * budget was exhausted.
 */
static int clear_poll_bit(void __iomem *addr, u32 mask)
{
	int timeout = 0x400;

	/* clear the bit */
	writel(mask, addr + MXS_CLR_ADDR);

	/*
	 * SFTRST needs 3 GPMI clocks to settle, the reference manual
	 * recommends to wait 1us.
	 */
	udelay(1);

	/* poll the bit becoming clear */
	while ((readl(addr) & mask) && --timeout)
		/* nothing */;

	return !timeout;
}
57
58 #define MODULE_CLKGATE (1 << 30)
59 #define MODULE_SFTRST (1 << 31)
60 /*
61 * The current mxs_reset_block() will do two things:
62 * [1] enable the module.
63 * [2] reset the module.
64 *
65 * In most of the cases, it's ok.
66 * But in MX23, there is a hardware bug in the BCH block (see erratum #2847).
67 * If you try to soft reset the BCH block, it becomes unusable until
68 * the next hard reset. This case occurs in the NAND boot mode. When the board
69 * boots by NAND, the ROM of the chip will initialize the BCH blocks itself.
70 * So If the driver tries to reset the BCH again, the BCH will not work anymore.
71 * You will see a DMA timeout in this case. The bug has been fixed
72 * in the following chips, such as MX28.
73 *
74 * To avoid this bug, just add a new parameter `just_enable` for
75 * the mxs_reset_block(), and rewrite it here.
76 */
static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
{
	/*
	 * @reset_addr:  base of the block's CTRL register; SFTRST/CLKGATE
	 *               live in bits 31/30 (MODULE_SFTRST/MODULE_CLKGATE).
	 * @just_enable: when true, only ungate and de-assert reset - skip
	 *               the soft-reset pulse (MX23 BCH erratum workaround,
	 *               see the comment above).
	 *
	 * Returns 0 on success, -ETIMEDOUT if any polled bit never reached
	 * the expected state.
	 */
	int ret;
	int timeout = 0x400;

	/* clear and poll SFTRST */
	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
	if (unlikely(ret))
		goto error;

	/* clear CLKGATE */
	writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);

	if (!just_enable) {
		/* set SFTRST to reset the block */
		writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
		udelay(1);

		/* poll CLKGATE becoming set (hardware gates the clock as part of reset) */
		while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
			/* nothing */;
		if (unlikely(!timeout))
			goto error;
	}

	/* clear and poll SFTRST */
	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
	if (unlikely(ret))
		goto error;

	/* clear and poll CLKGATE */
	ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
	if (unlikely(ret))
		goto error;

	return 0;

error:
	pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
	return -ETIMEDOUT;
}
118
/*
 * Enable (@v == true) or disable (@v == false) every clock in
 * this->resources.clock[], stopping at the first NULL slot.
 * On an enable failure, previously enabled clocks are rolled back.
 */
static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
{
	int idx;
	int ret = 0;

	for (idx = 0; idx < GPMI_CLK_MAX; idx++) {
		struct clk *clock = this->resources.clock[idx];

		if (!clock)
			break;

		if (!v) {
			clk_disable_unprepare(clock);
			continue;
		}

		ret = clk_prepare_enable(clock);
		if (ret) {
			/* Unwind the clocks we managed to enable so far. */
			while (--idx >= 0)
				clk_disable_unprepare(this->resources.clock[idx]);
			return ret;
		}
	}

	return 0;
}
145
/*
 * One-time controller bring-up: reset the GPMI and BCH blocks and program
 * the basic operating-mode bits in HW_GPMI_CTRL1. Holds a runtime-PM
 * reference for the duration of the register accesses.
 *
 * Returns 0 on success or a negative error code.
 */
static int gpmi_init(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	int ret;

	ret = pm_runtime_get_sync(this->dev);
	if (ret < 0) {
		/* get_sync bumps the usage count even on failure - drop it */
		pm_runtime_put_noidle(this->dev);
		return ret;
	}

	ret = gpmi_reset_block(r->gpmi_regs, false);
	if (ret)
		goto err_out;

	/*
	 * Reset BCH here, too. We got failures otherwise :(
	 * See later BCH reset for explanation of MX23 and MX28 handling
	 * (soft reset is skipped on those because of erratum #2847).
	 */
	ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
	if (ret)
		goto err_out;

	/* Choose NAND mode. */
	writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);

	/* Set the IRQ polarity. */
	writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
	       r->gpmi_regs + HW_GPMI_CTRL1_SET);

	/* Disable Write-Protection. */
	writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);

	/* Select BCH ECC. */
	writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);

	/*
	 * Decouple the chip select from dma channel. We use dma0 for all
	 * the chips.
	 */
	writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);

err_out:
	pm_runtime_mark_last_busy(this->dev);
	pm_runtime_put_autosuspend(this->dev);
	return ret;
}
193
/* This function is very useful. It is called only when a bug occurs. */
gpmi_dump_info(struct gpmi_nand_data * this)195 static void gpmi_dump_info(struct gpmi_nand_data *this)
196 {
197 struct resources *r = &this->resources;
198 struct bch_geometry *geo = &this->bch_geometry;
199 u32 reg;
200 int i;
201
202 dev_err(this->dev, "Show GPMI registers :\n");
203 for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
204 reg = readl(r->gpmi_regs + i * 0x10);
205 dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
206 }
207
208 /* start to print out the BCH info */
209 dev_err(this->dev, "Show BCH registers :\n");
210 for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
211 reg = readl(r->bch_regs + i * 0x10);
212 dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
213 }
214 dev_err(this->dev, "BCH Geometry :\n"
215 "GF length : %u\n"
216 "ECC Strength : %u\n"
217 "Page Size in Bytes : %u\n"
218 "Metadata Size in Bytes : %u\n"
219 "ECC Chunk Size in Bytes: %u\n"
220 "ECC Chunk Count : %u\n"
221 "Payload Size in Bytes : %u\n"
222 "Auxiliary Size in Bytes: %u\n"
223 "Auxiliary Status Offset: %u\n"
224 "Block Mark Byte Offset : %u\n"
225 "Block Mark Bit Offset : %u\n",
226 geo->gf_len,
227 geo->ecc_strength,
228 geo->page_size,
229 geo->metadata_size,
230 geo->ecc_chunk_size,
231 geo->ecc_chunk_count,
232 geo->payload_size,
233 geo->auxiliary_size,
234 geo->auxiliary_status_offset,
235 geo->block_mark_byte_offset,
236 geo->block_mark_bit_offset);
237 }
238
gpmi_check_ecc(struct gpmi_nand_data * this)239 static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
240 {
241 struct bch_geometry *geo = &this->bch_geometry;
242
243 /* Do the sanity check. */
244 if (GPMI_IS_MXS(this)) {
245 /* The mx23/mx28 only support the GF13. */
246 if (geo->gf_len == 14)
247 return false;
248 }
249 return geo->ecc_strength <= this->devdata->bch_max_ecc_strength;
250 }
251
/*
 * If we can get the ECC information from the nand chip, we do not
 * need to calculate them ourselves.
 *
 * We may have available oob space in this case.
 *
 * @ecc_strength: correctable bits per ECC chunk.
 * @ecc_step:     ECC chunk size in bytes; only 512 and 1024 are accepted.
 *
 * Returns 0 on success, -EINVAL when the controller cannot support the
 * requested geometry.
 */
static int set_geometry_by_ecc_info(struct gpmi_nand_data *this,
				    unsigned int ecc_strength,
				    unsigned int ecc_step)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int block_mark_bit_offset;

	/* The chunk size fixes the Galois Field order used by the BCH block. */
	switch (ecc_step) {
	case SZ_512:
		geo->gf_len = 13;
		break;
	case SZ_1K:
		geo->gf_len = 14;
		break;
	default:
		dev_err(this->dev,
			"unsupported nand chip. ecc bits : %d, ecc size : %d\n",
			chip->base.eccreq.strength,
			chip->base.eccreq.step_size);
		return -EINVAL;
	}
	geo->ecc_chunk_size = ecc_step;
	/* The hardware wants an even ECC strength. */
	geo->ecc_strength = round_up(ecc_strength, 2);
	if (!gpmi_check_ecc(this))
		return -EINVAL;

	/* Keep the C >= O */
	if (geo->ecc_chunk_size < mtd->oobsize) {
		dev_err(this->dev,
			"unsupported nand chip. ecc size: %d, oob size : %d\n",
			ecc_step, mtd->oobsize);
		return -EINVAL;
	}

	/* The default value, see comment in the legacy_set_geometry(). */
	geo->metadata_size = 10;

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;

	/*
	 * Now, the NAND chip with 2K page(data chunk is 512byte) shows below:
	 *
	 *    |                          P                            |
	 *    |<----------------------------------------------------->|
	 *    |                                                       |
	 *    |                                        (Block Mark)   |
	 *    |                      P'                      |      | |     |
	 *    |<-------------------------------------------->|  D   | |  O' |
	 *    |                                              |<---->| |<--->|
	 *    V                                              V      V V     V
	 *    +---+----------+-+----------+-+----------+-+----------+-+-----+
	 *    | M |   data   |E|   data   |E|   data   |E|   data   |E|     |
	 *    +---+----------+-+----------+-+----------+-+----------+-+-----+
	 *                                                   ^              ^
	 *                                                   |      O       |
	 *                                                   |<------------>|
	 *                                                   |              |
	 *
	 *	P : the page size for BCH module.
	 *	E : The ECC strength.
	 *	G : the length of Galois Field.
	 *	N : The chunk count of per page.
	 *	M : the metasize of per page.
	 *	C : the ecc chunk size, aka the "data" above.
	 *	P': the nand chip's page size.
	 *	O : the nand chip's oob size.
	 *	O': the free oob.
	 *
	 *	The formula for P is :
	 *
	 *	            E * G * N
	 *	       P = ------------ + P' + M
	 *	                8
	 *
	 * The position of block mark moves forward in the ECC-based view
	 * of page, and the delta is:
	 *
	 *	           E * G * (N - 1)
	 *	       D = (---------------- + M)
	 *	                  8
	 *
	 * Please see the comment in legacy_set_geometry().
	 * With the condition C >= O , we still can get same result.
	 * So the bit position of the physical block mark within the ECC-based
	 * view of the page is :
	 *	       (P' - D) * 8
	 */
	geo->page_size = mtd->writesize + geo->metadata_size +
		(geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;

	geo->payload_size = mtd->writesize;

	/* Metadata is 32-bit padded; ECC status (one byte per chunk) follows. */
	geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
	geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
			+ ALIGN(geo->ecc_chunk_count, 4);

	if (!this->swap_block_mark)
		return 0;

	/* For bit swap. */
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
				+ geo->metadata_size * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;
	return 0;
}
368
369 /*
370 * Calculate the ECC strength by hand:
371 * E : The ECC strength.
372 * G : the length of Galois Field.
373 * N : The chunk count of per page.
374 * O : the oobsize of the NAND chip.
375 * M : the metasize of per page.
376 *
377 * The formula is :
378 * E * G * N
379 * ------------ <= (O - M)
380 * 8
381 *
382 * So, we get E by:
383 * (O - M) * 8
384 * E <= -------------
385 * G * N
386 */
get_ecc_strength(struct gpmi_nand_data * this)387 static inline int get_ecc_strength(struct gpmi_nand_data *this)
388 {
389 struct bch_geometry *geo = &this->bch_geometry;
390 struct mtd_info *mtd = nand_to_mtd(&this->nand);
391 int ecc_strength;
392
393 ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
394 / (geo->gf_len * geo->ecc_chunk_count);
395
396 /* We need the minor even number. */
397 return round_down(ecc_strength, 2);
398 }
399
/*
 * Compute the BCH geometry the traditional way: a fixed 512-byte chunk
 * (doubled until chunk >= oobsize) with GF(2^13)/GF(2^14), and the
 * largest even ECC strength that still fits in the spare area
 * (see get_ecc_strength()).
 *
 * Returns 0 on success, -EINVAL when the resulting strength exceeds what
 * the controller supports.
 */
static int legacy_set_geometry(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct mtd_info *mtd = nand_to_mtd(&this->nand);
	unsigned int metadata_size;
	unsigned int status_size;
	unsigned int block_mark_bit_offset;

	/*
	 * The size of the metadata can be changed, though we set it to 10
	 * bytes now. But it can't be too large, because we have to save
	 * enough space for BCH.
	 */
	geo->metadata_size = 10;

	/* The default for the length of Galois Field. */
	geo->gf_len = 13;

	/* The default for chunk size. */
	geo->ecc_chunk_size = 512;
	while (geo->ecc_chunk_size < mtd->oobsize) {
		geo->ecc_chunk_size *= 2; /* keep C >= O */
		geo->gf_len = 14;
	}

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;

	/* We use the same ECC strength for all chunks. */
	geo->ecc_strength = get_ecc_strength(this);
	if (!gpmi_check_ecc(this)) {
		dev_err(this->dev,
			"ecc strength: %d cannot be supported by the controller (%d)\n"
			"try to use minimum ecc strength that NAND chip required\n",
			geo->ecc_strength,
			this->devdata->bch_max_ecc_strength);
		return -EINVAL;
	}

	geo->page_size = mtd->writesize + geo->metadata_size +
		(geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
	geo->payload_size = mtd->writesize;

	/*
	 * The auxiliary buffer contains the metadata and the ECC status. The
	 * metadata is padded to the nearest 32-bit boundary. The ECC status
	 * contains one byte for every ECC chunk, and is also padded to the
	 * nearest 32-bit boundary.
	 */
	metadata_size = ALIGN(geo->metadata_size, 4);
	status_size   = ALIGN(geo->ecc_chunk_count, 4);

	geo->auxiliary_size = metadata_size + status_size;
	geo->auxiliary_status_offset = metadata_size;

	if (!this->swap_block_mark)
		return 0;

	/*
	 * We need to compute the byte and bit offsets of
	 * the physical block mark within the ECC-based view of the page.
	 *
	 * NAND chip with 2K page shows below:
	 *                                             (Block Mark)
	 *                                                   |      |
	 *                                                   |  D   |
	 *                                                   |<---->|
	 *                                                   V      V
	 *    +---+----------+-+----------+-+----------+-+----------+-+
	 *    | M |   data   |E|   data   |E|   data   |E|   data   |E|
	 *    +---+----------+-+----------+-+----------+-+----------+-+
	 *
	 * The position of block mark moves forward in the ECC-based view
	 * of page, and the delta is:
	 *
	 *                   E * G * (N - 1)
	 *             D = (---------------- + M)
	 *                          8
	 *
	 * With the formula to compute the ECC strength, and the condition
	 * : C >= O         (C is the ecc chunk size)
	 *
	 * It's easy to deduce to the following result:
	 *
	 *         E * G       (O - M)      C - M         C - M
	 *      ----------- <= ------- <=  --------  <  ---------
	 *           8            N           N          (N - 1)
	 *
	 *  So, we get:
	 *
	 *                   E * G * (N - 1)
	 *             D = (---------------- + M) < C
	 *                          8
	 *
	 *  The above inequality means the position of block mark
	 *  within the ECC-based view of the page is still in the data chunk,
	 *  and it's NOT in the ECC bits of the chunk.
	 *
	 *  Use the following to compute the bit position of the
	 *  physical block mark within the ECC-based view of the page:
	 *          (page_size - D) * 8
	 *
	 *  --Huang Shijie
	 */
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
				+ geo->metadata_size * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
	return 0;
}
511
common_nfc_set_geometry(struct gpmi_nand_data * this)512 static int common_nfc_set_geometry(struct gpmi_nand_data *this)
513 {
514 struct nand_chip *chip = &this->nand;
515
516 if (chip->ecc.strength > 0 && chip->ecc.size > 0)
517 return set_geometry_by_ecc_info(this, chip->ecc.strength,
518 chip->ecc.size);
519
520 if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc"))
521 || legacy_set_geometry(this)) {
522 if (!(chip->base.eccreq.strength > 0 &&
523 chip->base.eccreq.step_size > 0))
524 return -EINVAL;
525
526 return set_geometry_by_ecc_info(this,
527 chip->base.eccreq.strength,
528 chip->base.eccreq.step_size);
529 }
530
531 return 0;
532 }
533
534 /* Configures the geometry for BCH. */
bch_set_geometry(struct gpmi_nand_data * this)535 static int bch_set_geometry(struct gpmi_nand_data *this)
536 {
537 struct resources *r = &this->resources;
538 int ret;
539
540 ret = common_nfc_set_geometry(this);
541 if (ret)
542 return ret;
543
544 ret = pm_runtime_get_sync(this->dev);
545 if (ret < 0) {
546 pm_runtime_put_autosuspend(this->dev);
547 return ret;
548 }
549
550 /*
551 * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
552 * chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
553 * and MX28.
554 */
555 ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
556 if (ret)
557 goto err_out;
558
559 /* Set *all* chip selects to use layout 0. */
560 writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
561
562 ret = 0;
563 err_out:
564 pm_runtime_mark_last_busy(this->dev);
565 pm_runtime_put_autosuspend(this->dev);
566
567 return ret;
568 }
569
570 /*
571 * <1> Firstly, we should know what's the GPMI-clock means.
572 * The GPMI-clock is the internal clock in the gpmi nand controller.
573 * If you set 100MHz to gpmi nand controller, the GPMI-clock's period
574 * is 10ns. Mark the GPMI-clock's period as GPMI-clock-period.
575 *
576 * <2> Secondly, we should know what's the frequency on the nand chip pins.
577 * The frequency on the nand chip pins is derived from the GPMI-clock.
578 * We can get it from the following equation:
579 *
580 * F = G / (DS + DH)
581 *
582 * F : the frequency on the nand chip pins.
583 * G : the GPMI clock, such as 100MHz.
584 * DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
585 * DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
586 *
587 * <3> Thirdly, when the frequency on the nand chip pins is above 33MHz,
588 * the nand EDO(extended Data Out) timing could be applied.
589 * The GPMI implements a feedback read strobe to sample the read data.
590 * The feedback read strobe can be delayed to support the nand EDO timing
591 * where the read strobe may deasserts before the read data is valid, and
592 * read data is valid for some time after read strobe.
593 *
594 * The following figure illustrates some aspects of a NAND Flash read:
595 *
596 * |<---tREA---->|
597 * | |
598 * | | |
599 * |<--tRP-->| |
600 * | | |
601 * __ ___|__________________________________
602 * RDN \________/ |
603 * |
604 * /---------\
605 * Read Data --------------< >---------
606 * \---------/
607 * | |
608 * |<-D->|
609 * FeedbackRDN ________ ____________
610 * \___________/
611 *
612 * D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
613 *
614 *
615 * <4> Now, we begin to describe how to compute the right RDN_DELAY.
616 *
617 * 4.1) From the aspect of the nand chip pins:
618 * Delay = (tREA + C - tRP) {1}
619 *
620 * tREA : the maximum read access time.
621 * C : a constant to adjust the delay. default is 4000ps.
622 * tRP : the read pulse width, which is exactly:
623 * tRP = (GPMI-clock-period) * DATA_SETUP
624 *
625 * 4.2) From the aspect of the GPMI nand controller:
626 * Delay = RDN_DELAY * 0.125 * RP {2}
627 *
628 * RP : the DLL reference period.
629 * if (GPMI-clock-period > DLL_THRETHOLD)
630 * RP = GPMI-clock-period / 2;
631 * else
632 * RP = GPMI-clock-period;
633 *
634 * Set the HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period
635 * is greater DLL_THRETHOLD. In other SOCs, the DLL_THRETHOLD
636 * is 16000ps, but in mx6q, we use 12000ps.
637 *
638 * 4.3) since {1} equals {2}, we get:
639 *
640 * (tREA + 4000 - tRP) * 8
641 * RDN_DELAY = ----------------------- {3}
642 * RP
643 */
/*
 * Translate the SDR timings requested by the NAND core into GPMI
 * register values (hw->timing0/timing1/ctrl1n) and a target clock rate
 * (hw->clk_rate). Nothing is written to hardware here; that is done by
 * gpmi_nfc_apply_timings().
 */
static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
				     const struct nand_sdr_timings *sdr)
{
	struct gpmi_nfc_hardware_timing *hw = &this->hw;
	struct resources *r = &this->resources;
	unsigned int dll_threshold_ps = this->devdata->max_chain_delay;
	unsigned int period_ps, reference_period_ps;
	unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles;
	unsigned int tRP_ps;
	bool use_half_period;
	int sample_delay_ps, sample_delay_factor;
	unsigned int busy_timeout_cycles;
	u8 wrn_dly_sel;
	u64 busy_timeout_ps;

	/* Pick a target GPMI clock rate from the chip's minimum read cycle. */
	if (sdr->tRC_min >= 30000) {
		/* ONFI non-EDO modes [0-3] */
		hw->clk_rate = 22000000;
		wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
	} else if (sdr->tRC_min >= 25000) {
		/* ONFI EDO mode 4 */
		hw->clk_rate = 80000000;
		wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
	} else {
		/* ONFI EDO mode 5 */
		hw->clk_rate = 100000000;
		wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
	}

	/* Use the rate the clock tree can actually deliver. */
	hw->clk_rate = clk_round_rate(r->clock[0], hw->clk_rate);

	/* SDR core timings are given in picoseconds */
	period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate);

	addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
	data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
	data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
	busy_timeout_ps = max(sdr->tBERS_max, sdr->tPROG_max);
	busy_timeout_cycles = TO_CYCLES(busy_timeout_ps, period_ps);

	hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
		      BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
		      BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles);
	/* BUSY_TIMEOUT is programmed in units of 4096 GPMI cycles. */
	hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(DIV_ROUND_UP(busy_timeout_cycles, 4096));

	/*
	 * Derive NFC ideal delay from {3}:
	 *
	 *                     (tREA + 4000 - tRP) * 8
	 *         RDN_DELAY = -----------------------
	 *                                RP
	 */
	if (period_ps > dll_threshold_ps) {
		use_half_period = true;
		reference_period_ps = period_ps / 2;
	} else {
		use_half_period = false;
		reference_period_ps = period_ps;
	}

	tRP_ps = data_setup_cycles * period_ps;
	sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8;
	if (sample_delay_ps > 0)
		sample_delay_factor = sample_delay_ps / reference_period_ps;
	else
		sample_delay_factor = 0;

	/* DLL is only enabled when a non-zero read strobe delay is needed. */
	hw->ctrl1n = BF_GPMI_CTRL1_WRN_DLY_SEL(wrn_dly_sel);
	if (sample_delay_factor)
		hw->ctrl1n |= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor) |
			      BM_GPMI_CTRL1_DLL_ENABLE |
			      (use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0);
}
717
gpmi_nfc_apply_timings(struct gpmi_nand_data * this)718 static int gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
719 {
720 struct gpmi_nfc_hardware_timing *hw = &this->hw;
721 struct resources *r = &this->resources;
722 void __iomem *gpmi_regs = r->gpmi_regs;
723 unsigned int dll_wait_time_us;
724 int ret;
725
726 /* Clock dividers do NOT guarantee a clean clock signal on its output
727 * during the change of the divide factor on i.MX6Q/UL/SX. On i.MX7/8,
728 * all clock dividers provide these guarantee.
729 */
730 if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this))
731 clk_disable_unprepare(r->clock[0]);
732
733 ret = clk_set_rate(r->clock[0], hw->clk_rate);
734 if (ret) {
735 dev_err(this->dev, "cannot set clock rate to %lu Hz: %d\n", hw->clk_rate, ret);
736 return ret;
737 }
738
739 if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this)) {
740 ret = clk_prepare_enable(r->clock[0]);
741 if (ret)
742 return ret;
743 }
744
745 writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0);
746 writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1);
747
748 /*
749 * Clear several CTRL1 fields, DLL must be disabled when setting
750 * RDN_DELAY or HALF_PERIOD.
751 */
752 writel(BM_GPMI_CTRL1_CLEAR_MASK, gpmi_regs + HW_GPMI_CTRL1_CLR);
753 writel(hw->ctrl1n, gpmi_regs + HW_GPMI_CTRL1_SET);
754
755 /* Wait 64 clock cycles before using the GPMI after enabling the DLL */
756 dll_wait_time_us = USEC_PER_SEC / hw->clk_rate * 64;
757 if (!dll_wait_time_us)
758 dll_wait_time_us = 1;
759
760 /* Wait for the DLL to settle. */
761 udelay(dll_wait_time_us);
762
763 return 0;
764 }
765
gpmi_setup_data_interface(struct nand_chip * chip,int chipnr,const struct nand_data_interface * conf)766 static int gpmi_setup_data_interface(struct nand_chip *chip, int chipnr,
767 const struct nand_data_interface *conf)
768 {
769 struct gpmi_nand_data *this = nand_get_controller_data(chip);
770 const struct nand_sdr_timings *sdr;
771
772 /* Retrieve required NAND timings */
773 sdr = nand_get_sdr_timings(conf);
774 if (IS_ERR(sdr))
775 return PTR_ERR(sdr);
776
777 /* Only MX6 GPMI controller can reach EDO timings */
778 if (sdr->tRC_min <= 25000 && !GPMI_IS_MX6(this))
779 return -ENOTSUPP;
780
781 /* Stop here if this call was just a check */
782 if (chipnr < 0)
783 return 0;
784
785 /* Do the actual derivation of the controller timings */
786 gpmi_nfc_compute_timings(this, sdr);
787
788 this->hw.must_apply_timings = true;
789
790 return 0;
791 }
792
793 /* Clears a BCH interrupt. */
/* Clears a BCH interrupt. */
static void gpmi_clear_bch(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	/* Acknowledge COMPLETE_IRQ through the CTRL "clear" alias register. */
	writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
}
799
/* Returns the single DMA channel shared by all chip selects. */
static struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
{
	/* We use the DMA channel 0 to access all the nand chips. */
	return this->dma_chans[0];
}
805
806 /* This will be called after the DMA operation is finished. */
dma_irq_callback(void * param)807 static void dma_irq_callback(void *param)
808 {
809 struct gpmi_nand_data *this = param;
810 struct completion *dma_c = &this->dma_done;
811
812 complete(dma_c);
813 }
814
/*
 * BCH completion interrupt handler: acknowledge the IRQ and signal
 * this->bch_done for whoever is waiting on it.
 */
static irqreturn_t bch_irq(int irq, void *cookie)
{
	struct gpmi_nand_data *this = cookie;

	gpmi_clear_bch(this);
	complete(&this->bch_done);
	return IRQ_HANDLED;
}
823
gpmi_raw_len_to_len(struct gpmi_nand_data * this,int raw_len)824 static int gpmi_raw_len_to_len(struct gpmi_nand_data *this, int raw_len)
825 {
826 /*
827 * raw_len is the length to read/write including bch data which
828 * we are passed in exec_op. Calculate the data length from it.
829 */
830 if (this->bch)
831 return ALIGN_DOWN(raw_len, this->bch_geometry.ecc_chunk_size);
832 else
833 return raw_len;
834 }
835
836 /* Can we use the upper's buffer directly for DMA? */
/*
 * Can we use the upper's buffer directly for DMA?
 *
 * Returns true when @buf itself was mapped into @sgl (zero-copy), false
 * when the driver's bounce buffer (this->data_buffer_dma) was used
 * instead - in which case, for writes, the bounce buffer has been filled
 * with a copy of @buf.
 */
static bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf,
			     int raw_len, struct scatterlist *sgl,
			     enum dma_data_direction dr)
{
	int ret;
	int len = gpmi_raw_len_to_len(this, raw_len);

	/* first try to map the upper buffer directly */
	if (virt_addr_valid(buf) && !object_is_on_stack(buf)) {
		sg_init_one(sgl, buf, len);
		ret = dma_map_sg(this->dev, sgl, 1, dr);
		if (ret == 0)
			goto map_fail;

		return true;
	}

map_fail:
	/* We have to use our own DMA buffer. */
	sg_init_one(sgl, this->data_buffer_dma, len);

	if (dr == DMA_TO_DEVICE && buf != this->data_buffer_dma)
		memcpy(this->data_buffer_dma, buf, len);

	/*
	 * NOTE(review): the return value of this dma_map_sg() is not
	 * checked - presumably mapping the driver's own buffer cannot
	 * fail here; confirm.
	 */
	dma_map_sg(this->dev, sgl, 1, dr);

	return false;
}
865
866 /**
867 * gpmi_copy_bits - copy bits from one memory region to another
868 * @dst: destination buffer
869 * @dst_bit_off: bit offset we're starting to write at
870 * @src: source buffer
871 * @src_bit_off: bit offset we're starting to read from
872 * @nbits: number of bits to copy
873 *
874 * This functions copies bits from one memory region to another, and is used by
875 * the GPMI driver to copy ECC sections which are not guaranteed to be byte
876 * aligned.
877 *
878 * src and dst should not overlap.
879 *
880 */
static void gpmi_copy_bits(u8 *dst, size_t dst_bit_off, const u8 *src,
			   size_t src_bit_off, size_t nbits)
{
	size_t i;
	size_t nbytes;
	/* Staging accumulator: up to 32 pending bits, LSB-first. */
	u32 src_buffer = 0;
	size_t bits_in_src_buffer = 0;

	if (!nbits)
		return;

	/*
	 * Move src and dst pointers to the closest byte pointer and store bit
	 * offsets within a byte.
	 */
	src += src_bit_off / 8;
	src_bit_off %= 8;

	dst += dst_bit_off / 8;
	dst_bit_off %= 8;

	/*
	 * Initialize the src_buffer value with bits available in the first
	 * byte of data so that we end up with a byte aligned src pointer.
	 */
	if (src_bit_off) {
		src_buffer = src[0] >> src_bit_off;
		if (nbits >= (8 - src_bit_off)) {
			bits_in_src_buffer += 8 - src_bit_off;
		} else {
			/* Fewer bits requested than remain in this byte. */
			src_buffer &= GENMASK(nbits - 1, 0);
			bits_in_src_buffer += nbits;
		}
		nbits -= bits_in_src_buffer;
		src++;
	}

	/* Calculate the number of bytes that can be copied from src to dst. */
	nbytes = nbits / 8;

	/* Try to align dst to a byte boundary. */
	if (dst_bit_off) {
		/* Make sure we have enough staged bits to fill dst's partial byte. */
		if (bits_in_src_buffer < (8 - dst_bit_off) && nbytes) {
			src_buffer |= src[0] << bits_in_src_buffer;
			bits_in_src_buffer += 8;
			src++;
			nbytes--;
		}

		if (bits_in_src_buffer >= (8 - dst_bit_off)) {
			/* Keep dst's low bits, splice our bits above them. */
			dst[0] &= GENMASK(dst_bit_off - 1, 0);
			dst[0] |= src_buffer << dst_bit_off;
			src_buffer >>= (8 - dst_bit_off);
			bits_in_src_buffer -= (8 - dst_bit_off);
			dst_bit_off = 0;
			dst++;
			if (bits_in_src_buffer > 7) {
				bits_in_src_buffer -= 8;
				dst[0] = src_buffer;
				dst++;
				src_buffer >>= 8;
			}
		}
	}

	if (!bits_in_src_buffer && !dst_bit_off) {
		/*
		 * Both src and dst pointers are byte aligned, thus we can
		 * just use the optimized memcpy function.
		 */
		if (nbytes)
			memcpy(dst, src, nbytes);
	} else {
		/*
		 * src buffer is not byte aligned, hence we have to copy each
		 * src byte to the src_buffer variable before extracting a byte
		 * to store in dst.
		 */
		for (i = 0; i < nbytes; i++) {
			src_buffer |= src[i] << bits_in_src_buffer;
			dst[i] = src_buffer;
			src_buffer >>= 8;
		}
	}
	/* Update dst and src pointers */
	dst += nbytes;
	src += nbytes;

	/*
	 * nbits is the number of remaining bits. It should not exceed 8 as
	 * we've already copied as much bytes as possible.
	 */
	nbits %= 8;

	/*
	 * If there's no more bits to copy to the destination and src buffer
	 * was already byte aligned, then we're done.
	 */
	if (!nbits && !bits_in_src_buffer)
		return;

	/* Copy the remaining bits to src_buffer */
	if (nbits)
		src_buffer |= (*src & GENMASK(nbits - 1, 0)) <<
			      bits_in_src_buffer;
	bits_in_src_buffer += nbits;

	/*
	 * In case there were not enough bits to get a byte aligned dst buffer
	 * prepare the src_buffer variable to match the dst organization (shift
	 * src_buffer by dst_bit_off and retrieve the least significant bits
	 * from dst).
	 */
	if (dst_bit_off)
		src_buffer = (src_buffer << dst_bit_off) |
			     (*dst & GENMASK(dst_bit_off - 1, 0));
	bits_in_src_buffer += dst_bit_off;

	/*
	 * Keep most significant bits from dst if we end up with an unaligned
	 * number of bits.
	 */
	nbytes = bits_in_src_buffer / 8;
	if (bits_in_src_buffer % 8) {
		src_buffer |= (dst[nbytes] &
			       GENMASK(7, bits_in_src_buffer % 8)) <<
			      (nbytes * 8);
		nbytes++;
	}

	/* Copy the remaining bytes to dst */
	for (i = 0; i < nbytes; i++) {
		dst[i] = src_buffer;
		src_buffer >>= 8;
	}
}
1017
1018 /* add our owner bbt descriptor */
/* A good block is marked by a single 0xff byte at OOB offset 0. */
static uint8_t scan_ff_pattern[] = { 0xff };
static struct nand_bbt_descr gpmi_bbt_descr = {
	.options	= 0,
	.offs		= 0,
	.len		= 1,
	.pattern	= scan_ff_pattern
};
1026
1027 /*
1028 * We may change the layout if we can get the ECC info from the datasheet,
1029 * else we will use all the (page + OOB).
1030 */
gpmi_ooblayout_ecc(struct mtd_info * mtd,int section,struct mtd_oob_region * oobregion)1031 static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section,
1032 struct mtd_oob_region *oobregion)
1033 {
1034 struct nand_chip *chip = mtd_to_nand(mtd);
1035 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1036 struct bch_geometry *geo = &this->bch_geometry;
1037
1038 if (section)
1039 return -ERANGE;
1040
1041 oobregion->offset = 0;
1042 oobregion->length = geo->page_size - mtd->writesize;
1043
1044 return 0;
1045 }
1046
gpmi_ooblayout_free(struct mtd_info * mtd,int section,struct mtd_oob_region * oobregion)1047 static int gpmi_ooblayout_free(struct mtd_info *mtd, int section,
1048 struct mtd_oob_region *oobregion)
1049 {
1050 struct nand_chip *chip = mtd_to_nand(mtd);
1051 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1052 struct bch_geometry *geo = &this->bch_geometry;
1053
1054 if (section)
1055 return -ERANGE;
1056
1057 /* The available oob size we have. */
1058 if (geo->page_size < mtd->writesize + mtd->oobsize) {
1059 oobregion->offset = geo->page_size - mtd->writesize;
1060 oobregion->length = mtd->oobsize - oobregion->offset;
1061 }
1062
1063 return 0;
1064 }
1065
/* Clocks required on i.MX23/i.MX28: only the GPMI I/O clock. */
static const char * const gpmi_clks_for_mx2x[] = {
	"gpmi_io",
};

/* OOB layout callbacks exposed to the MTD core. */
static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = {
	.ecc = gpmi_ooblayout_ecc,
	.free = gpmi_ooblayout_free,
};
1074
/*
 * Per-SoC configuration: maximum BCH ECC strength, maximum chain delay
 * (units per gpmi_devdata definition — presumably ns; confirm against
 * gpmi-nand.h) and the set of clocks the variant needs.
 */
static const struct gpmi_devdata gpmi_devdata_imx23 = {
	.type = IS_MX23,
	.bch_max_ecc_strength = 20,
	.max_chain_delay = 16000,
	.clks = gpmi_clks_for_mx2x,
	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
};

static const struct gpmi_devdata gpmi_devdata_imx28 = {
	.type = IS_MX28,
	.bch_max_ecc_strength = 20,
	.max_chain_delay = 16000,
	.clks = gpmi_clks_for_mx2x,
	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
};

/* i.MX6 needs several bus/peripheral clocks in addition to gpmi_io. */
static const char * const gpmi_clks_for_mx6[] = {
	"gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
};

static const struct gpmi_devdata gpmi_devdata_imx6q = {
	.type = IS_MX6Q,
	.bch_max_ecc_strength = 40,
	.max_chain_delay = 12000,
	.clks = gpmi_clks_for_mx6,
	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
};

static const struct gpmi_devdata gpmi_devdata_imx6sx = {
	.type = IS_MX6SX,
	.bch_max_ecc_strength = 62,
	.max_chain_delay = 12000,
	.clks = gpmi_clks_for_mx6,
	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
};

/* i.MX7D only needs the I/O and BCH APB clocks. */
static const char * const gpmi_clks_for_mx7d[] = {
	"gpmi_io", "gpmi_bch_apb",
};

static const struct gpmi_devdata gpmi_devdata_imx7d = {
	.type = IS_MX7D,
	.bch_max_ecc_strength = 62,
	.max_chain_delay = 12000,
	.clks = gpmi_clks_for_mx7d,
	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d),
};
1122
acquire_register_block(struct gpmi_nand_data * this,const char * res_name)1123 static int acquire_register_block(struct gpmi_nand_data *this,
1124 const char *res_name)
1125 {
1126 struct platform_device *pdev = this->pdev;
1127 struct resources *res = &this->resources;
1128 struct resource *r;
1129 void __iomem *p;
1130
1131 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
1132 p = devm_ioremap_resource(&pdev->dev, r);
1133 if (IS_ERR(p))
1134 return PTR_ERR(p);
1135
1136 if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
1137 res->gpmi_regs = p;
1138 else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
1139 res->bch_regs = p;
1140 else
1141 dev_err(this->dev, "unknown resource name : %s\n", res_name);
1142
1143 return 0;
1144 }
1145
acquire_bch_irq(struct gpmi_nand_data * this,irq_handler_t irq_h)1146 static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
1147 {
1148 struct platform_device *pdev = this->pdev;
1149 const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
1150 struct resource *r;
1151 int err;
1152
1153 r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
1154 if (!r) {
1155 dev_err(this->dev, "Can't get resource for %s\n", res_name);
1156 return -ENODEV;
1157 }
1158
1159 err = devm_request_irq(this->dev, r->start, irq_h, 0, res_name, this);
1160 if (err)
1161 dev_err(this->dev, "error requesting BCH IRQ\n");
1162
1163 return err;
1164 }
1165
release_dma_channels(struct gpmi_nand_data * this)1166 static void release_dma_channels(struct gpmi_nand_data *this)
1167 {
1168 unsigned int i;
1169 for (i = 0; i < DMA_CHANS; i++)
1170 if (this->dma_chans[i]) {
1171 dma_release_channel(this->dma_chans[i]);
1172 this->dma_chans[i] = NULL;
1173 }
1174 }
1175
acquire_dma_channels(struct gpmi_nand_data * this)1176 static int acquire_dma_channels(struct gpmi_nand_data *this)
1177 {
1178 struct platform_device *pdev = this->pdev;
1179 struct dma_chan *dma_chan;
1180
1181 /* request dma channel */
1182 dma_chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
1183 if (!dma_chan) {
1184 dev_err(this->dev, "Failed to request DMA channel.\n");
1185 goto acquire_err;
1186 }
1187
1188 this->dma_chans[0] = dma_chan;
1189 return 0;
1190
1191 acquire_err:
1192 release_dma_channels(this);
1193 return -EINVAL;
1194 }
1195
gpmi_get_clks(struct gpmi_nand_data * this)1196 static int gpmi_get_clks(struct gpmi_nand_data *this)
1197 {
1198 struct resources *r = &this->resources;
1199 struct clk *clk;
1200 int err, i;
1201
1202 for (i = 0; i < this->devdata->clks_count; i++) {
1203 clk = devm_clk_get(this->dev, this->devdata->clks[i]);
1204 if (IS_ERR(clk)) {
1205 err = PTR_ERR(clk);
1206 goto err_clock;
1207 }
1208
1209 r->clock[i] = clk;
1210 }
1211
1212 return 0;
1213
1214 err_clock:
1215 dev_dbg(this->dev, "failed in finding the clocks.\n");
1216 return err;
1217 }
1218
acquire_resources(struct gpmi_nand_data * this)1219 static int acquire_resources(struct gpmi_nand_data *this)
1220 {
1221 int ret;
1222
1223 ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
1224 if (ret)
1225 goto exit_regs;
1226
1227 ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
1228 if (ret)
1229 goto exit_regs;
1230
1231 ret = acquire_bch_irq(this, bch_irq);
1232 if (ret)
1233 goto exit_regs;
1234
1235 ret = acquire_dma_channels(this);
1236 if (ret)
1237 goto exit_regs;
1238
1239 ret = gpmi_get_clks(this);
1240 if (ret)
1241 goto exit_clock;
1242 return 0;
1243
1244 exit_clock:
1245 release_dma_channels(this);
1246 exit_regs:
1247 return ret;
1248 }
1249
release_resources(struct gpmi_nand_data * this)1250 static void release_resources(struct gpmi_nand_data *this)
1251 {
1252 release_dma_channels(this);
1253 }
1254
gpmi_free_dma_buffer(struct gpmi_nand_data * this)1255 static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
1256 {
1257 struct device *dev = this->dev;
1258 struct bch_geometry *geo = &this->bch_geometry;
1259
1260 if (this->auxiliary_virt && virt_addr_valid(this->auxiliary_virt))
1261 dma_free_coherent(dev, geo->auxiliary_size,
1262 this->auxiliary_virt,
1263 this->auxiliary_phys);
1264 kfree(this->data_buffer_dma);
1265 kfree(this->raw_buffer);
1266
1267 this->data_buffer_dma = NULL;
1268 this->raw_buffer = NULL;
1269 }
1270
1271 /* Allocate the DMA buffers */
gpmi_alloc_dma_buffer(struct gpmi_nand_data * this)1272 static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
1273 {
1274 struct bch_geometry *geo = &this->bch_geometry;
1275 struct device *dev = this->dev;
1276 struct mtd_info *mtd = nand_to_mtd(&this->nand);
1277
1278 /*
1279 * [2] Allocate a read/write data buffer.
1280 * The gpmi_alloc_dma_buffer can be called twice.
1281 * We allocate a PAGE_SIZE length buffer if gpmi_alloc_dma_buffer
1282 * is called before the NAND identification; and we allocate a
1283 * buffer of the real NAND page size when the gpmi_alloc_dma_buffer
1284 * is called after.
1285 */
1286 this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
1287 GFP_DMA | GFP_KERNEL);
1288 if (this->data_buffer_dma == NULL)
1289 goto error_alloc;
1290
1291 this->auxiliary_virt = dma_alloc_coherent(dev, geo->auxiliary_size,
1292 &this->auxiliary_phys, GFP_DMA);
1293 if (!this->auxiliary_virt)
1294 goto error_alloc;
1295
1296 this->raw_buffer = kzalloc((mtd->writesize ?: PAGE_SIZE) + mtd->oobsize, GFP_KERNEL);
1297 if (!this->raw_buffer)
1298 goto error_alloc;
1299
1300 return 0;
1301
1302 error_alloc:
1303 gpmi_free_dma_buffer(this);
1304 return -ENOMEM;
1305 }
1306
1307 /*
1308 * Handles block mark swapping.
1309 * It can be called in swapping the block mark, or swapping it back,
1310 * because the the operations are the same.
1311 */
block_mark_swapping(struct gpmi_nand_data * this,void * payload,void * auxiliary)1312 static void block_mark_swapping(struct gpmi_nand_data *this,
1313 void *payload, void *auxiliary)
1314 {
1315 struct bch_geometry *nfc_geo = &this->bch_geometry;
1316 unsigned char *p;
1317 unsigned char *a;
1318 unsigned int bit;
1319 unsigned char mask;
1320 unsigned char from_data;
1321 unsigned char from_oob;
1322
1323 if (!this->swap_block_mark)
1324 return;
1325
1326 /*
1327 * If control arrives here, we're swapping. Make some convenience
1328 * variables.
1329 */
1330 bit = nfc_geo->block_mark_bit_offset;
1331 p = payload + nfc_geo->block_mark_byte_offset;
1332 a = auxiliary;
1333
1334 /*
1335 * Get the byte from the data area that overlays the block mark. Since
1336 * the ECC engine applies its own view to the bits in the page, the
1337 * physical block mark won't (in general) appear on a byte boundary in
1338 * the data.
1339 */
1340 from_data = (p[0] >> bit) | (p[1] << (8 - bit));
1341
1342 /* Get the byte from the OOB. */
1343 from_oob = a[0];
1344
1345 /* Swap them. */
1346 a[0] = from_data;
1347
1348 mask = (0x1 << bit) - 1;
1349 p[0] = (p[0] & mask) | (from_oob << bit);
1350
1351 mask = ~0 << bit;
1352 p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
1353 }
1354
gpmi_count_bitflips(struct nand_chip * chip,void * buf,int first,int last,int meta)1355 static int gpmi_count_bitflips(struct nand_chip *chip, void *buf, int first,
1356 int last, int meta)
1357 {
1358 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1359 struct bch_geometry *nfc_geo = &this->bch_geometry;
1360 struct mtd_info *mtd = nand_to_mtd(chip);
1361 int i;
1362 unsigned char *status;
1363 unsigned int max_bitflips = 0;
1364
1365 /* Loop over status bytes, accumulating ECC status. */
1366 status = this->auxiliary_virt + ALIGN(meta, 4);
1367
1368 for (i = first; i < last; i++, status++) {
1369 if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
1370 continue;
1371
1372 if (*status == STATUS_UNCORRECTABLE) {
1373 int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
1374 u8 *eccbuf = this->raw_buffer;
1375 int offset, bitoffset;
1376 int eccbytes;
1377 int flips;
1378
1379 /* Read ECC bytes into our internal raw_buffer */
1380 offset = nfc_geo->metadata_size * 8;
1381 offset += ((8 * nfc_geo->ecc_chunk_size) + eccbits) * (i + 1);
1382 offset -= eccbits;
1383 bitoffset = offset % 8;
1384 eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
1385 offset /= 8;
1386 eccbytes -= offset;
1387 nand_change_read_column_op(chip, offset, eccbuf,
1388 eccbytes, false);
1389
1390 /*
1391 * ECC data are not byte aligned and we may have
1392 * in-band data in the first and last byte of
1393 * eccbuf. Set non-eccbits to one so that
1394 * nand_check_erased_ecc_chunk() does not count them
1395 * as bitflips.
1396 */
1397 if (bitoffset)
1398 eccbuf[0] |= GENMASK(bitoffset - 1, 0);
1399
1400 bitoffset = (bitoffset + eccbits) % 8;
1401 if (bitoffset)
1402 eccbuf[eccbytes - 1] |= GENMASK(7, bitoffset);
1403
1404 /*
1405 * The ECC hardware has an uncorrectable ECC status
1406 * code in case we have bitflips in an erased page. As
1407 * nothing was written into this subpage the ECC is
1408 * obviously wrong and we can not trust it. We assume
1409 * at this point that we are reading an erased page and
1410 * try to correct the bitflips in buffer up to
1411 * ecc_strength bitflips. If this is a page with random
1412 * data, we exceed this number of bitflips and have a
1413 * ECC failure. Otherwise we use the corrected buffer.
1414 */
1415 if (i == 0) {
1416 /* The first block includes metadata */
1417 flips = nand_check_erased_ecc_chunk(
1418 buf + i * nfc_geo->ecc_chunk_size,
1419 nfc_geo->ecc_chunk_size,
1420 eccbuf, eccbytes,
1421 this->auxiliary_virt,
1422 nfc_geo->metadata_size,
1423 nfc_geo->ecc_strength);
1424 } else {
1425 flips = nand_check_erased_ecc_chunk(
1426 buf + i * nfc_geo->ecc_chunk_size,
1427 nfc_geo->ecc_chunk_size,
1428 eccbuf, eccbytes,
1429 NULL, 0,
1430 nfc_geo->ecc_strength);
1431 }
1432
1433 if (flips > 0) {
1434 max_bitflips = max_t(unsigned int, max_bitflips,
1435 flips);
1436 mtd->ecc_stats.corrected += flips;
1437 continue;
1438 }
1439
1440 mtd->ecc_stats.failed++;
1441 continue;
1442 }
1443
1444 mtd->ecc_stats.corrected += *status;
1445 max_bitflips = max_t(unsigned int, max_bitflips, *status);
1446 }
1447
1448 return max_bitflips;
1449 }
1450
gpmi_bch_layout_std(struct gpmi_nand_data * this)1451 static void gpmi_bch_layout_std(struct gpmi_nand_data *this)
1452 {
1453 struct bch_geometry *geo = &this->bch_geometry;
1454 unsigned int ecc_strength = geo->ecc_strength >> 1;
1455 unsigned int gf_len = geo->gf_len;
1456 unsigned int block_size = geo->ecc_chunk_size;
1457
1458 this->bch_flashlayout0 =
1459 BF_BCH_FLASH0LAYOUT0_NBLOCKS(geo->ecc_chunk_count - 1) |
1460 BF_BCH_FLASH0LAYOUT0_META_SIZE(geo->metadata_size) |
1461 BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
1462 BF_BCH_FLASH0LAYOUT0_GF(gf_len, this) |
1463 BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this);
1464
1465 this->bch_flashlayout1 =
1466 BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(geo->page_size) |
1467 BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
1468 BF_BCH_FLASH0LAYOUT1_GF(gf_len, this) |
1469 BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this);
1470 }
1471
gpmi_ecc_read_page(struct nand_chip * chip,uint8_t * buf,int oob_required,int page)1472 static int gpmi_ecc_read_page(struct nand_chip *chip, uint8_t *buf,
1473 int oob_required, int page)
1474 {
1475 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1476 struct mtd_info *mtd = nand_to_mtd(chip);
1477 struct bch_geometry *geo = &this->bch_geometry;
1478 unsigned int max_bitflips;
1479 int ret;
1480
1481 gpmi_bch_layout_std(this);
1482 this->bch = true;
1483
1484 ret = nand_read_page_op(chip, page, 0, buf, geo->page_size);
1485 if (ret)
1486 return ret;
1487
1488 max_bitflips = gpmi_count_bitflips(chip, buf, 0,
1489 geo->ecc_chunk_count,
1490 geo->auxiliary_status_offset);
1491
1492 /* handle the block mark swapping */
1493 block_mark_swapping(this, buf, this->auxiliary_virt);
1494
1495 if (oob_required) {
1496 /*
1497 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
1498 * for details about our policy for delivering the OOB.
1499 *
1500 * We fill the caller's buffer with set bits, and then copy the
1501 * block mark to th caller's buffer. Note that, if block mark
1502 * swapping was necessary, it has already been done, so we can
1503 * rely on the first byte of the auxiliary buffer to contain
1504 * the block mark.
1505 */
1506 memset(chip->oob_poi, ~0, mtd->oobsize);
1507 chip->oob_poi[0] = ((uint8_t *)this->auxiliary_virt)[0];
1508 }
1509
1510 return max_bitflips;
1511 }
1512
1513 /* Fake a virtual small page for the subpage read */
gpmi_ecc_read_subpage(struct nand_chip * chip,uint32_t offs,uint32_t len,uint8_t * buf,int page)1514 static int gpmi_ecc_read_subpage(struct nand_chip *chip, uint32_t offs,
1515 uint32_t len, uint8_t *buf, int page)
1516 {
1517 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1518 struct bch_geometry *geo = &this->bch_geometry;
1519 int size = chip->ecc.size; /* ECC chunk size */
1520 int meta, n, page_size;
1521 unsigned int max_bitflips;
1522 unsigned int ecc_strength;
1523 int first, last, marker_pos;
1524 int ecc_parity_size;
1525 int col = 0;
1526 int ret;
1527
1528 /* The size of ECC parity */
1529 ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
1530
1531 /* Align it with the chunk size */
1532 first = offs / size;
1533 last = (offs + len - 1) / size;
1534
1535 if (this->swap_block_mark) {
1536 /*
1537 * Find the chunk which contains the Block Marker.
1538 * If this chunk is in the range of [first, last],
1539 * we have to read out the whole page.
1540 * Why? since we had swapped the data at the position of Block
1541 * Marker to the metadata which is bound with the chunk 0.
1542 */
1543 marker_pos = geo->block_mark_byte_offset / size;
1544 if (last >= marker_pos && first <= marker_pos) {
1545 dev_dbg(this->dev,
1546 "page:%d, first:%d, last:%d, marker at:%d\n",
1547 page, first, last, marker_pos);
1548 return gpmi_ecc_read_page(chip, buf, 0, page);
1549 }
1550 }
1551
1552 meta = geo->metadata_size;
1553 if (first) {
1554 col = meta + (size + ecc_parity_size) * first;
1555 meta = 0;
1556 buf = buf + first * size;
1557 }
1558
1559 ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
1560
1561 n = last - first + 1;
1562 page_size = meta + (size + ecc_parity_size) * n;
1563 ecc_strength = geo->ecc_strength >> 1;
1564
1565 this->bch_flashlayout0 = BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1) |
1566 BF_BCH_FLASH0LAYOUT0_META_SIZE(meta) |
1567 BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
1568 BF_BCH_FLASH0LAYOUT0_GF(geo->gf_len, this) |
1569 BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(geo->ecc_chunk_size, this);
1570
1571 this->bch_flashlayout1 = BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) |
1572 BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
1573 BF_BCH_FLASH0LAYOUT1_GF(geo->gf_len, this) |
1574 BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(geo->ecc_chunk_size, this);
1575
1576 this->bch = true;
1577
1578 ret = nand_read_page_op(chip, page, col, buf, page_size);
1579 if (ret)
1580 return ret;
1581
1582 dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
1583 page, offs, len, col, first, n, page_size);
1584
1585 max_bitflips = gpmi_count_bitflips(chip, buf, first, last, meta);
1586
1587 return max_bitflips;
1588 }
1589
gpmi_ecc_write_page(struct nand_chip * chip,const uint8_t * buf,int oob_required,int page)1590 static int gpmi_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
1591 int oob_required, int page)
1592 {
1593 struct mtd_info *mtd = nand_to_mtd(chip);
1594 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1595 struct bch_geometry *nfc_geo = &this->bch_geometry;
1596 int ret;
1597
1598 dev_dbg(this->dev, "ecc write page.\n");
1599
1600 gpmi_bch_layout_std(this);
1601 this->bch = true;
1602
1603 memcpy(this->auxiliary_virt, chip->oob_poi, nfc_geo->auxiliary_size);
1604
1605 if (this->swap_block_mark) {
1606 /*
1607 * When doing bad block marker swapping we must always copy the
1608 * input buffer as we can't modify the const buffer.
1609 */
1610 memcpy(this->data_buffer_dma, buf, mtd->writesize);
1611 buf = this->data_buffer_dma;
1612 block_mark_swapping(this, this->data_buffer_dma,
1613 this->auxiliary_virt);
1614 }
1615
1616 ret = nand_prog_page_op(chip, page, 0, buf, nfc_geo->page_size);
1617
1618 return ret;
1619 }
1620
1621 /*
1622 * There are several places in this driver where we have to handle the OOB and
1623 * block marks. This is the function where things are the most complicated, so
1624 * this is where we try to explain it all. All the other places refer back to
1625 * here.
1626 *
1627 * These are the rules, in order of decreasing importance:
1628 *
1629 * 1) Nothing the caller does can be allowed to imperil the block mark.
1630 *
1631 * 2) In read operations, the first byte of the OOB we return must reflect the
1632 * true state of the block mark, no matter where that block mark appears in
1633 * the physical page.
1634 *
1635 * 3) ECC-based read operations return an OOB full of set bits (since we never
1636 * allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
1637 * return).
1638 *
1639 * 4) "Raw" read operations return a direct view of the physical bytes in the
1640 * page, using the conventional definition of which bytes are data and which
1641 * are OOB. This gives the caller a way to see the actual, physical bytes
1642 * in the page, without the distortions applied by our ECC engine.
1643 *
1644 *
1645 * What we do for this specific read operation depends on two questions:
1646 *
1647 * 1) Are we doing a "raw" read, or an ECC-based read?
1648 *
1649 * 2) Are we using block mark swapping or transcription?
1650 *
1651 * There are four cases, illustrated by the following Karnaugh map:
1652 *
1653 * | Raw | ECC-based |
1654 * -------------+-------------------------+-------------------------+
1655 * | Read the conventional | |
1656 * | OOB at the end of the | |
1657 * Swapping | page and return it. It | |
1658 * | contains exactly what | |
1659 * | we want. | Read the block mark and |
1660 * -------------+-------------------------+ return it in a buffer |
1661 * | Read the conventional | full of set bits. |
1662 * | OOB at the end of the | |
1663 * | page and also the block | |
1664 * Transcribing | mark in the metadata. | |
1665 * | Copy the block mark | |
1666 * | into the first byte of | |
1667 * | the OOB. | |
1668 * -------------+-------------------------+-------------------------+
1669 *
1670 * Note that we break rule #4 in the Transcribing/Raw case because we're not
1671 * giving an accurate view of the actual, physical bytes in the page (we're
1672 * overwriting the block mark). That's OK because it's more important to follow
1673 * rule #2.
1674 *
1675 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
1676 * easy. When reading a page, for example, the NAND Flash MTD code calls our
1677 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
1678 * ECC-based or raw view of the page is implicit in which function it calls
1679 * (there is a similar pair of ECC-based/raw functions for writing).
1680 */
gpmi_ecc_read_oob(struct nand_chip * chip,int page)1681 static int gpmi_ecc_read_oob(struct nand_chip *chip, int page)
1682 {
1683 struct mtd_info *mtd = nand_to_mtd(chip);
1684 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1685 int ret;
1686
1687 /* clear the OOB buffer */
1688 memset(chip->oob_poi, ~0, mtd->oobsize);
1689
1690 /* Read out the conventional OOB. */
1691 ret = nand_read_page_op(chip, page, mtd->writesize, chip->oob_poi,
1692 mtd->oobsize);
1693 if (ret)
1694 return ret;
1695
1696 /*
1697 * Now, we want to make sure the block mark is correct. In the
1698 * non-transcribing case (!GPMI_IS_MX23()), we already have it.
1699 * Otherwise, we need to explicitly read it.
1700 */
1701 if (GPMI_IS_MX23(this)) {
1702 /* Read the block mark into the first byte of the OOB buffer. */
1703 ret = nand_read_page_op(chip, page, 0, chip->oob_poi, 1);
1704 if (ret)
1705 return ret;
1706 }
1707
1708 return 0;
1709 }
1710
gpmi_ecc_write_oob(struct nand_chip * chip,int page)1711 static int gpmi_ecc_write_oob(struct nand_chip *chip, int page)
1712 {
1713 struct mtd_info *mtd = nand_to_mtd(chip);
1714 struct mtd_oob_region of = { };
1715
1716 /* Do we have available oob area? */
1717 mtd_ooblayout_free(mtd, 0, &of);
1718 if (!of.length)
1719 return -EPERM;
1720
1721 if (!nand_is_slc(chip))
1722 return -EPERM;
1723
1724 return nand_prog_page_op(chip, page, mtd->writesize + of.offset,
1725 chip->oob_poi + of.offset, of.length);
1726 }
1727
1728 /*
1729 * This function reads a NAND page without involving the ECC engine (no HW
1730 * ECC correction).
1731 * The tricky part in the GPMI/BCH controller is that it stores ECC bits
1732 * inline (interleaved with payload DATA), and do not align data chunk on
1733 * byte boundaries.
1734 * We thus need to take care moving the payload data and ECC bits stored in the
1735 * page into the provided buffers, which is why we're using gpmi_copy_bits.
1736 *
1737 * See set_geometry_by_ecc_info inline comments to have a full description
1738 * of the layout used by the GPMI controller.
1739 */
gpmi_ecc_read_page_raw(struct nand_chip * chip,uint8_t * buf,int oob_required,int page)1740 static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
1741 int oob_required, int page)
1742 {
1743 struct mtd_info *mtd = nand_to_mtd(chip);
1744 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1745 struct bch_geometry *nfc_geo = &this->bch_geometry;
1746 int eccsize = nfc_geo->ecc_chunk_size;
1747 int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
1748 u8 *tmp_buf = this->raw_buffer;
1749 size_t src_bit_off;
1750 size_t oob_bit_off;
1751 size_t oob_byte_off;
1752 uint8_t *oob = chip->oob_poi;
1753 int step;
1754 int ret;
1755
1756 ret = nand_read_page_op(chip, page, 0, tmp_buf,
1757 mtd->writesize + mtd->oobsize);
1758 if (ret)
1759 return ret;
1760
1761 /*
1762 * If required, swap the bad block marker and the data stored in the
1763 * metadata section, so that we don't wrongly consider a block as bad.
1764 *
1765 * See the layout description for a detailed explanation on why this
1766 * is needed.
1767 */
1768 if (this->swap_block_mark)
1769 swap(tmp_buf[0], tmp_buf[mtd->writesize]);
1770
1771 /*
1772 * Copy the metadata section into the oob buffer (this section is
1773 * guaranteed to be aligned on a byte boundary).
1774 */
1775 if (oob_required)
1776 memcpy(oob, tmp_buf, nfc_geo->metadata_size);
1777
1778 oob_bit_off = nfc_geo->metadata_size * 8;
1779 src_bit_off = oob_bit_off;
1780
1781 /* Extract interleaved payload data and ECC bits */
1782 for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
1783 if (buf)
1784 gpmi_copy_bits(buf, step * eccsize * 8,
1785 tmp_buf, src_bit_off,
1786 eccsize * 8);
1787 src_bit_off += eccsize * 8;
1788
1789 /* Align last ECC block to align a byte boundary */
1790 if (step == nfc_geo->ecc_chunk_count - 1 &&
1791 (oob_bit_off + eccbits) % 8)
1792 eccbits += 8 - ((oob_bit_off + eccbits) % 8);
1793
1794 if (oob_required)
1795 gpmi_copy_bits(oob, oob_bit_off,
1796 tmp_buf, src_bit_off,
1797 eccbits);
1798
1799 src_bit_off += eccbits;
1800 oob_bit_off += eccbits;
1801 }
1802
1803 if (oob_required) {
1804 oob_byte_off = oob_bit_off / 8;
1805
1806 if (oob_byte_off < mtd->oobsize)
1807 memcpy(oob + oob_byte_off,
1808 tmp_buf + mtd->writesize + oob_byte_off,
1809 mtd->oobsize - oob_byte_off);
1810 }
1811
1812 return 0;
1813 }
1814
1815 /*
1816 * This function writes a NAND page without involving the ECC engine (no HW
1817 * ECC generation).
1818 * The tricky part in the GPMI/BCH controller is that it stores ECC bits
1819 * inline (interleaved with payload DATA), and do not align data chunk on
1820 * byte boundaries.
1821 * We thus need to take care moving the OOB area at the right place in the
1822 * final page, which is why we're using gpmi_copy_bits.
1823 *
1824 * See set_geometry_by_ecc_info inline comments to have a full description
1825 * of the layout used by the GPMI controller.
1826 */
gpmi_ecc_write_page_raw(struct nand_chip * chip,const uint8_t * buf,int oob_required,int page)1827 static int gpmi_ecc_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
1828 int oob_required, int page)
1829 {
1830 struct mtd_info *mtd = nand_to_mtd(chip);
1831 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1832 struct bch_geometry *nfc_geo = &this->bch_geometry;
1833 int eccsize = nfc_geo->ecc_chunk_size;
1834 int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
1835 u8 *tmp_buf = this->raw_buffer;
1836 uint8_t *oob = chip->oob_poi;
1837 size_t dst_bit_off;
1838 size_t oob_bit_off;
1839 size_t oob_byte_off;
1840 int step;
1841
1842 /*
1843 * Initialize all bits to 1 in case we don't have a buffer for the
1844 * payload or oob data in order to leave unspecified bits of data
1845 * to their initial state.
1846 */
1847 if (!buf || !oob_required)
1848 memset(tmp_buf, 0xff, mtd->writesize + mtd->oobsize);
1849
1850 /*
1851 * First copy the metadata section (stored in oob buffer) at the
1852 * beginning of the page, as imposed by the GPMI layout.
1853 */
1854 memcpy(tmp_buf, oob, nfc_geo->metadata_size);
1855 oob_bit_off = nfc_geo->metadata_size * 8;
1856 dst_bit_off = oob_bit_off;
1857
1858 /* Interleave payload data and ECC bits */
1859 for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
1860 if (buf)
1861 gpmi_copy_bits(tmp_buf, dst_bit_off,
1862 buf, step * eccsize * 8, eccsize * 8);
1863 dst_bit_off += eccsize * 8;
1864
1865 /* Align last ECC block to align a byte boundary */
1866 if (step == nfc_geo->ecc_chunk_count - 1 &&
1867 (oob_bit_off + eccbits) % 8)
1868 eccbits += 8 - ((oob_bit_off + eccbits) % 8);
1869
1870 if (oob_required)
1871 gpmi_copy_bits(tmp_buf, dst_bit_off,
1872 oob, oob_bit_off, eccbits);
1873
1874 dst_bit_off += eccbits;
1875 oob_bit_off += eccbits;
1876 }
1877
1878 oob_byte_off = oob_bit_off / 8;
1879
1880 if (oob_required && oob_byte_off < mtd->oobsize)
1881 memcpy(tmp_buf + mtd->writesize + oob_byte_off,
1882 oob + oob_byte_off, mtd->oobsize - oob_byte_off);
1883
1884 /*
1885 * If required, swap the bad block marker and the first byte of the
1886 * metadata section, so that we don't modify the bad block marker.
1887 *
1888 * See the layout description for a detailed explanation on why this
1889 * is needed.
1890 */
1891 if (this->swap_block_mark)
1892 swap(tmp_buf[0], tmp_buf[mtd->writesize]);
1893
1894 return nand_prog_page_op(chip, page, 0, tmp_buf,
1895 mtd->writesize + mtd->oobsize);
1896 }
1897
gpmi_ecc_read_oob_raw(struct nand_chip * chip,int page)1898 static int gpmi_ecc_read_oob_raw(struct nand_chip *chip, int page)
1899 {
1900 return gpmi_ecc_read_page_raw(chip, NULL, 1, page);
1901 }
1902
gpmi_ecc_write_oob_raw(struct nand_chip * chip,int page)1903 static int gpmi_ecc_write_oob_raw(struct nand_chip *chip, int page)
1904 {
1905 return gpmi_ecc_write_page_raw(chip, NULL, 1, page);
1906 }
1907
gpmi_block_markbad(struct nand_chip * chip,loff_t ofs)1908 static int gpmi_block_markbad(struct nand_chip *chip, loff_t ofs)
1909 {
1910 struct mtd_info *mtd = nand_to_mtd(chip);
1911 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1912 int ret = 0;
1913 uint8_t *block_mark;
1914 int column, page, chipnr;
1915
1916 chipnr = (int)(ofs >> chip->chip_shift);
1917 nand_select_target(chip, chipnr);
1918
1919 column = !GPMI_IS_MX23(this) ? mtd->writesize : 0;
1920
1921 /* Write the block mark. */
1922 block_mark = this->data_buffer_dma;
1923 block_mark[0] = 0; /* bad block marker */
1924
1925 /* Shift to get page */
1926 page = (int)(ofs >> chip->page_shift);
1927
1928 ret = nand_prog_page_op(chip, page, column, block_mark, 1);
1929
1930 nand_deselect_target(chip);
1931
1932 return ret;
1933 }
1934
nand_boot_set_geometry(struct gpmi_nand_data * this)1935 static int nand_boot_set_geometry(struct gpmi_nand_data *this)
1936 {
1937 struct boot_rom_geometry *geometry = &this->rom_geometry;
1938
1939 /*
1940 * Set the boot block stride size.
1941 *
1942 * In principle, we should be reading this from the OTP bits, since
1943 * that's where the ROM is going to get it. In fact, we don't have any
1944 * way to read the OTP bits, so we go with the default and hope for the
1945 * best.
1946 */
1947 geometry->stride_size_in_pages = 64;
1948
1949 /*
1950 * Set the search area stride exponent.
1951 *
1952 * In principle, we should be reading this from the OTP bits, since
1953 * that's where the ROM is going to get it. In fact, we don't have any
1954 * way to read the OTP bits, so we go with the default and hope for the
1955 * best.
1956 */
1957 geometry->search_area_stride_exponent = 2;
1958 return 0;
1959 }
1960
/* NCB fingerprint the boot ROM stores at byte 12 of a stamped page. */
static const char *fingerprint = "STMP";
mx23_check_transcription_stamp(struct gpmi_nand_data * this)1962 static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
1963 {
1964 struct boot_rom_geometry *rom_geo = &this->rom_geometry;
1965 struct device *dev = this->dev;
1966 struct nand_chip *chip = &this->nand;
1967 unsigned int search_area_size_in_strides;
1968 unsigned int stride;
1969 unsigned int page;
1970 u8 *buffer = nand_get_data_buf(chip);
1971 int found_an_ncb_fingerprint = false;
1972 int ret;
1973
1974 /* Compute the number of strides in a search area. */
1975 search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
1976
1977 nand_select_target(chip, 0);
1978
1979 /*
1980 * Loop through the first search area, looking for the NCB fingerprint.
1981 */
1982 dev_dbg(dev, "Scanning for an NCB fingerprint...\n");
1983
1984 for (stride = 0; stride < search_area_size_in_strides; stride++) {
1985 /* Compute the page addresses. */
1986 page = stride * rom_geo->stride_size_in_pages;
1987
1988 dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);
1989
1990 /*
1991 * Read the NCB fingerprint. The fingerprint is four bytes long
1992 * and starts in the 12th byte of the page.
1993 */
1994 ret = nand_read_page_op(chip, page, 12, buffer,
1995 strlen(fingerprint));
1996 if (ret)
1997 continue;
1998
1999 /* Look for the fingerprint. */
2000 if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
2001 found_an_ncb_fingerprint = true;
2002 break;
2003 }
2004
2005 }
2006
2007 nand_deselect_target(chip);
2008
2009 if (found_an_ncb_fingerprint)
2010 dev_dbg(dev, "\tFound a fingerprint\n");
2011 else
2012 dev_dbg(dev, "\tNo fingerprint found\n");
2013 return found_an_ncb_fingerprint;
2014 }
2015
/*
 * Writes a transcription stamp: erases the blocks covering the first
 * search area, then writes the "STMP" fingerprint into the first page
 * of every stride so mx23_check_transcription_stamp() will find it on
 * the next boot.
 *
 * Erase/write failures are only logged; the function always returns 0.
 */
static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;
	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int block_size_in_pages;
	unsigned int search_area_size_in_strides;
	unsigned int search_area_size_in_pages;
	unsigned int search_area_size_in_blocks;
	unsigned int block;
	unsigned int stride;
	unsigned int page;
	u8 *buffer = nand_get_data_buf(chip);
	int status;

	/* Compute the search area geometry. */
	block_size_in_pages = mtd->erasesize / mtd->writesize;
	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
	search_area_size_in_pages = search_area_size_in_strides *
					rom_geo->stride_size_in_pages;
	/* Round up to whole erase blocks. */
	search_area_size_in_blocks =
		  (search_area_size_in_pages + (block_size_in_pages - 1)) /
				    block_size_in_pages;

	dev_dbg(dev, "Search Area Geometry :\n");
	dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
	dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
	dev_dbg(dev, "\tin Pages : %u\n", search_area_size_in_pages);

	nand_select_target(chip, 0);

	/* Loop over blocks in the first search area, erasing them. */
	dev_dbg(dev, "Erasing the search area...\n");

	for (block = 0; block < search_area_size_in_blocks; block++) {
		/* Erase this block; a failure is logged but not fatal. */
		dev_dbg(dev, "\tErasing block 0x%x\n", block);
		status = nand_erase_op(chip, block);
		if (status)
			dev_err(dev, "[%s] Erase failed.\n", __func__);
	}

	/* Write the NCB fingerprint into the page buffer (rest is 0xff). */
	memset(buffer, ~0, mtd->writesize);
	memcpy(buffer + 12, fingerprint, strlen(fingerprint));

	/* Loop through the first search area, writing NCB fingerprints. */
	dev_dbg(dev, "Writing NCB fingerprints...\n");
	for (stride = 0; stride < search_area_size_in_strides; stride++) {
		/* Compute the page address: first page of each stride. */
		page = stride * rom_geo->stride_size_in_pages;

		/* Write the first page of the current stride. */
		dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);

		/* Raw write: the fingerprint page carries no ECC layout. */
		status = chip->ecc.write_page_raw(chip, buffer, 0, page);
		if (status)
			dev_err(dev, "[%s] Write failed.\n", __func__);
	}

	nand_deselect_target(chip);

	return 0;
}
2082
/*
 * mx23_boot_init - transcribe conventional bad block marks (i.MX23).
 *
 * The i.MX23 cannot use block mark swapping, so the conventional OOB
 * bad block marks must be "transcribed" into the location the driver
 * uses. If the transcription stamp is already present this is a no-op;
 * otherwise every block's conventional mark is read and, when bad, the
 * block is re-marked via the driver's block_markbad hook, and finally
 * the stamp is written so this is done only once.
 */
static int mx23_boot_init(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int block_count;
	unsigned int block;
	int chipnr;
	int page;
	loff_t byte;
	uint8_t block_mark;
	int ret = 0;

	/*
	 * If control arrives here, we can't use block mark swapping, which
	 * means we're forced to use transcription. First, scan for the
	 * transcription stamp. If we find it, then we don't have to do
	 * anything -- the block marks are already transcribed.
	 */
	if (mx23_check_transcription_stamp(this))
		return 0;

	/*
	 * If control arrives here, we couldn't find a transcription stamp,
	 * so we presume the block marks are in the conventional location.
	 */
	dev_dbg(dev, "Transcribing bad block marks...\n");

	/* Compute the number of blocks in the entire medium. */
	block_count = nanddev_eraseblocks_per_target(&chip->base);

	/*
	 * Loop over all the blocks in the medium, transcribing block marks as
	 * we go.
	 */
	for (block = 0; block < block_count; block++) {
		/*
		 * Compute the chip, page and byte addresses for this block's
		 * conventional mark.
		 */
		chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
		page = block << (chip->phys_erase_shift - chip->page_shift);
		byte = block << chip->phys_erase_shift;

		/*
		 * Send the command to read the conventional block mark:
		 * the first OOB byte, i.e. offset mtd->writesize.
		 */
		nand_select_target(chip, chipnr);
		ret = nand_read_page_op(chip, page, mtd->writesize, &block_mark,
					1);
		nand_deselect_target(chip);

		/* Skip blocks whose mark could not be read. */
		if (ret)
			continue;

		/*
		 * Check if the block is marked bad. If so, we need to mark it
		 * again, but this time the result will be a mark in the
		 * location where we transcribe block marks.
		 */
		if (block_mark != 0xff) {
			dev_dbg(dev, "Transcribing mark in block %u\n", block);
			ret = chip->legacy.block_markbad(chip, byte);
			if (ret)
				dev_err(dev,
					"Failed to mark block bad with ret %d\n",
					ret);
		}
	}

	/* Write the stamp that indicates we've transcribed the block marks. */
	mx23_write_transcription_stamp(this);
	return 0;
}
2155
/*
 * Set up the boot ROM geometry and run any ROM-specific initialization
 * that must happen before the BBT scan.
 */
static int nand_boot_init(struct gpmi_nand_data *this)
{
	int ret = 0;

	nand_boot_set_geometry(this);

	/* Only the i.MX23 ROM needs the transcription work-around. */
	if (GPMI_IS_MX23(this))
		ret = mx23_boot_init(this);

	return ret;
}
2165
gpmi_set_geometry(struct gpmi_nand_data * this)2166 static int gpmi_set_geometry(struct gpmi_nand_data *this)
2167 {
2168 int ret;
2169
2170 /* Free the temporary DMA memory for reading ID. */
2171 gpmi_free_dma_buffer(this);
2172
2173 /* Set up the NFC geometry which is used by BCH. */
2174 ret = bch_set_geometry(this);
2175 if (ret) {
2176 dev_err(this->dev, "Error setting BCH geometry : %d\n", ret);
2177 return ret;
2178 }
2179
2180 /* Alloc the new DMA buffers according to the pagesize and oobsize */
2181 return gpmi_alloc_dma_buffer(this);
2182 }
2183
/*
 * gpmi_init_last - final controller setup once the chip is identified.
 *
 * Programs the medium geometry (BCH + DMA buffers), wires up the
 * hardware-ECC page/OOB accessors, and enables subpage reads where the
 * hardware supports them.
 */
static int gpmi_init_last(struct gpmi_nand_data *this)
{
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct bch_geometry *bch_geo = &this->bch_geometry;
	int ret;

	/* Set up the medium geometry */
	ret = gpmi_set_geometry(this);
	if (ret)
		return ret;

	/* Init the nand_ecc_ctrl{} with this driver's BCH-backed accessors. */
	ecc->read_page	= gpmi_ecc_read_page;
	ecc->write_page	= gpmi_ecc_write_page;
	ecc->read_oob	= gpmi_ecc_read_oob;
	ecc->write_oob	= gpmi_ecc_write_oob;
	ecc->read_page_raw = gpmi_ecc_read_page_raw;
	ecc->write_page_raw = gpmi_ecc_write_page_raw;
	ecc->read_oob_raw = gpmi_ecc_read_oob_raw;
	ecc->write_oob_raw = gpmi_ecc_write_oob_raw;
	ecc->mode	= NAND_ECC_HW;
	ecc->size	= bch_geo->ecc_chunk_size;
	ecc->strength	= bch_geo->ecc_strength;
	mtd_set_ooblayout(mtd, &gpmi_ooblayout_ops);

	/*
	 * We only enable the subpage read when:
	 *  (1) the chip is imx6, and
	 *  (2) the size of the ECC parity is byte aligned.
	 */
	if (GPMI_IS_MX6(this) &&
		((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) {
		ecc->read_subpage = gpmi_ecc_read_subpage;
		chip->options |= NAND_SUBPAGE_READ;
	}

	return 0;
}
2224
gpmi_nand_attach_chip(struct nand_chip * chip)2225 static int gpmi_nand_attach_chip(struct nand_chip *chip)
2226 {
2227 struct gpmi_nand_data *this = nand_get_controller_data(chip);
2228 int ret;
2229
2230 if (chip->bbt_options & NAND_BBT_USE_FLASH) {
2231 chip->bbt_options |= NAND_BBT_NO_OOB;
2232
2233 if (of_property_read_bool(this->dev->of_node,
2234 "fsl,no-blockmark-swap"))
2235 this->swap_block_mark = false;
2236 }
2237 dev_dbg(this->dev, "Blockmark swapping %sabled\n",
2238 this->swap_block_mark ? "en" : "dis");
2239
2240 ret = gpmi_init_last(this);
2241 if (ret)
2242 return ret;
2243
2244 chip->options |= NAND_SKIP_BBTSCAN;
2245
2246 return 0;
2247 }
2248
get_next_transfer(struct gpmi_nand_data * this)2249 static struct gpmi_transfer *get_next_transfer(struct gpmi_nand_data *this)
2250 {
2251 struct gpmi_transfer *transfer = &this->transfers[this->ntransfers];
2252
2253 this->ntransfers++;
2254
2255 if (this->ntransfers == GPMI_MAX_TRANSFERS)
2256 return NULL;
2257
2258 return transfer;
2259 }
2260
/*
 * Chain a command cycle: a PIO descriptor programs GPMI for a CLE write
 * of (1 + naddr) bytes, then a slave_sg descriptor feeds it the opcode
 * followed by the address bytes out of the transfer's cmdbuf.
 * Returns the tail descriptor, or NULL on failure.
 */
static struct dma_async_tx_descriptor *gpmi_chain_command(
	struct gpmi_nand_data *this, u8 cmd, const u8 *addr, int naddr)
{
	struct dma_chan *channel = get_dma_chan(this);
	struct dma_async_tx_descriptor *desc;
	struct gpmi_transfer *transfer;
	int chip = this->nand.cur_cs;
	u32 pio[3];

	/* [1] send out the PIO words */
	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
		| BM_GPMI_CTRL0_ADDRESS_INCREMENT
		| BF_GPMI_CTRL0_XFER_COUNT(naddr + 1);
	pio[1] = 0;
	pio[2] = 0;
	desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
				      DMA_TRANS_NONE, 0);
	if (!desc)
		return NULL;

	transfer = get_next_transfer(this);
	if (!transfer)
		return NULL;

	/* Opcode first, then the address cycles, as one contiguous buffer. */
	transfer->cmdbuf[0] = cmd;
	if (naddr)
		memcpy(&transfer->cmdbuf[1], addr, naddr);

	/* Map cmdbuf for the device; unmapped later in gpmi_nfc_exec_op(). */
	sg_init_one(&transfer->sgl, transfer->cmdbuf, naddr + 1);
	dma_map_sg(this->dev, &transfer->sgl, 1, DMA_TO_DEVICE);

	transfer->direction = DMA_TO_DEVICE;

	desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1, DMA_MEM_TO_DEV,
				       MXS_DMA_CTRL_WAIT4END);
	return desc;
}
2302
/*
 * Chain a wait-for-ready cycle: a single PIO descriptor that tells the
 * GPMI to wait until the current chip select reports ready (no data is
 * transferred; XFER_COUNT is 0).
 */
static struct dma_async_tx_descriptor *gpmi_chain_wait_ready(
	struct gpmi_nand_data *this)
{
	struct dma_chan *channel = get_dma_chan(this);
	u32 pio[2];

	pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
		| BF_GPMI_CTRL0_XFER_COUNT(0);
	pio[1] = 0;

	/* WAIT4RDY makes the DMA engine stall until the ready signal. */
	return mxs_dmaengine_prep_pio(channel, pio, 2, DMA_TRANS_NONE,
				MXS_DMA_CTRL_WAIT4END | MXS_DMA_CTRL_WAIT4RDY);
}
2320
/*
 * Chain a data-in cycle of raw_len bytes into buf.
 *
 * With BCH enabled, the extra PIO words hand the data and auxiliary
 * buffers straight to the BCH block, so no slave_sg descriptor is
 * needed; without BCH a normal DEV_TO_MEM transfer is appended.
 * *direct is set by prepare_data_dma() to indicate whether buf itself
 * was mapped (true) or the bounce buffer was used (false).
 */
static struct dma_async_tx_descriptor *gpmi_chain_data_read(
	struct gpmi_nand_data *this, void *buf, int raw_len, bool *direct)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	struct gpmi_transfer *transfer;
	u32 pio[6] = {};

	transfer = get_next_transfer(this);
	if (!transfer)
		return NULL;

	transfer->direction = DMA_FROM_DEVICE;

	*direct = prepare_data_dma(this, buf, raw_len, &transfer->sgl,
				   DMA_FROM_DEVICE);

	pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
		| BF_GPMI_CTRL0_XFER_COUNT(raw_len);

	if (this->bch) {
		/* Route the data through the BCH decoder. */
		pio[2] =  BM_GPMI_ECCCTRL_ENABLE_ECC
			| BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE)
			| BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
				| BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
		pio[3] = raw_len;
		pio[4] = transfer->sgl.dma_address;
		pio[5] = this->auxiliary_phys;
	}

	desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
				      DMA_TRANS_NONE, 0);
	if (!desc)
		return NULL;

	/* Without BCH the payload must be moved by a plain DMA transfer. */
	if (!this->bch)
		desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
					       DMA_DEV_TO_MEM,
					       MXS_DMA_CTRL_WAIT4END);

	return desc;
}
2367
/*
 * Chain a data-out cycle of raw_len bytes from buf.
 *
 * Mirror image of gpmi_chain_data_read(): with BCH the PIO words feed
 * the encoder directly and no slave_sg descriptor is added; without
 * BCH a MEM_TO_DEV transfer follows the PIO setup.
 */
static struct dma_async_tx_descriptor *gpmi_chain_data_write(
	struct gpmi_nand_data *this, const void *buf, int raw_len)
{
	struct dma_chan *channel = get_dma_chan(this);
	struct dma_async_tx_descriptor *desc;
	struct gpmi_transfer *transfer;
	u32 pio[6] = {};

	transfer = get_next_transfer(this);
	if (!transfer)
		return NULL;

	transfer->direction = DMA_TO_DEVICE;

	prepare_data_dma(this, buf, raw_len, &transfer->sgl, DMA_TO_DEVICE);

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
		| BF_GPMI_CTRL0_XFER_COUNT(raw_len);

	if (this->bch) {
		/* Route the data through the BCH encoder. */
		pio[2] =  BM_GPMI_ECCCTRL_ENABLE_ECC
			| BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE)
			| BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
					BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
		pio[3] = raw_len;
		pio[4] = transfer->sgl.dma_address;
		pio[5] = this->auxiliary_phys;
	}

	desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
				      DMA_TRANS_NONE,
				      (this->bch ? MXS_DMA_CTRL_WAIT4END : 0));
	if (!desc)
		return NULL;

	/* Without BCH the payload must be moved by a plain DMA transfer. */
	if (!this->bch)
		desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
					       DMA_MEM_TO_DEV,
					       MXS_DMA_CTRL_WAIT4END);

	return desc;
}
2414
/*
 * gpmi_nfc_exec_op - ->exec_op hook: run a generic NAND operation.
 *
 * Translates each instruction of @op into a chained DMA descriptor
 * (command/address, wait-ready, data in/out), submits the chain, then
 * waits for DMA -- and, on BCH reads, BCH -- completion.
 *
 * NOTE(review): @check_only is ignored; operations are always executed.
 *
 * Returns 0 on success or a negative error code (-ENXIO when a
 * descriptor could not be built, -ETIMEDOUT on a stuck DMA/BCH,
 * -EINVAL for multiple data instructions in one operation).
 */
static int gpmi_nfc_exec_op(struct nand_chip *chip,
			    const struct nand_operation *op,
			    bool check_only)
{
	const struct nand_op_instr *instr;
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct dma_async_tx_descriptor *desc = NULL;
	int i, ret, buf_len = 0, nbufs = 0;
	u8 cmd = 0;
	void *buf_read = NULL;
	const void *buf_write = NULL;
	bool direct = false;
	struct completion *dma_completion, *bch_completion;
	unsigned long to;

	/* Start with an empty transfer list so the unmap loop is safe. */
	this->ntransfers = 0;
	for (i = 0; i < GPMI_MAX_TRANSFERS; i++)
		this->transfers[i].direction = DMA_NONE;

	ret = pm_runtime_get_sync(this->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(this->dev);
		return ret;
	}

	/*
	 * This driver currently supports only one NAND chip. Plus, dies share
	 * the same configuration. So once timings have been applied on the
	 * controller side, they will not change anymore. When the time will
	 * come, the check on must_apply_timings will have to be dropped.
	 */
	if (this->hw.must_apply_timings) {
		this->hw.must_apply_timings = false;
		ret = gpmi_nfc_apply_timings(this);
		if (ret)
			goto out_pm;
	}

	dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);

	/* Build one DMA descriptor chain for the whole operation. */
	for (i = 0; i < op->ninstrs; i++) {
		instr = &op->instrs[i];

		nand_op_trace("  ", instr);

		switch (instr->type) {
		case NAND_OP_WAITRDY_INSTR:
			desc = gpmi_chain_wait_ready(this);
			break;
		case NAND_OP_CMD_INSTR:
			cmd = instr->ctx.cmd.opcode;

			/*
			 * When this command has an address cycle chain it
			 * together with the address cycle
			 */
			if (i + 1 != op->ninstrs &&
			    op->instrs[i + 1].type == NAND_OP_ADDR_INSTR)
				continue;

			desc = gpmi_chain_command(this, cmd, NULL, 0);

			break;
		case NAND_OP_ADDR_INSTR:
			desc = gpmi_chain_command(this, cmd, instr->ctx.addr.addrs,
						  instr->ctx.addr.naddrs);
			break;
		case NAND_OP_DATA_OUT_INSTR:
			buf_write = instr->ctx.data.buf.out;
			buf_len = instr->ctx.data.len;
			nbufs++;

			desc = gpmi_chain_data_write(this, buf_write, buf_len);

			break;
		case NAND_OP_DATA_IN_INSTR:
			if (!instr->ctx.data.len)
				break;
			buf_read = instr->ctx.data.buf.in;
			buf_len = instr->ctx.data.len;
			nbufs++;

			desc = gpmi_chain_data_read(this, buf_read, buf_len,
						   &direct);
			break;
		}

		if (!desc) {
			ret = -ENXIO;
			goto unmap;
		}
	}

	dev_dbg(this->dev, "%s setup done\n", __func__);

	/* Only one data phase per operation is supported. */
	if (nbufs > 1) {
		dev_err(this->dev, "Multiple data instructions not supported\n");
		ret = -EINVAL;
		goto unmap;
	}

	/* Program the flash layout expected by the queued BCH transfer. */
	if (this->bch) {
		writel(this->bch_flashlayout0,
		       this->resources.bch_regs + HW_BCH_FLASH0LAYOUT0);
		writel(this->bch_flashlayout1,
		       this->resources.bch_regs + HW_BCH_FLASH0LAYOUT1);
	}

	desc->callback = dma_irq_callback;
	desc->callback_param = this;
	dma_completion = &this->dma_done;
	bch_completion = NULL;

	init_completion(dma_completion);

	/* BCH reads complete via a separate interrupt; arm it. */
	if (this->bch && buf_read) {
		writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
		       this->resources.bch_regs + HW_BCH_CTRL_SET);
		bch_completion = &this->bch_done;
		init_completion(bch_completion);
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(get_dma_chan(this));

	to = wait_for_completion_timeout(dma_completion, msecs_to_jiffies(1000));
	if (!to) {
		dev_err(this->dev, "DMA timeout, last DMA\n");
		gpmi_dump_info(this);
		ret = -ETIMEDOUT;
		goto unmap;
	}

	if (this->bch && buf_read) {
		to = wait_for_completion_timeout(bch_completion, msecs_to_jiffies(1000));
		if (!to) {
			dev_err(this->dev, "BCH timeout, last DMA\n");
			gpmi_dump_info(this);
			ret = -ETIMEDOUT;
			goto unmap;
		}
	}

	/* Quiesce the BCH interrupt and status for the next operation. */
	writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
	       this->resources.bch_regs + HW_BCH_CTRL_CLR);
	gpmi_clear_bch(this);

	ret = 0;

unmap:
	/* Unmap everything that was mapped, even on the error paths. */
	for (i = 0; i < this->ntransfers; i++) {
		struct gpmi_transfer *transfer = &this->transfers[i];

		if (transfer->direction != DMA_NONE)
			dma_unmap_sg(this->dev, &transfer->sgl, 1,
				     transfer->direction);
	}

	/* Bounce-buffered reads: copy the payload back to the caller. */
	if (!ret && buf_read && !direct)
		memcpy(buf_read, this->data_buffer_dma,
		       gpmi_raw_len_to_len(this, buf_len));

	this->bch = false;

out_pm:
	pm_runtime_mark_last_busy(this->dev);
	pm_runtime_put_autosuspend(this->dev);

	return ret;
}
2585
/* Controller hooks handed to the raw NAND core. */
static const struct nand_controller_ops gpmi_nand_controller_ops = {
	.attach_chip = gpmi_nand_attach_chip,
	.setup_data_interface = gpmi_setup_data_interface,
	.exec_op = gpmi_nfc_exec_op,
};
2591
/*
 * gpmi_nand_init - initialize the MTD/NAND layers for this controller.
 *
 * Sets up the mtd_info and nand_chip structures, allocates a small
 * temporary DMA buffer for the ID read, scans the chip, runs the boot
 * ROM initialization and BBT creation, and registers the MTD device.
 * On failure the already-initialized pieces are torn down in reverse.
 */
static int gpmi_nand_init(struct gpmi_nand_data *this)
{
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/* init the MTD data structures */
	mtd->name		= "gpmi-nand";
	mtd->dev.parent		= this->dev;

	/* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */
	nand_set_controller_data(chip, this);
	nand_set_flash_node(chip, this->pdev->dev.of_node);
	chip->legacy.block_markbad = gpmi_block_markbad;
	chip->badblock_pattern	= &gpmi_bbt_descr;
	chip->options		|= NAND_NO_SUBPAGE_WRITE;

	/* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
	this->swap_block_mark = !GPMI_IS_MX23(this);

	/*
	 * Allocate a temporary DMA buffer for reading ID in the
	 * nand_scan_ident().
	 */
	this->bch_geometry.payload_size = 1024;
	this->bch_geometry.auxiliary_size = 128;
	ret = gpmi_alloc_dma_buffer(this);
	if (ret)
		return ret;

	nand_controller_init(&this->base);
	this->base.ops = &gpmi_nand_controller_ops;
	chip->controller = &this->base;

	/* i.MX6 controllers drive two chip selects, the others one. */
	ret = nand_scan(chip, GPMI_IS_MX6(this) ? 2 : 1);
	if (ret)
		goto err_out;

	ret = nand_boot_init(this);
	if (ret)
		goto err_nand_cleanup;
	ret = nand_create_bbt(chip);
	if (ret)
		goto err_nand_cleanup;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		goto err_nand_cleanup;
	return 0;

err_nand_cleanup:
	nand_cleanup(chip);
err_out:
	gpmi_free_dma_buffer(this);
	return ret;
}
2648
/* Supported SoCs; .data selects the per-SoC devdata. */
static const struct of_device_id gpmi_nand_id_table[] = {
	{
		.compatible = "fsl,imx23-gpmi-nand",
		.data = &gpmi_devdata_imx23,
	}, {
		.compatible = "fsl,imx28-gpmi-nand",
		.data = &gpmi_devdata_imx28,
	}, {
		.compatible = "fsl,imx6q-gpmi-nand",
		.data = &gpmi_devdata_imx6q,
	}, {
		.compatible = "fsl,imx6sx-gpmi-nand",
		.data = &gpmi_devdata_imx6sx,
	}, {
		.compatible = "fsl,imx7d-gpmi-nand",
		.data = &gpmi_devdata_imx7d,
	}, {}
};
MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
2668
gpmi_nand_probe(struct platform_device * pdev)2669 static int gpmi_nand_probe(struct platform_device *pdev)
2670 {
2671 struct gpmi_nand_data *this;
2672 const struct of_device_id *of_id;
2673 int ret;
2674
2675 this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
2676 if (!this)
2677 return -ENOMEM;
2678
2679 of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
2680 if (of_id) {
2681 this->devdata = of_id->data;
2682 } else {
2683 dev_err(&pdev->dev, "Failed to find the right device id.\n");
2684 return -ENODEV;
2685 }
2686
2687 platform_set_drvdata(pdev, this);
2688 this->pdev = pdev;
2689 this->dev = &pdev->dev;
2690
2691 ret = acquire_resources(this);
2692 if (ret)
2693 goto exit_acquire_resources;
2694
2695 ret = __gpmi_enable_clk(this, true);
2696 if (ret)
2697 goto exit_nfc_init;
2698
2699 pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
2700 pm_runtime_use_autosuspend(&pdev->dev);
2701 pm_runtime_set_active(&pdev->dev);
2702 pm_runtime_enable(&pdev->dev);
2703 pm_runtime_get_sync(&pdev->dev);
2704
2705 ret = gpmi_init(this);
2706 if (ret)
2707 goto exit_nfc_init;
2708
2709 ret = gpmi_nand_init(this);
2710 if (ret)
2711 goto exit_nfc_init;
2712
2713 pm_runtime_mark_last_busy(&pdev->dev);
2714 pm_runtime_put_autosuspend(&pdev->dev);
2715
2716 dev_info(this->dev, "driver registered.\n");
2717
2718 return 0;
2719
2720 exit_nfc_init:
2721 pm_runtime_put(&pdev->dev);
2722 pm_runtime_disable(&pdev->dev);
2723 release_resources(this);
2724 exit_acquire_resources:
2725
2726 return ret;
2727 }
2728
/*
 * Platform remove: drop the runtime PM reference, unregister the NAND
 * chip, and release the DMA buffers and controller resources.
 */
static int gpmi_nand_remove(struct platform_device *pdev)
{
	struct gpmi_nand_data *this = platform_get_drvdata(pdev);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	/* Unregisters the MTD device and cleans up the NAND chip. */
	nand_release(&this->nand);
	gpmi_free_dma_buffer(this);
	release_resources(this);
	return 0;
}
2741
#ifdef CONFIG_PM_SLEEP
/* System suspend: give the DMA channels back; they are re-acquired on resume. */
static int gpmi_pm_suspend(struct device *dev)
{
	struct gpmi_nand_data *this = dev_get_drvdata(dev);

	release_dma_channels(this);
	return 0;
}
2750
/*
 * System resume: re-acquire the DMA channels and re-program the GPMI
 * and BCH registers, which lost their state across suspend.
 */
static int gpmi_pm_resume(struct device *dev)
{
	struct gpmi_nand_data *this = dev_get_drvdata(dev);
	int ret;

	ret = acquire_dma_channels(this);
	if (ret < 0)
		return ret;

	/* re-init the GPMI registers */
	ret = gpmi_init(this);
	if (ret) {
		dev_err(this->dev, "Error setting GPMI : %d\n", ret);
		return ret;
	}

	/* Set flag to get timing setup restored for next exec_op */
	if (this->hw.clk_rate)
		this->hw.must_apply_timings = true;

	/* re-init the BCH registers */
	ret = bch_set_geometry(this);
	if (ret) {
		dev_err(this->dev, "Error setting BCH : %d\n", ret);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */
2781
gpmi_runtime_suspend(struct device * dev)2782 static int __maybe_unused gpmi_runtime_suspend(struct device *dev)
2783 {
2784 struct gpmi_nand_data *this = dev_get_drvdata(dev);
2785
2786 return __gpmi_enable_clk(this, false);
2787 }
2788
gpmi_runtime_resume(struct device * dev)2789 static int __maybe_unused gpmi_runtime_resume(struct device *dev)
2790 {
2791 struct gpmi_nand_data *this = dev_get_drvdata(dev);
2792
2793 return __gpmi_enable_clk(this, true);
2794 }
2795
/* System sleep + runtime PM callbacks. */
static const struct dev_pm_ops gpmi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
	SET_RUNTIME_PM_OPS(gpmi_runtime_suspend, gpmi_runtime_resume, NULL)
};
2800
/* Platform driver glue; matched via the OF table above. */
static struct platform_driver gpmi_nand_driver = {
	.driver = {
		.name = "gpmi-nand",
		.pm = &gpmi_pm_ops,
		.of_match_table = gpmi_nand_id_table,
	},
	.probe   = gpmi_nand_probe,
	.remove  = gpmi_nand_remove,
};
module_platform_driver(gpmi_nand_driver);

MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
MODULE_LICENSE("GPL");
2815