• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Freescale GPMI NAND Flash Driver
3  *
4  * Copyright (C) 2008-2011 Freescale Semiconductor, Inc.
5  * Copyright (C) 2008 Embedded Alley Solutions, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License along
18  * with this program; if not, write to the Free Software Foundation, Inc.,
19  * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20  */
21 #include <linux/delay.h>
22 #include <linux/clk.h>
23 
24 #include "gpmi-nand.h"
25 #include "gpmi-regs.h"
26 #include "bch-regs.h"
27 
/*
 * Default timing thresholds.  The two maxima are derived directly from
 * the widths of the corresponding register fields (mask shifted down by
 * the field's bit position), so they always match the hardware layout.
 */
static struct timing_threshod timing_default_threshold = {
	.max_data_setup_cycles       = (BM_GPMI_TIMING0_DATA_SETUP >>
						BP_GPMI_TIMING0_DATA_SETUP),
	.internal_data_setup_in_ns   = 0,
	.max_sample_delay_factor     = (BM_GPMI_CTRL1_RDN_DELAY >>
						BP_GPMI_CTRL1_RDN_DELAY),
	.max_dll_clock_period_in_ns  = 32,
	.max_dll_delay_in_ns         = 16,
};
37 
38 #define MXS_SET_ADDR		0x4
39 #define MXS_CLR_ADDR		0x8
40 /*
41  * Clear the bit and poll it cleared.  This is usually called with
42  * a reset address and mask being either SFTRST(bit 31) or CLKGATE
43  * (bit 30).
44  */
clear_poll_bit(void __iomem * addr,u32 mask)45 static int clear_poll_bit(void __iomem *addr, u32 mask)
46 {
47 	int timeout = 0x400;
48 
49 	/* clear the bit */
50 	writel(mask, addr + MXS_CLR_ADDR);
51 
52 	/*
53 	 * SFTRST needs 3 GPMI clocks to settle, the reference manual
54 	 * recommends to wait 1us.
55 	 */
56 	udelay(1);
57 
58 	/* poll the bit becoming clear */
59 	while ((readl(addr) & mask) && --timeout)
60 		/* nothing */;
61 
62 	return !timeout;
63 }
64 
65 #define MODULE_CLKGATE		(1 << 30)
66 #define MODULE_SFTRST		(1 << 31)
67 /*
68  * The current mxs_reset_block() will do two things:
69  *  [1] enable the module.
70  *  [2] reset the module.
71  *
72  * In most of the cases, it's ok.
73  * But in MX23, there is a hardware bug in the BCH block (see erratum #2847).
74  * If you try to soft reset the BCH block, it becomes unusable until
75  * the next hard reset. This case occurs in the NAND boot mode. When the board
76  * boots by NAND, the ROM of the chip will initialize the BCH blocks itself.
 * So if the driver tries to reset the BCH again, the BCH will not work anymore.
78  * You will see a DMA timeout in this case. The bug has been fixed
79  * in the following chips, such as MX28.
80  *
81  * To avoid this bug, just add a new parameter `just_enable` for
82  * the mxs_reset_block(), and rewrite it here.
83  */
/*
 * gpmi_reset_block - enable and (optionally) soft-reset a GPMI/BCH block.
 * @reset_addr:  base of the block's control register (SFTRST/CLKGATE bits)
 * @just_enable: true to only enable the block, skipping the SFTRST pulse
 *               (needed for BCH on MX23, see erratum #2847 above)
 *
 * Returns 0 on success, -ETIMEDOUT if any poll below times out.
 */
static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
{
	int ret;
	int timeout = 0x400;

	/* clear and poll SFTRST */
	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
	if (unlikely(ret))
		goto error;

	/* clear CLKGATE */
	writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);

	if (!just_enable) {
		/* set SFTRST to reset the block */
		writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
		udelay(1);

		/* poll CLKGATE becoming set (the reset gates the clock) */
		while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
			/* nothing */;
		if (unlikely(!timeout))
			goto error;
	}

	/* clear and poll SFTRST */
	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
	if (unlikely(ret))
		goto error;

	/* clear and poll CLKGATE */
	ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
	if (unlikely(ret))
		goto error;

	return 0;

error:
	pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
	return -ETIMEDOUT;
}
125 
/*
 * Prepare+enable (@v == true) or disable+unprepare (@v == false) every
 * clock in the resource table.  The table ends at its first NULL slot.
 * If an enable fails, the clocks already enabled are rolled back and
 * the error from clk_prepare_enable() is returned.
 */
static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
{
	struct clk *clk;
	int ret;
	int i;

	for (i = 0; i < GPMI_CLK_MAX; i++) {
		clk = this->resources.clock[i];
		if (!clk)
			break;

		if (!v) {
			clk_disable_unprepare(clk);
			continue;
		}

		ret = clk_prepare_enable(clk);
		if (ret) {
			/* Roll back everything enabled so far. */
			while (i--)
				clk_disable_unprepare(
						this->resources.clock[i]);
			return ret;
		}
	}

	return 0;
}
152 
153 #define gpmi_enable_clk(x) __gpmi_enable_clk(x, true)
154 #define gpmi_disable_clk(x) __gpmi_enable_clk(x, false)
155 
gpmi_init(struct gpmi_nand_data * this)156 int gpmi_init(struct gpmi_nand_data *this)
157 {
158 	struct resources *r = &this->resources;
159 	int ret;
160 
161 	ret = gpmi_enable_clk(this);
162 	if (ret)
163 		goto err_out;
164 	ret = gpmi_reset_block(r->gpmi_regs, false);
165 	if (ret)
166 		goto err_out;
167 
168 	/*
169 	 * Reset BCH here, too. We got failures otherwise :(
170 	 * See later BCH reset for explanation of MX23 handling
171 	 */
172 	ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
173 	if (ret)
174 		goto err_out;
175 
176 
177 	/* Choose NAND mode. */
178 	writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
179 
180 	/* Set the IRQ polarity. */
181 	writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
182 				r->gpmi_regs + HW_GPMI_CTRL1_SET);
183 
184 	/* Disable Write-Protection. */
185 	writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
186 
187 	/* Select BCH ECC. */
188 	writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
189 
190 	gpmi_disable_clk(this);
191 	return 0;
192 err_out:
193 	return ret;
194 }
195 
196 /* This function is very useful. It is called only when the bug occur. */
gpmi_dump_info(struct gpmi_nand_data * this)197 void gpmi_dump_info(struct gpmi_nand_data *this)
198 {
199 	struct resources *r = &this->resources;
200 	struct bch_geometry *geo = &this->bch_geometry;
201 	u32 reg;
202 	int i;
203 
204 	pr_err("Show GPMI registers :\n");
205 	for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
206 		reg = readl(r->gpmi_regs + i * 0x10);
207 		pr_err("offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
208 	}
209 
210 	/* start to print out the BCH info */
211 	pr_err("Show BCH registers :\n");
212 	for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
213 		reg = readl(r->bch_regs + i * 0x10);
214 		pr_err("offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
215 	}
216 	pr_err("BCH Geometry :\n");
217 	pr_err("GF length              : %u\n", geo->gf_len);
218 	pr_err("ECC Strength           : %u\n", geo->ecc_strength);
219 	pr_err("Page Size in Bytes     : %u\n", geo->page_size);
220 	pr_err("Metadata Size in Bytes : %u\n", geo->metadata_size);
221 	pr_err("ECC Chunk Size in Bytes: %u\n", geo->ecc_chunk_size);
222 	pr_err("ECC Chunk Count        : %u\n", geo->ecc_chunk_count);
223 	pr_err("Payload Size in Bytes  : %u\n", geo->payload_size);
224 	pr_err("Auxiliary Size in Bytes: %u\n", geo->auxiliary_size);
225 	pr_err("Auxiliary Status Offset: %u\n", geo->auxiliary_status_offset);
226 	pr_err("Block Mark Byte Offset : %u\n", geo->block_mark_byte_offset);
227 	pr_err("Block Mark Bit Offset  : %u\n", geo->block_mark_bit_offset);
228 }
229 
230 /* Configures the geometry for BCH.  */
bch_set_geometry(struct gpmi_nand_data * this)231 int bch_set_geometry(struct gpmi_nand_data *this)
232 {
233 	struct resources *r = &this->resources;
234 	struct bch_geometry *bch_geo = &this->bch_geometry;
235 	unsigned int block_count;
236 	unsigned int block_size;
237 	unsigned int metadata_size;
238 	unsigned int ecc_strength;
239 	unsigned int page_size;
240 	unsigned int gf_len;
241 	int ret;
242 
243 	if (common_nfc_set_geometry(this))
244 		return !0;
245 
246 	block_count   = bch_geo->ecc_chunk_count - 1;
247 	block_size    = bch_geo->ecc_chunk_size;
248 	metadata_size = bch_geo->metadata_size;
249 	ecc_strength  = bch_geo->ecc_strength >> 1;
250 	page_size     = bch_geo->page_size;
251 	gf_len        = bch_geo->gf_len;
252 
253 	ret = gpmi_enable_clk(this);
254 	if (ret)
255 		goto err_out;
256 
257 	/*
258 	* Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
259 	* chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
260 	* On the other hand, the MX28 needs the reset, because one case has been
261 	* seen where the BCH produced ECC errors constantly after 10000
262 	* consecutive reboots. The latter case has not been seen on the MX23 yet,
263 	* still we don't know if it could happen there as well.
264 	*/
265 	ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
266 	if (ret)
267 		goto err_out;
268 
269 	/* Configure layout 0. */
270 	writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
271 			| BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
272 			| BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this)
273 			| BF_BCH_FLASH0LAYOUT0_GF(gf_len, this)
274 			| BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this),
275 			r->bch_regs + HW_BCH_FLASH0LAYOUT0);
276 
277 	writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
278 			| BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this)
279 			| BF_BCH_FLASH0LAYOUT1_GF(gf_len, this)
280 			| BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this),
281 			r->bch_regs + HW_BCH_FLASH0LAYOUT1);
282 
283 	/* Set *all* chip selects to use layout 0. */
284 	writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
285 
286 	/* Enable interrupts. */
287 	writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
288 				r->bch_regs + HW_BCH_CTRL_SET);
289 
290 	gpmi_disable_clk(this);
291 	return 0;
292 err_out:
293 	return ret;
294 }
295 
/*
 * Convert a time in nanoseconds to a clock-cycle count, rounding up,
 * never returning fewer than @min cycles.
 */
static unsigned int ns_to_cycles(unsigned int time,
			unsigned int period, unsigned int min)
{
	unsigned int cycles = (time + period - 1) / period;

	return (cycles > min) ? cycles : min;
}
305 
306 #define DEF_MIN_PROP_DELAY	5
307 #define DEF_MAX_PROP_DELAY	9
/*
 * gpmi_nfc_compute_hardware_timing - apply timing to current hw conditions.
 * @this: the driver data (supplies the target NAND timing and the clock)
 * @hw:   output; filled with register-ready cycle counts, the DLL
 *        half-period flag, the sample delay factor, the busy timeout and
 *        the WRN delay select
 *
 * Quantizes the target timing to the current GPMI clock period, within
 * the limits in timing_default_threshold.  If the chip provides tREA /
 * tRLOH / tRHOH, a data-eye-based algorithm is used; otherwise a simpler
 * one.  Always returns 0.
 */
static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
					struct gpmi_nfc_hardware_timing *hw)
{
	struct timing_threshod *nfc = &timing_default_threshold;
	struct resources *r = &this->resources;
	struct nand_chip *nand = &this->nand;
	struct nand_timing target = this->timing;
	bool improved_timing_is_available;
	unsigned long clock_frequency_in_hz;
	unsigned int clock_period_in_ns;
	bool dll_use_half_periods;
	unsigned int dll_delay_shift;
	unsigned int max_sample_delay_in_ns;
	unsigned int address_setup_in_cycles;
	unsigned int data_setup_in_ns;
	unsigned int data_setup_in_cycles;
	unsigned int data_hold_in_cycles;
	int ideal_sample_delay_in_ns;
	unsigned int sample_delay_factor;
	int tEYE;
	unsigned int min_prop_delay_in_ns = DEF_MIN_PROP_DELAY;
	unsigned int max_prop_delay_in_ns = DEF_MAX_PROP_DELAY;

	/*
	 * If there are multiple chips, we need to relax the timings to allow
	 * for signal distortion due to higher capacitance.
	 */
	if (nand->numchips > 2) {
		target.data_setup_in_ns    += 10;
		target.data_hold_in_ns     += 10;
		target.address_setup_in_ns += 10;
	} else if (nand->numchips > 1) {
		target.data_setup_in_ns    += 5;
		target.data_hold_in_ns     += 5;
		target.address_setup_in_ns += 5;
	}

	/* Check if improved timing information is available. */
	improved_timing_is_available =
		(target.tREA_in_ns  >= 0) &&
		(target.tRLOH_in_ns >= 0) &&
		(target.tRHOH_in_ns >= 0) ;

	/* Inspect the clock. */
	nfc->clock_frequency_in_hz = clk_get_rate(r->clock[0]);
	clock_frequency_in_hz = nfc->clock_frequency_in_hz;
	clock_period_in_ns    = NSEC_PER_SEC / clock_frequency_in_hz;

	/*
	 * The NFC quantizes setup and hold parameters in terms of clock cycles.
	 * Here, we quantize the setup and hold timing parameters to the
	 * next-highest clock period to make sure we apply at least the
	 * specified times.
	 *
	 * For data setup and data hold, the hardware interprets a value of zero
	 * as the largest possible delay. This is not what's intended by a zero
	 * in the input parameter, so we impose a minimum of one cycle.
	 */
	data_setup_in_cycles    = ns_to_cycles(target.data_setup_in_ns,
							clock_period_in_ns, 1);
	data_hold_in_cycles     = ns_to_cycles(target.data_hold_in_ns,
							clock_period_in_ns, 1);
	address_setup_in_cycles = ns_to_cycles(target.address_setup_in_ns,
							clock_period_in_ns, 0);

	/*
	 * The clock's period affects the sample delay in a number of ways:
	 *
	 * (1) The NFC HAL tells us the maximum clock period the sample delay
	 *     DLL can tolerate. If the clock period is greater than half that
	 *     maximum, we must configure the DLL to be driven by half periods.
	 *
	 * (2) We need to convert from an ideal sample delay, in ns, to a
	 *     "sample delay factor," which the NFC uses. This factor depends on
	 *     whether we're driving the DLL with full or half periods.
	 *     Paraphrasing the reference manual:
	 *
	 *         AD = SDF x 0.125 x RP
	 *
	 * where:
	 *
	 *     AD   is the applied delay, in ns.
	 *     SDF  is the sample delay factor, which is dimensionless.
	 *     RP   is the reference period, in ns, which is a full clock period
	 *          if the DLL is being driven by full periods, or half that if
	 *          the DLL is being driven by half periods.
	 *
	 * Let's re-arrange this in a way that's more useful to us:
	 *
	 *                        8
	 *         SDF  =  AD x ----
	 *                       RP
	 *
	 * The reference period is either the clock period or half that, so this
	 * is:
	 *
	 *                        8       AD x DDF
	 *         SDF  =  AD x -----  =  --------
	 *                      f x P        P
	 *
	 * where:
	 *
	 *       f  is 1 or 1/2, depending on how we're driving the DLL.
	 *       P  is the clock period.
	 *     DDF  is the DLL Delay Factor, a dimensionless value that
	 *          incorporates all the constants in the conversion.
	 *
	 * DDF will be either 8 or 16, both of which are powers of two. We can
	 * reduce the cost of this conversion by using bit shifts instead of
	 * multiplication or division. Thus:
	 *
	 *                 AD << DDS
	 *         SDF  =  ---------
	 *                     P
	 *
	 *     or
	 *
	 *         AD  =  (SDF >> DDS) x P
	 *
	 * where:
	 *
	 *     DDS  is the DLL Delay Shift, the logarithm to base 2 of the DDF.
	 */
	if (clock_period_in_ns > (nfc->max_dll_clock_period_in_ns >> 1)) {
		dll_use_half_periods = true;
		dll_delay_shift      = 3 + 1;
	} else {
		dll_use_half_periods = false;
		dll_delay_shift      = 3;
	}

	/*
	 * Compute the maximum sample delay the NFC allows, under current
	 * conditions. If the clock is running too slowly, no sample delay is
	 * possible.
	 */
	if (clock_period_in_ns > nfc->max_dll_clock_period_in_ns)
		max_sample_delay_in_ns = 0;
	else {
		/*
		 * Compute the delay implied by the largest sample delay factor
		 * the NFC allows.
		 */
		max_sample_delay_in_ns =
			(nfc->max_sample_delay_factor * clock_period_in_ns) >>
								dll_delay_shift;

		/*
		 * Check if the implied sample delay larger than the NFC
		 * actually allows.
		 */
		if (max_sample_delay_in_ns > nfc->max_dll_delay_in_ns)
			max_sample_delay_in_ns = nfc->max_dll_delay_in_ns;
	}

	/*
	 * Check if improved timing information is available. If not, we have to
	 * use a less-sophisticated algorithm.
	 */
	if (!improved_timing_is_available) {
		/*
		 * Fold the read setup time required by the NFC into the ideal
		 * sample delay.
		 */
		ideal_sample_delay_in_ns = target.gpmi_sample_delay_in_ns +
						nfc->internal_data_setup_in_ns;

		/*
		 * The ideal sample delay may be greater than the maximum
		 * allowed by the NFC. If so, we can trade off sample delay time
		 * for more data setup time.
		 *
		 * In each iteration of the following loop, we add a cycle to
		 * the data setup time and subtract a corresponding amount from
		 * the sample delay until we've satisified the constraints or
		 * can't do any better.
		 */
		while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
			(data_setup_in_cycles < nfc->max_data_setup_cycles)) {

			data_setup_in_cycles++;
			ideal_sample_delay_in_ns -= clock_period_in_ns;

			if (ideal_sample_delay_in_ns < 0)
				ideal_sample_delay_in_ns = 0;

		}

		/*
		 * Compute the sample delay factor that corresponds most closely
		 * to the ideal sample delay. If the result is too large for the
		 * NFC, use the maximum value.
		 *
		 * Notice that we use the ns_to_cycles function to compute the
		 * sample delay factor. We do this because the form of the
		 * computation is the same as that for calculating cycles.
		 */
		sample_delay_factor =
			ns_to_cycles(
				ideal_sample_delay_in_ns << dll_delay_shift,
							clock_period_in_ns, 0);

		if (sample_delay_factor > nfc->max_sample_delay_factor)
			sample_delay_factor = nfc->max_sample_delay_factor;

		/* Skip to the part where we return our results. */
		goto return_results;
	}

	/*
	 * If control arrives here, we have more detailed timing information,
	 * so we can use a better algorithm.
	 */

	/*
	 * Fold the read setup time required by the NFC into the maximum
	 * propagation delay.
	 */
	max_prop_delay_in_ns += nfc->internal_data_setup_in_ns;

	/*
	 * Earlier, we computed the number of clock cycles required to satisfy
	 * the data setup time. Now, we need to know the actual nanoseconds.
	 */
	data_setup_in_ns = clock_period_in_ns * data_setup_in_cycles;

	/*
	 * Compute tEYE, the width of the data eye when reading from the NAND
	 * Flash. The eye width is fundamentally determined by the data setup
	 * time, perturbed by propagation delays and some characteristics of the
	 * NAND Flash device.
	 *
	 * start of the eye = max_prop_delay + tREA
	 * end of the eye   = min_prop_delay + tRHOH + data_setup
	 */
	tEYE = (int)min_prop_delay_in_ns + (int)target.tRHOH_in_ns +
							(int)data_setup_in_ns;

	tEYE -= (int)max_prop_delay_in_ns + (int)target.tREA_in_ns;

	/*
	 * The eye must be open. If it's not, we can try to open it by
	 * increasing its main forcer, the data setup time.
	 *
	 * In each iteration of the following loop, we increase the data setup
	 * time by a single clock cycle. We do this until either the eye is
	 * open or we run into NFC limits.
	 */
	while ((tEYE <= 0) &&
			(data_setup_in_cycles < nfc->max_data_setup_cycles)) {
		/* Give a cycle to data setup. */
		data_setup_in_cycles++;
		/* Synchronize the data setup time with the cycles. */
		data_setup_in_ns += clock_period_in_ns;
		/* Adjust tEYE accordingly. */
		tEYE += clock_period_in_ns;
	}

	/*
	 * When control arrives here, the eye is open. The ideal time to sample
	 * the data is in the center of the eye:
	 *
	 *     end of the eye + start of the eye
	 *     ---------------------------------  -  data_setup
	 *                    2
	 *
	 * After some algebra, this simplifies to the code immediately below.
	 */
	ideal_sample_delay_in_ns =
		((int)max_prop_delay_in_ns +
			(int)target.tREA_in_ns +
				(int)min_prop_delay_in_ns +
					(int)target.tRHOH_in_ns -
						(int)data_setup_in_ns) >> 1;

	/*
	 * The following figure illustrates some aspects of a NAND Flash read:
	 *
	 *
	 *           __                   _____________________________________
	 * RDN         \_________________/
	 *
	 *                                         <---- tEYE ----->
	 *                                        /-----------------\
	 * Read Data ----------------------------<                   >---------
	 *                                        \-----------------/
	 *             ^                 ^                 ^              ^
	 *             |                 |                 |              |
	 *             |<--Data Setup -->|<--Delay Time -->|              |
	 *             |                 |                 |              |
	 *             |                 |                                |
	 *             |                 |<--   Quantized Delay Time   -->|
	 *             |                 |                                |
	 *
	 *
	 * We have some issues we must now address:
	 *
	 * (1) The *ideal* sample delay time must not be negative. If it is, we
	 *     jam it to zero.
	 *
	 * (2) The *ideal* sample delay time must not be greater than that
	 *     allowed by the NFC. If it is, we can increase the data setup
	 *     time, which will reduce the delay between the end of the data
	 *     setup and the center of the eye. It will also make the eye
	 *     larger, which might help with the next issue...
	 *
	 * (3) The *quantized* sample delay time must not fall either before the
	 *     eye opens or after it closes (the latter is the problem
	 *     illustrated in the above figure).
	 */

	/* Jam a negative ideal sample delay to zero. */
	if (ideal_sample_delay_in_ns < 0)
		ideal_sample_delay_in_ns = 0;

	/*
	 * Extend the data setup as needed to reduce the ideal sample delay
	 * below the maximum permitted by the NFC.
	 */
	while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
			(data_setup_in_cycles < nfc->max_data_setup_cycles)) {

		/* Give a cycle to data setup. */
		data_setup_in_cycles++;
		/* Synchronize the data setup time with the cycles. */
		data_setup_in_ns += clock_period_in_ns;
		/* Adjust tEYE accordingly. */
		tEYE += clock_period_in_ns;

		/*
		 * Decrease the ideal sample delay by one half cycle, to keep it
		 * in the middle of the eye.
		 */
		ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);

		/* Jam a negative ideal sample delay to zero. */
		if (ideal_sample_delay_in_ns < 0)
			ideal_sample_delay_in_ns = 0;
	}

	/*
	 * Compute the sample delay factor that corresponds to the ideal sample
	 * delay. If the result is too large, then use the maximum allowed
	 * value.
	 *
	 * Notice that we use the ns_to_cycles function to compute the sample
	 * delay factor. We do this because the form of the computation is the
	 * same as that for calculating cycles.
	 */
	sample_delay_factor =
		ns_to_cycles(ideal_sample_delay_in_ns << dll_delay_shift,
							clock_period_in_ns, 0);

	if (sample_delay_factor > nfc->max_sample_delay_factor)
		sample_delay_factor = nfc->max_sample_delay_factor;

	/*
	 * These macros conveniently encapsulate a computation we'll use to
	 * continuously evaluate whether or not the data sample delay is inside
	 * the eye.
	 */
	#define IDEAL_DELAY  ((int) ideal_sample_delay_in_ns)

	#define QUANTIZED_DELAY  \
		((int) ((sample_delay_factor * clock_period_in_ns) >> \
							dll_delay_shift))

	#define DELAY_ERROR  (abs(QUANTIZED_DELAY - IDEAL_DELAY))

	#define SAMPLE_IS_NOT_WITHIN_THE_EYE  (DELAY_ERROR > (tEYE >> 1))

	/*
	 * While the quantized sample time falls outside the eye, reduce the
	 * sample delay or extend the data setup to move the sampling point back
	 * toward the eye. Do not allow the number of data setup cycles to
	 * exceed the maximum allowed by the NFC.
	 */
	while (SAMPLE_IS_NOT_WITHIN_THE_EYE &&
			(data_setup_in_cycles < nfc->max_data_setup_cycles)) {
		/*
		 * If control arrives here, the quantized sample delay falls
		 * outside the eye. Check if it's before the eye opens, or after
		 * the eye closes.
		 */
		if (QUANTIZED_DELAY > IDEAL_DELAY) {
			/*
			 * If control arrives here, the quantized sample delay
			 * falls after the eye closes. Decrease the quantized
			 * delay time and then go back to re-evaluate.
			 */
			if (sample_delay_factor != 0)
				sample_delay_factor--;
			continue;
		}

		/*
		 * If control arrives here, the quantized sample delay falls
		 * before the eye opens. Shift the sample point by increasing
		 * data setup time. This will also make the eye larger.
		 */

		/* Give a cycle to data setup. */
		data_setup_in_cycles++;
		/* Synchronize the data setup time with the cycles. */
		data_setup_in_ns += clock_period_in_ns;
		/* Adjust tEYE accordingly. */
		tEYE += clock_period_in_ns;

		/*
		 * Decrease the ideal sample delay by one half cycle, to keep it
		 * in the middle of the eye.
		 */
		ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);

		/* ...and one less period for the delay time. */
		ideal_sample_delay_in_ns -= clock_period_in_ns;

		/* Jam a negative ideal sample delay to zero. */
		if (ideal_sample_delay_in_ns < 0)
			ideal_sample_delay_in_ns = 0;

		/*
		 * We have a new ideal sample delay, so re-compute the quantized
		 * delay.
		 */
		sample_delay_factor =
			ns_to_cycles(
				ideal_sample_delay_in_ns << dll_delay_shift,
							clock_period_in_ns, 0);

		if (sample_delay_factor > nfc->max_sample_delay_factor)
			sample_delay_factor = nfc->max_sample_delay_factor;
	}

	/* Control arrives here when we're ready to return our results. */
return_results:
	hw->data_setup_in_cycles    = data_setup_in_cycles;
	hw->data_hold_in_cycles     = data_hold_in_cycles;
	hw->address_setup_in_cycles = address_setup_in_cycles;
	hw->use_half_periods        = dll_use_half_periods;
	hw->sample_delay_factor     = sample_delay_factor;
	hw->device_busy_timeout     = GPMI_DEFAULT_BUSY_TIMEOUT;
	hw->wrn_dly_sel             = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;

	/* Return success. */
	return 0;
}
756 
757 /*
758  * <1> Firstly, we should know what's the GPMI-clock means.
759  *     The GPMI-clock is the internal clock in the gpmi nand controller.
760  *     If you set 100MHz to gpmi nand controller, the GPMI-clock's period
761  *     is 10ns. Mark the GPMI-clock's period as GPMI-clock-period.
762  *
763  * <2> Secondly, we should know what's the frequency on the nand chip pins.
764  *     The frequency on the nand chip pins is derived from the GPMI-clock.
765  *     We can get it from the following equation:
766  *
767  *         F = G / (DS + DH)
768  *
769  *         F  : the frequency on the nand chip pins.
770  *         G  : the GPMI clock, such as 100MHz.
771  *         DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
772  *         DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
773  *
774  * <3> Thirdly, when the frequency on the nand chip pins is above 33MHz,
775  *     the nand EDO(extended Data Out) timing could be applied.
776  *     The GPMI implements a feedback read strobe to sample the read data.
777  *     The feedback read strobe can be delayed to support the nand EDO timing
778  *     where the read strobe may deasserts before the read data is valid, and
779  *     read data is valid for some time after read strobe.
780  *
781  *     The following figure illustrates some aspects of a NAND Flash read:
782  *
783  *                   |<---tREA---->|
784  *                   |             |
785  *                   |         |   |
786  *                   |<--tRP-->|   |
787  *                   |         |   |
788  *                  __          ___|__________________________________
789  *     RDN            \________/   |
790  *                                 |
791  *                                 /---------\
792  *     Read Data    --------------<           >---------
793  *                                 \---------/
794  *                                |     |
795  *                                |<-D->|
796  *     FeedbackRDN  ________             ____________
797  *                          \___________/
798  *
799  *          D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
800  *
801  *
802  * <4> Now, we begin to describe how to compute the right RDN_DELAY.
803  *
804  *  4.1) From the aspect of the nand chip pins:
805  *        Delay = (tREA + C - tRP)               {1}
806  *
807  *        tREA : the maximum read access time. From the ONFI nand standards,
808  *               we know that tREA is 16ns in mode 5, tREA is 20ns is mode 4.
809  *               Please check it in : www.onfi.org
810  *        C    : a constant for adjust the delay. default is 4.
811  *        tRP  : the read pulse width.
812  *               Specified by the HW_GPMI_TIMING0:DATA_SETUP:
813  *                    tRP = (GPMI-clock-period) * DATA_SETUP
814  *
815  *  4.2) From the aspect of the GPMI nand controller:
816  *         Delay = RDN_DELAY * 0.125 * RP        {2}
817  *
818  *         RP   : the DLL reference period.
 *            if (GPMI-clock-period > DLL_THRESHOLD)
 *                   RP = GPMI-clock-period / 2;
 *            else
 *                   RP = GPMI-clock-period;
 *
 *            Set the HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period
 *            is greater than DLL_THRESHOLD. In other SOCs, the DLL_THRESHOLD
 *            is 16ns, but in mx6q, we use 12ns.
827  *
828  *  4.3) since {1} equals {2}, we get:
829  *
830  *                    (tREA + 4 - tRP) * 8
831  *         RDN_DELAY = ---------------------     {3}
832  *                           RP
833  *
834  *  4.4) We only support the fastest asynchronous mode of ONFI nand.
835  *       For some ONFI nand, the mode 4 is the fastest mode;
836  *       while for some ONFI nand, the mode 5 is the fastest mode.
 *       So we only support mode 4 and mode 5. There is no need to
 *       support other modes.
839  */
gpmi_compute_edo_timing(struct gpmi_nand_data * this,struct gpmi_nfc_hardware_timing * hw)840 static void gpmi_compute_edo_timing(struct gpmi_nand_data *this,
841 			struct gpmi_nfc_hardware_timing *hw)
842 {
843 	struct resources *r = &this->resources;
844 	unsigned long rate = clk_get_rate(r->clock[0]);
845 	int mode = this->timing_mode;
846 	int dll_threshold = 16; /* in ns */
847 	unsigned long delay;
848 	unsigned long clk_period;
849 	int t_rea;
850 	int c = 4;
851 	int t_rp;
852 	int rp;
853 
854 	/*
855 	 * [1] for GPMI_HW_GPMI_TIMING0:
856 	 *     The async mode requires 40MHz for mode 4, 50MHz for mode 5.
857 	 *     The GPMI can support 100MHz at most. So if we want to
858 	 *     get the 40MHz or 50MHz, we have to set DS=1, DH=1.
859 	 *     Set the ADDRESS_SETUP to 0 in mode 4.
860 	 */
861 	hw->data_setup_in_cycles = 1;
862 	hw->data_hold_in_cycles = 1;
863 	hw->address_setup_in_cycles = ((mode == 5) ? 1 : 0);
864 
865 	/* [2] for GPMI_HW_GPMI_TIMING1 */
866 	hw->device_busy_timeout = 0x9000;
867 
868 	/* [3] for GPMI_HW_GPMI_CTRL1 */
869 	hw->wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
870 
871 	if (GPMI_IS_MX6Q(this))
872 		dll_threshold = 12;
873 
874 	/*
875 	 * Enlarge 10 times for the numerator and denominator in {3}.
876 	 * This make us to get more accurate result.
877 	 */
878 	clk_period = NSEC_PER_SEC / (rate / 10);
879 	dll_threshold *= 10;
880 	t_rea = ((mode == 5) ? 16 : 20) * 10;
881 	c *= 10;
882 
883 	t_rp = clk_period * 1; /* DATA_SETUP is 1 */
884 
885 	if (clk_period > dll_threshold) {
886 		hw->use_half_periods = 1;
887 		rp = clk_period / 2;
888 	} else {
889 		hw->use_half_periods = 0;
890 		rp = clk_period;
891 	}
892 
893 	/*
894 	 * Multiply the numerator with 10, we could do a round off:
895 	 *      7.8 round up to 8; 7.4 round down to 7.
896 	 */
897 	delay  = (((t_rea + c - t_rp) * 8) * 10) / rp;
898 	delay = (delay + 5) / 10;
899 
900 	hw->sample_delay_factor = delay;
901 }
902 
enable_edo_mode(struct gpmi_nand_data * this,int mode)903 static int enable_edo_mode(struct gpmi_nand_data *this, int mode)
904 {
905 	struct resources  *r = &this->resources;
906 	struct nand_chip *nand = &this->nand;
907 	struct mtd_info	 *mtd = &this->mtd;
908 	uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {};
909 	unsigned long rate;
910 	int ret;
911 
912 	nand->select_chip(mtd, 0);
913 
914 	/* [1] send SET FEATURE commond to NAND */
915 	feature[0] = mode;
916 	ret = nand->onfi_set_features(mtd, nand,
917 				ONFI_FEATURE_ADDR_TIMING_MODE, feature);
918 	if (ret)
919 		goto err_out;
920 
921 	/* [2] send GET FEATURE command to double-check the timing mode */
922 	memset(feature, 0, ONFI_SUBFEATURE_PARAM_LEN);
923 	ret = nand->onfi_get_features(mtd, nand,
924 				ONFI_FEATURE_ADDR_TIMING_MODE, feature);
925 	if (ret || feature[0] != mode)
926 		goto err_out;
927 
928 	nand->select_chip(mtd, -1);
929 
930 	/* [3] set the main IO clock, 100MHz for mode 5, 80MHz for mode 4. */
931 	rate = (mode == 5) ? 100000000 : 80000000;
932 	clk_set_rate(r->clock[0], rate);
933 
934 	/* Let the gpmi_begin() re-compute the timing again. */
935 	this->flags &= ~GPMI_TIMING_INIT_OK;
936 
937 	this->flags |= GPMI_ASYNC_EDO_ENABLED;
938 	this->timing_mode = mode;
939 	dev_info(this->dev, "enable the asynchronous EDO mode %d\n", mode);
940 	return 0;
941 
942 err_out:
943 	nand->select_chip(mtd, -1);
944 	dev_err(this->dev, "mode:%d ,failed in set feature.\n", mode);
945 	return -EINVAL;
946 }
947 
gpmi_extra_init(struct gpmi_nand_data * this)948 int gpmi_extra_init(struct gpmi_nand_data *this)
949 {
950 	struct nand_chip *chip = &this->nand;
951 
952 	/* Enable the asynchronous EDO feature. */
953 	if (GPMI_IS_MX6Q(this) && chip->onfi_version) {
954 		int mode = onfi_get_async_timing_mode(chip);
955 
956 		/* We only support the timing mode 4 and mode 5. */
957 		if (mode & ONFI_TIMING_MODE_5)
958 			mode = 5;
959 		else if (mode & ONFI_TIMING_MODE_4)
960 			mode = 4;
961 		else
962 			return 0;
963 
964 		return enable_edo_mode(this, mode);
965 	}
966 	return 0;
967 }
968 
969 /* Begin the I/O */
void gpmi_begin(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	void __iomem *gpmi_regs = r->gpmi_regs;
	unsigned int   clock_period_in_ns;
	uint32_t       reg;
	unsigned int   dll_wait_time_in_us;
	struct gpmi_nfc_hardware_timing  hw;
	int ret;

	/* Enable the clock. */
	ret = gpmi_enable_clk(this);
	if (ret) {
		pr_err("We failed in enable the clk\n");
		goto err_out;
	}

	/* Only initialize the timing once */
	if (this->flags & GPMI_TIMING_INIT_OK)
		return;
	this->flags |= GPMI_TIMING_INIT_OK;

	/* Use the EDO timing if enable_edo_mode() succeeded earlier. */
	if (this->flags & GPMI_ASYNC_EDO_ENABLED)
		gpmi_compute_edo_timing(this, &hw);
	else
		gpmi_nfc_compute_hardware_timing(this, &hw);

	/* [1] Set HW_GPMI_TIMING0 */
	reg = BF_GPMI_TIMING0_ADDRESS_SETUP(hw.address_setup_in_cycles) |
		BF_GPMI_TIMING0_DATA_HOLD(hw.data_hold_in_cycles)         |
		BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles)       ;

	writel(reg, gpmi_regs + HW_GPMI_TIMING0);

	/* [2] Set HW_GPMI_TIMING1 */
	writel(BF_GPMI_TIMING1_BUSY_TIMEOUT(hw.device_busy_timeout),
		gpmi_regs + HW_GPMI_TIMING1);

	/* [3] The following code is to set the HW_GPMI_CTRL1. */

	/* Set the WRN_DLY_SEL */
	writel(BM_GPMI_CTRL1_WRN_DLY_SEL, gpmi_regs + HW_GPMI_CTRL1_CLR);
	writel(BF_GPMI_CTRL1_WRN_DLY_SEL(hw.wrn_dly_sel),
					gpmi_regs + HW_GPMI_CTRL1_SET);

	/* DLL_ENABLE must be set to 0 when setting RDN_DELAY or HALF_PERIOD. */
	writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_CLR);

	/* Clear out the DLL control fields. */
	reg = BM_GPMI_CTRL1_RDN_DELAY | BM_GPMI_CTRL1_HALF_PERIOD;
	writel(reg, gpmi_regs + HW_GPMI_CTRL1_CLR);

	/*
	 * If no sample delay is called for, return immediately:
	 * the DLL stays disabled, which is what we want.
	 */
	if (!hw.sample_delay_factor)
		return;

	/* Set RDN_DELAY or HALF_PERIOD. */
	reg = ((hw.use_half_periods) ? BM_GPMI_CTRL1_HALF_PERIOD : 0)
		| BF_GPMI_CTRL1_RDN_DELAY(hw.sample_delay_factor);

	writel(reg, gpmi_regs + HW_GPMI_CTRL1_SET);

	/* At last, we enable the DLL. */
	writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_SET);

	/*
	 * After we enable the GPMI DLL, we have to wait 64 clock cycles before
	 * we can use the GPMI. Calculate the amount of time we need to wait,
	 * in microseconds.
	 */
	clock_period_in_ns = NSEC_PER_SEC / clk_get_rate(r->clock[0]);
	dll_wait_time_in_us = (clock_period_in_ns * 64) / 1000;

	/* Round a sub-microsecond wait up to at least 1us. */
	if (!dll_wait_time_in_us)
		dll_wait_time_in_us = 1;

	/* Wait for the DLL to settle. */
	udelay(dll_wait_time_in_us);

err_out:
	return;
}
1052 
/* End the I/O: balance gpmi_begin() by gating the clocks off again. */
void gpmi_end(struct gpmi_nand_data *this)
{
	gpmi_disable_clk(this);
}
1057 
1058 /* Clears a BCH interrupt. */
void gpmi_clear_bch(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	/* Acknowledge the BCH "complete" IRQ via the MXS CLR register. */
	writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
}
1064 
1065 /* Returns the Ready/Busy status of the given chip. */
gpmi_is_ready(struct gpmi_nand_data * this,unsigned chip)1066 int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
1067 {
1068 	struct resources *r = &this->resources;
1069 	uint32_t mask = 0;
1070 	uint32_t reg = 0;
1071 
1072 	if (GPMI_IS_MX23(this)) {
1073 		mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
1074 		reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
1075 	} else if (GPMI_IS_MX28(this) || GPMI_IS_MX6Q(this)) {
1076 		/* MX28 shares the same R/B register as MX6Q. */
1077 		mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
1078 		reg = readl(r->gpmi_regs + HW_GPMI_STAT);
1079 	} else
1080 		pr_err("unknow arch.\n");
1081 	return reg & mask;
1082 }
1083 
static inline void set_dma_type(struct gpmi_nand_data *this,
					enum dma_ops_type type)
{
	/*
	 * Record the type of the DMA chain we are about to start, saving
	 * the previous one in last_dma_type (presumably consumed by the
	 * DMA completion path — confirm in the rest of the driver).
	 */
	this->last_dma_type = this->dma_type;
	this->dma_type = type;
}
1090 
gpmi_send_command(struct gpmi_nand_data * this)1091 int gpmi_send_command(struct gpmi_nand_data *this)
1092 {
1093 	struct dma_chan *channel = get_dma_chan(this);
1094 	struct dma_async_tx_descriptor *desc;
1095 	struct scatterlist *sgl;
1096 	int chip = this->current_chip;
1097 	u32 pio[3];
1098 
1099 	/* [1] send out the PIO words */
1100 	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
1101 		| BM_GPMI_CTRL0_WORD_LENGTH
1102 		| BF_GPMI_CTRL0_CS(chip, this)
1103 		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
1104 		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
1105 		| BM_GPMI_CTRL0_ADDRESS_INCREMENT
1106 		| BF_GPMI_CTRL0_XFER_COUNT(this->command_length);
1107 	pio[1] = pio[2] = 0;
1108 	desc = dmaengine_prep_slave_sg(channel,
1109 					(struct scatterlist *)pio,
1110 					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
1111 	if (!desc) {
1112 		pr_err("step 1 error\n");
1113 		return -1;
1114 	}
1115 
1116 	/* [2] send out the COMMAND + ADDRESS string stored in @buffer */
1117 	sgl = &this->cmd_sgl;
1118 
1119 	sg_init_one(sgl, this->cmd_buffer, this->command_length);
1120 	dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
1121 	desc = dmaengine_prep_slave_sg(channel,
1122 				sgl, 1, DMA_MEM_TO_DEV,
1123 				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1124 
1125 	if (!desc) {
1126 		pr_err("step 2 error\n");
1127 		return -1;
1128 	}
1129 
1130 	/* [3] submit the DMA */
1131 	set_dma_type(this, DMA_FOR_COMMAND);
1132 	return start_dma_without_bch_irq(this, desc);
1133 }
1134 
gpmi_send_data(struct gpmi_nand_data * this)1135 int gpmi_send_data(struct gpmi_nand_data *this)
1136 {
1137 	struct dma_async_tx_descriptor *desc;
1138 	struct dma_chan *channel = get_dma_chan(this);
1139 	int chip = this->current_chip;
1140 	uint32_t command_mode;
1141 	uint32_t address;
1142 	u32 pio[2];
1143 
1144 	/* [1] PIO */
1145 	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
1146 	address      = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
1147 
1148 	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
1149 		| BM_GPMI_CTRL0_WORD_LENGTH
1150 		| BF_GPMI_CTRL0_CS(chip, this)
1151 		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
1152 		| BF_GPMI_CTRL0_ADDRESS(address)
1153 		| BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
1154 	pio[1] = 0;
1155 	desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio,
1156 					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
1157 	if (!desc) {
1158 		pr_err("step 1 error\n");
1159 		return -1;
1160 	}
1161 
1162 	/* [2] send DMA request */
1163 	prepare_data_dma(this, DMA_TO_DEVICE);
1164 	desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
1165 					1, DMA_MEM_TO_DEV,
1166 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1167 	if (!desc) {
1168 		pr_err("step 2 error\n");
1169 		return -1;
1170 	}
1171 	/* [3] submit the DMA */
1172 	set_dma_type(this, DMA_FOR_WRITE_DATA);
1173 	return start_dma_without_bch_irq(this, desc);
1174 }
1175 
gpmi_read_data(struct gpmi_nand_data * this)1176 int gpmi_read_data(struct gpmi_nand_data *this)
1177 {
1178 	struct dma_async_tx_descriptor *desc;
1179 	struct dma_chan *channel = get_dma_chan(this);
1180 	int chip = this->current_chip;
1181 	u32 pio[2];
1182 
1183 	/* [1] : send PIO */
1184 	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
1185 		| BM_GPMI_CTRL0_WORD_LENGTH
1186 		| BF_GPMI_CTRL0_CS(chip, this)
1187 		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
1188 		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
1189 		| BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
1190 	pio[1] = 0;
1191 	desc = dmaengine_prep_slave_sg(channel,
1192 					(struct scatterlist *)pio,
1193 					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
1194 	if (!desc) {
1195 		pr_err("step 1 error\n");
1196 		return -1;
1197 	}
1198 
1199 	/* [2] : send DMA request */
1200 	prepare_data_dma(this, DMA_FROM_DEVICE);
1201 	desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
1202 					1, DMA_DEV_TO_MEM,
1203 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1204 	if (!desc) {
1205 		pr_err("step 2 error\n");
1206 		return -1;
1207 	}
1208 
1209 	/* [3] : submit the DMA */
1210 	set_dma_type(this, DMA_FOR_READ_DATA);
1211 	return start_dma_without_bch_irq(this, desc);
1212 }
1213 
/*
 * Write one ECC page: a single PIO-only DMA descriptor programs the
 * GPMI for a data write with the BCH encoder enabled, handing it the
 * @payload and @auxiliary DMA addresses; the BCH block then drives the
 * actual transfer, so we wait on the BCH IRQ rather than the DMA IRQ.
 *
 * Returns 0 on success or a negative error code.
 */
int gpmi_send_page(struct gpmi_nand_data *this,
			dma_addr_t payload, dma_addr_t auxiliary)
{
	struct bch_geometry *geo = &this->bch_geometry;
	uint32_t command_mode;
	uint32_t address;
	uint32_t ecc_command;
	uint32_t buffer_mask;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	int chip = this->current_chip;
	u32 pio[6];

	/* A DMA descriptor that does an ECC page read. */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
	address      = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
	ecc_command  = BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE;
	buffer_mask  = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
				BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(0);
	pio[1] = 0;
	pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
		| BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
		| BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
	pio[3] = geo->page_size;
	pio[4] = payload;
	pio[5] = auxiliary;

	desc = dmaengine_prep_slave_sg(channel,
					(struct scatterlist *)pio,
					ARRAY_SIZE(pio), DMA_TRANS_NONE,
					DMA_CTRL_ACK);
	if (!desc) {
		pr_err("step 2 error\n");
		return -EINVAL;
	}
	set_dma_type(this, DMA_FOR_WRITE_ECC_PAGE);
	return start_dma_with_bch_irq(this, desc);
}
1259 
/*
 * Read one ECC page as a three-descriptor DMA chain:
 *   [1] wait for the chip to report ready,
 *   [2] read the page with the BCH decoder enabled (it corrects into
 *       @payload / @auxiliary),
 *   [3] a final wait-for-ready with HW_GPMI_ECCCTRL cleared to
 *       disable the BCH block again.
 * Completion is signalled by the BCH IRQ, not the DMA IRQ.
 *
 * Returns 0 on success or a negative error code.
 */
int gpmi_read_page(struct gpmi_nand_data *this,
				dma_addr_t payload, dma_addr_t auxiliary)
{
	struct bch_geometry *geo = &this->bch_geometry;
	uint32_t command_mode;
	uint32_t address;
	uint32_t ecc_command;
	uint32_t buffer_mask;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	int chip = this->current_chip;
	u32 pio[6];

	/* [1] Wait for the chip to report ready. */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
	address      = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;

	pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(0);
	pio[1] = 0;
	/* Only the first two PIO words are used for this stage. */
	desc = dmaengine_prep_slave_sg(channel,
				(struct scatterlist *)pio, 2,
				DMA_TRANS_NONE, 0);
	if (!desc) {
		pr_err("step 1 error\n");
		return -EINVAL;
	}

	/* [2] Enable the BCH block and read. */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ;
	address      = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
	ecc_command  = BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE;
	buffer_mask  = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
			| BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;

	pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);

	pio[1] = 0;
	pio[2] =  BM_GPMI_ECCCTRL_ENABLE_ECC
		| BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
		| BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
	pio[3] = geo->page_size;
	pio[4] = payload;
	pio[5] = auxiliary;
	desc = dmaengine_prep_slave_sg(channel,
					(struct scatterlist *)pio,
					ARRAY_SIZE(pio), DMA_TRANS_NONE,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		pr_err("step 2 error\n");
		return -EINVAL;
	}

	/* [3] Disable the BCH block */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
	address      = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
	pio[1] = 0;
	pio[2] = 0; /* clear GPMI_HW_GPMI_ECCCTRL, disable the BCH. */
	/* This stage uses the first three PIO words only. */
	desc = dmaengine_prep_slave_sg(channel,
				(struct scatterlist *)pio, 3,
				DMA_TRANS_NONE,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		pr_err("step 3 error\n");
		return -EINVAL;
	}

	/* [4] submit the DMA */
	set_dma_type(this, DMA_FOR_READ_ECC_PAGE);
	return start_dma_with_bch_irq(this, desc);
}
1347