// SPDX-License-Identifier: GPL-2.0+
/*
 * Generic Error-Correcting Code (ECC) engine
 *
 * Copyright (C) 2019 Macronix
 * Author:
 *	Miquèl Raynal <miquel.raynal@bootlin.com>
 *
 * This file describes the abstraction of any NAND ECC engine. It has been
 * designed to fit most cases, including parallel NANDs and SPI-NANDs.
 *
 * There are three main situations where instantiating this ECC engine makes
 * sense:
 * - external: The ECC engine is outside the NAND pipeline, typically this
 *             is a software ECC engine, or a hardware engine that is
 *             outside the NAND controller pipeline.
 * - pipelined: The ECC engine is inside the NAND pipeline, i.e. on the
 *              controller's side. This is the case for most raw NAND
 *              controllers. In the pipelined case, the ECC bytes are
 *              generated/the data is corrected on the fly when a page is
 *              written/read.
 * - ondie: The ECC engine is inside the NAND pipeline, on the chip's side.
 *          Some NAND chips can correct the data themselves.
 *
 * Besides the initial setup and final cleanups, the interfaces are rather
 * simple:
 * - prepare: Prepare an I/O request. Enable/disable the ECC engine based on
 *            the I/O request type. In case of software correction or an
 *            external engine, this step may involve deriving the ECC bytes
 *            and placing them in the OOB area before a write.
 * - finish: Finish an I/O request. Correct the data in case of a read
 *           request and report the number of corrected bits/uncorrectable
 *           errors. Most likely empty for write operations, unless you have
 *           hardware specific stuff to do, like shutting down the engine to
 *           save power.
 *
 * The I/O request should be enclosed in a prepare()/finish() pair of calls
 * (see the usage sketch after this header) and will behave differently
 * depending on the requested I/O type:
 * - raw: Correction disabled
 * - ecc: Correction enabled
 *
 * The request direction impacts the logic as well:
 * - read: Load data from the NAND chip
 * - write: Store data in the NAND chip
 *
 * Mixing all these combinations together gives the following behavior.
 * Those are just examples, drivers are free to add custom steps in their
 * prepare/finish hooks.
 *
 * [external ECC engine]
 * - external + prepare + raw + read: do nothing
 * - external + finish + raw + read: do nothing
 * - external + prepare + raw + write: do nothing
 * - external + finish + raw + write: do nothing
 * - external + prepare + ecc + read: do nothing
 * - external + finish + ecc + read: calculate expected ECC bytes, extract
 *                                   ECC bytes from OOB buffer, correct
 *                                   and report any bitflip/error
 * - external + prepare + ecc + write: calculate ECC bytes and store them at
 *                                     the right place in the OOB buffer based
 *                                     on the OOB layout
 * - external + finish + ecc + write: do nothing
 *
 * [pipelined ECC engine]
 * - pipelined + prepare + raw + read: disable the controller's ECC engine if
 *                                     activated
 * - pipelined + finish + raw + read: do nothing
 * - pipelined + prepare + raw + write: disable the controller's ECC engine if
 *                                      activated
 * - pipelined + finish + raw + write: do nothing
 * - pipelined + prepare + ecc + read: enable the controller's ECC engine if
 *                                     deactivated
 * - pipelined + finish + ecc + read: check the status, report any
 *                                    error/bitflip
 * - pipelined + prepare + ecc + write: enable the controller's ECC engine if
 *                                      deactivated
 * - pipelined + finish + ecc + write: do nothing
 *
 * [ondie ECC engine]
 * - ondie + prepare + raw + read: send commands to disable the on-chip ECC
 *                                 engine if activated
 * - ondie + finish + raw + read: do nothing
 * - ondie + prepare + raw + write: send commands to disable the on-chip ECC
 *                                  engine if activated
 * - ondie + finish + raw + write: do nothing
 * - ondie + prepare + ecc + read: send commands to enable the on-chip ECC
 *                                 engine if deactivated
 * - ondie + finish + ecc + read: send commands to check the status, report
 *                                any error/bitflip
 * - ondie + prepare + ecc + write: send commands to enable the on-chip ECC
 *                                  engine if deactivated
 * - ondie + finish + ecc + write: do nothing
 */
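
/*
 * Rough usage sketch (not lifted from any particular driver): an I/O path
 * typically wraps each page access with the prepare()/finish() pair, where
 * exec_page_io() stands for a hypothetical helper performing the actual
 * transfer:
 *
 *	int ret = nand_ecc_prepare_io_req(nand, &req);
 *	if (ret)
 *		return ret;
 *
 *	ret = exec_page_io(nand, &req);		// hypothetical transfer step
 *	if (ret)
 *		return ret;
 *
 *	return nand_ecc_finish_io_req(nand, &req);
 *
 * With req.mode set to MTD_OPS_RAW the engine is left (or turned) off, while
 * any other mode requests on-the-fly or external correction as described
 * above.
 */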

#include <linux/module.h>
#include <linux/mtd/nand.h>

/**
 * nand_ecc_init_ctx - Init the ECC engine context
 * @nand: the NAND device
 *
 * On success, the caller is responsible for calling nand_ecc_cleanup_ctx().
 */
int nand_ecc_init_ctx(struct nand_device *nand)
{
	if (!nand->ecc.engine->ops->init_ctx)
		return 0;

	return nand->ecc.engine->ops->init_ctx(nand);
}
EXPORT_SYMBOL(nand_ecc_init_ctx);

/**
 * nand_ecc_cleanup_ctx - Cleanup the ECC engine context
 * @nand: the NAND device
 */
void nand_ecc_cleanup_ctx(struct nand_device *nand)
{
	if (nand->ecc.engine->ops->cleanup_ctx)
		nand->ecc.engine->ops->cleanup_ctx(nand);
}
EXPORT_SYMBOL(nand_ecc_cleanup_ctx);

/**
 * nand_ecc_prepare_io_req - Prepare an I/O request
 * @nand: the NAND device
 * @req: the I/O request
 */
int nand_ecc_prepare_io_req(struct nand_device *nand,
			    struct nand_page_io_req *req)
{
	if (!nand->ecc.engine->ops->prepare_io_req)
		return 0;

	return nand->ecc.engine->ops->prepare_io_req(nand, req);
}
EXPORT_SYMBOL(nand_ecc_prepare_io_req);

/**
 * nand_ecc_finish_io_req - Finish an I/O request
 * @nand: the NAND device
 * @req: the I/O request
 */
int nand_ecc_finish_io_req(struct nand_device *nand,
			   struct nand_page_io_req *req)
{
	if (!nand->ecc.engine->ops->finish_io_req)
		return 0;

	return nand->ecc.engine->ops->finish_io_req(nand, req);
}
EXPORT_SYMBOL(nand_ecc_finish_io_req);
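
/*
 * Engine drivers provide the hooks dispatched above through a
 * struct nand_ecc_engine_ops. The skeleton below is only an illustrative
 * sketch: the my_engine_* names are hypothetical, and a real implementation
 * would enable/disable its hardware in the prepare/finish hooks and report
 * bitflips/uncorrectable errors from finish_io_req():
 *
 *	static const struct nand_ecc_engine_ops my_engine_ops = {
 *		.init_ctx = my_engine_init_ctx,
 *		.cleanup_ctx = my_engine_cleanup_ctx,
 *		.prepare_io_req = my_engine_prepare_io_req,
 *		.finish_io_req = my_engine_finish_io_req,
 *	};
 */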

/* Define default OOB placement schemes for large and small page devices */
static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int total_ecc_bytes = nand->ecc.ctx.total;

	if (section > 1)
		return -ERANGE;

	if (!section) {
		oobregion->offset = 0;
		if (mtd->oobsize == 16)
			oobregion->length = 4;
		else
			oobregion->length = 3;
	} else {
		if (mtd->oobsize == 8)
			return -ERANGE;

		oobregion->offset = 6;
		oobregion->length = total_ecc_bytes - 4;
	}

	return 0;
}

static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	if (section > 1)
		return -ERANGE;

	if (mtd->oobsize == 16) {
		if (section)
			return -ERANGE;

		oobregion->length = 8;
		oobregion->offset = 8;
	} else {
		oobregion->length = 2;
		if (!section)
			oobregion->offset = 3;
		else
			oobregion->offset = 6;
	}

	return 0;
}

static const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
	.ecc = nand_ooblayout_ecc_sp,
	.free = nand_ooblayout_free_sp,
};

const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void)
{
	return &nand_ooblayout_sp_ops;
}
EXPORT_SYMBOL_GPL(nand_get_small_page_ooblayout);

static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int total_ecc_bytes = nand->ecc.ctx.total;

	if (section || !total_ecc_bytes)
		return -ERANGE;

	oobregion->length = total_ecc_bytes;
	oobregion->offset = mtd->oobsize - oobregion->length;

	return 0;
}

static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int total_ecc_bytes = nand->ecc.ctx.total;

	if (section)
		return -ERANGE;

	oobregion->length = mtd->oobsize - total_ecc_bytes - 2;
	oobregion->offset = 2;

	return 0;
}

static const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
	.ecc = nand_ooblayout_ecc_lp,
	.free = nand_ooblayout_free_lp,
};

const struct mtd_ooblayout_ops *nand_get_large_page_ooblayout(void)
{
	return &nand_ooblayout_lp_ops;
}
EXPORT_SYMBOL_GPL(nand_get_large_page_ooblayout);
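
/*
 * These helpers only return the layout description; the NAND core or the
 * controller driver is expected to attach it to the MTD device. A minimal,
 * hypothetical sketch of that step (the selection policy and threshold are
 * only an example, not mandated by this file):
 *
 *	if (mtd->oobsize < 64)
 *		mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout());
 *	else
 *		mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
 */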

/*
 * Support the old "large page" layout used for 1-bit Hamming ECC where the
 * ECC bytes are placed at a fixed offset.
 */
static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
					 struct mtd_oob_region *oobregion)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int total_ecc_bytes = nand->ecc.ctx.total;

	if (section)
		return -ERANGE;

	switch (mtd->oobsize) {
	case 64:
		oobregion->offset = 40;
		break;
	case 128:
		oobregion->offset = 80;
		break;
	default:
		return -EINVAL;
	}

	oobregion->length = total_ecc_bytes;
	if (oobregion->offset + oobregion->length > mtd->oobsize)
		return -ERANGE;

	return 0;
}

static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
					  struct mtd_oob_region *oobregion)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int total_ecc_bytes = nand->ecc.ctx.total;
	int ecc_offset = 0;

	if (section < 0 || section > 1)
		return -ERANGE;

	switch (mtd->oobsize) {
	case 64:
		ecc_offset = 40;
		break;
	case 128:
		ecc_offset = 80;
		break;
	default:
		return -EINVAL;
	}

	if (section == 0) {
		oobregion->offset = 2;
		oobregion->length = ecc_offset - 2;
	} else {
		oobregion->offset = ecc_offset + total_ecc_bytes;
		oobregion->length = mtd->oobsize - oobregion->offset;
	}

	return 0;
}

static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
	.ecc = nand_ooblayout_ecc_lp_hamming,
	.free = nand_ooblayout_free_lp_hamming,
};

const struct mtd_ooblayout_ops *nand_get_large_page_hamming_ooblayout(void)
{
	return &nand_ooblayout_lp_hamming_ops;
}
EXPORT_SYMBOL_GPL(nand_get_large_page_hamming_ooblayout);

static enum nand_ecc_engine_type
of_get_nand_ecc_engine_type(struct device_node *np)
{
	struct device_node *eng_np;

	if (of_property_read_bool(np, "nand-no-ecc-engine"))
		return NAND_ECC_ENGINE_TYPE_NONE;

	if (of_property_read_bool(np, "nand-use-soft-ecc-engine"))
		return NAND_ECC_ENGINE_TYPE_SOFT;

	eng_np = of_parse_phandle(np, "nand-ecc-engine", 0);
	of_node_put(eng_np);

	if (eng_np) {
		if (eng_np == np)
			return NAND_ECC_ENGINE_TYPE_ON_DIE;
		else
			return NAND_ECC_ENGINE_TYPE_ON_HOST;
	}

	return NAND_ECC_ENGINE_TYPE_INVALID;
}

static const char * const nand_ecc_placement[] = {
	[NAND_ECC_PLACEMENT_OOB] = "oob",
	[NAND_ECC_PLACEMENT_INTERLEAVED] = "interleaved",
};

static enum nand_ecc_placement of_get_nand_ecc_placement(struct device_node *np)
{
	enum nand_ecc_placement placement;
	const char *pm;
	int err;

	err = of_property_read_string(np, "nand-ecc-placement", &pm);
	if (!err) {
		for (placement = NAND_ECC_PLACEMENT_OOB;
		     placement < ARRAY_SIZE(nand_ecc_placement); placement++) {
			if (!strcasecmp(pm, nand_ecc_placement[placement]))
				return placement;
		}
	}

	return NAND_ECC_PLACEMENT_UNKNOWN;
}

static const char * const nand_ecc_algos[] = {
	[NAND_ECC_ALGO_HAMMING] = "hamming",
	[NAND_ECC_ALGO_BCH] = "bch",
	[NAND_ECC_ALGO_RS] = "rs",
};

static enum nand_ecc_algo of_get_nand_ecc_algo(struct device_node *np)
{
	enum nand_ecc_algo ecc_algo;
	const char *pm;
	int err;

	err = of_property_read_string(np, "nand-ecc-algo", &pm);
	if (!err) {
		for (ecc_algo = NAND_ECC_ALGO_HAMMING;
		     ecc_algo < ARRAY_SIZE(nand_ecc_algos);
		     ecc_algo++) {
			if (!strcasecmp(pm, nand_ecc_algos[ecc_algo]))
				return ecc_algo;
		}
	}

	return NAND_ECC_ALGO_UNKNOWN;
}

static int of_get_nand_ecc_step_size(struct device_node *np)
{
	int ret;
	u32 val;

	ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
	return ret ? ret : val;
}

static int of_get_nand_ecc_strength(struct device_node *np)
{
	int ret;
	u32 val;

	ret = of_property_read_u32(np, "nand-ecc-strength", &val);
	return ret ? ret : val;
}

/**
 * of_get_nand_ecc_user_config - Extract the ECC configuration requested by
 *                               the user from the device tree
 * @nand: the NAND device
 */
void of_get_nand_ecc_user_config(struct nand_device *nand)
{
	struct device_node *dn = nanddev_get_of_node(nand);
	int strength, size;

	nand->ecc.user_conf.engine_type = of_get_nand_ecc_engine_type(dn);
	nand->ecc.user_conf.algo = of_get_nand_ecc_algo(dn);
	nand->ecc.user_conf.placement = of_get_nand_ecc_placement(dn);

	strength = of_get_nand_ecc_strength(dn);
	if (strength >= 0)
		nand->ecc.user_conf.strength = strength;

	size = of_get_nand_ecc_step_size(dn);
	if (size >= 0)
		nand->ecc.user_conf.step_size = size;

	if (of_property_read_bool(dn, "nand-ecc-maximize"))
		nand->ecc.user_conf.flags |= NAND_ECC_MAXIMIZE_STRENGTH;
}
EXPORT_SYMBOL(of_get_nand_ecc_user_config);
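
/*
 * In a typical probe path (sketched with a hypothetical helper name, not
 * taken from a real driver), the user configuration is parsed from the
 * device tree before the ECC context is set up, and the context is torn
 * down again on the error/removal path:
 *
 *	of_get_nand_ecc_user_config(nand);
 *
 *	ret = my_pick_ecc_engine(nand);		// hypothetical selection step
 *	if (ret)
 *		return ret;
 *
 *	ret = nand_ecc_init_ctx(nand);
 *	if (ret)
 *		return ret;
 *
 *	...
 *	nand_ecc_cleanup_ctx(nand);
 */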

/**
 * nand_ecc_is_strong_enough - Check if the chip configuration meets the
 *                             datasheet requirements.
 *
 * @nand: Device to check
 *
 * If our configuration corrects A bits per B bytes and the minimum
 * required correction level is X bits per Y bytes, then we must ensure
 * both of the following are true:
 *
 * (1) A / B >= X / Y
 * (2) A >= X
 *
 * Requirement (1) ensures we can correct for the required bitflip density.
 * Requirement (2) ensures we can correct even when all bitflips are clumped
 * in the same sector.
 */
bool nand_ecc_is_strong_enough(struct nand_device *nand)
{
	const struct nand_ecc_props *reqs = nanddev_get_ecc_requirements(nand);
	const struct nand_ecc_props *conf = nanddev_get_ecc_conf(nand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	int corr, ds_corr;

	if (conf->step_size == 0 || reqs->step_size == 0)
		/* Not enough information */
		return true;

	/*
	 * We get the number of corrected bits per page to compare
	 * the correction density.
	 */
	corr = (mtd->writesize * conf->strength) / conf->step_size;
	ds_corr = (mtd->writesize * reqs->strength) / reqs->step_size;

	return corr >= ds_corr && conf->strength >= reqs->strength;
}
EXPORT_SYMBOL(nand_ecc_is_strong_enough);
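
/*
 * Worked example (numbers chosen purely for illustration): with 4096-byte
 * pages, a configuration correcting 8 bits per 512-byte step allows 64
 * correctable bits per page. A datasheet requirement of 4 bits per 512
 * bytes (32 bits per page) is then met: 64 >= 32 and 8 >= 4. Conversely, a
 * configuration of 1 bit per 16 bytes (256 bits per page) against the same
 * requirement fails check (2), since 1 < 4 and all the bitflips may be
 * clumped in a single sector.
 */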

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
MODULE_DESCRIPTION("Generic ECC engine");