// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017 Free Electrons
 *
 * Authors:
 *	Boris Brezillon <boris.brezillon@free-electrons.com>
 *	Peter Pan <peterpandong@micron.com>
 */

#define pr_fmt(fmt)	"nand: " fmt

#include <linux/module.h>
#include <linux/mtd/nand.h>

/**
 * nanddev_isbad() - Check if a block is bad
 * @nand: NAND device
 * @pos: position pointing to the block we want to check
 *
 * Return: true if the block is bad, false otherwise.
 */
bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	if (mtd_check_expert_analysis_mode())
		return false;

	if (nanddev_bbt_is_initialized(nand)) {
		unsigned int entry;
		int status;

		entry = nanddev_bbt_pos_to_entry(nand, pos);
		status = nanddev_bbt_get_block_status(nand, entry);
		/* Lazy block status retrieval */
		if (status == NAND_BBT_BLOCK_STATUS_UNKNOWN) {
			if (nand->ops->isbad(nand, pos))
				status = NAND_BBT_BLOCK_FACTORY_BAD;
			else
				status = NAND_BBT_BLOCK_GOOD;

			nanddev_bbt_set_block_status(nand, entry, status);
		}

		if (status == NAND_BBT_BLOCK_WORN ||
		    status == NAND_BBT_BLOCK_FACTORY_BAD)
			return true;

		return false;
	}

	return nand->ops->isbad(nand, pos);
}
EXPORT_SYMBOL_GPL(nanddev_isbad);

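/*
 * Example (illustrative sketch, not part of this file): a specialized layer
 * would typically expose nanddev_isbad() through its mtd->_block_isbad()
 * hook, converting the MTD offset to a NAND position first and serializing
 * device access with its own lock. The foo_nand structure, nand_to_foo_nand()
 * and foo->lock below are hypothetical.
 *
 *	static int foo_nand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
 *	{
 *		struct nand_device *nand = mtd_to_nanddev(mtd);
 *		struct foo_nand *foo = nand_to_foo_nand(nand);
 *		struct nand_pos pos;
 *		int ret;
 *
 *		nanddev_offs_to_pos(nand, offs, &pos);
 *		mutex_lock(&foo->lock);
 *		ret = nanddev_isbad(nand, &pos);
 *		mutex_unlock(&foo->lock);
 *
 *		return ret;
 *	}
 */
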
/**
 * nanddev_markbad() - Mark a block as bad
 * @nand: NAND device
 * @pos: position of the block to mark bad
 *
 * Marks a block bad. This function updates the BBT if available and calls the
 * low-level markbad hook (nand->ops->markbad()).
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	unsigned int entry;
	int ret = 0;

	if (nanddev_isbad(nand, pos))
		return 0;

	ret = nand->ops->markbad(nand, pos);
	if (ret)
		pr_warn("failed to write BBM to block @%llx (err = %d)\n",
			nanddev_pos_to_offs(nand, pos), ret);

	if (!nanddev_bbt_is_initialized(nand))
		goto out;

	entry = nanddev_bbt_pos_to_entry(nand, pos);
	ret = nanddev_bbt_set_block_status(nand, entry, NAND_BBT_BLOCK_WORN);
	if (ret)
		goto out;

	ret = nanddev_bbt_update(nand);

out:
	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
EXPORT_SYMBOL_GPL(nanddev_markbad);

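/*
 * Example (sketch, reusing the hypothetical foo_nand layer from the previous
 * example): an mtd->_block_markbad() hook would usually just translate the
 * offset and call nanddev_markbad() under the layer's own lock.
 *
 *	static int foo_nand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
 *	{
 *		struct nand_device *nand = mtd_to_nanddev(mtd);
 *		struct foo_nand *foo = nand_to_foo_nand(nand);
 *		struct nand_pos pos;
 *		int ret;
 *
 *		nanddev_offs_to_pos(nand, offs, &pos);
 *		mutex_lock(&foo->lock);
 *		ret = nanddev_markbad(nand, &pos);
 *		mutex_unlock(&foo->lock);
 *
 *		return ret;
 *	}
 */
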
/**
 * nanddev_isreserved() - Check whether an eraseblock is reserved or not
 * @nand: NAND device
 * @pos: NAND position to test
 *
 * Checks whether the eraseblock pointed to by @pos is reserved or not.
 *
 * Return: true if the eraseblock is reserved, false otherwise.
 */
bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos)
{
	unsigned int entry;
	int status;

	if (!nanddev_bbt_is_initialized(nand))
		return false;

	/* Return info from the table */
	entry = nanddev_bbt_pos_to_entry(nand, pos);
	status = nanddev_bbt_get_block_status(nand, entry);
	return status == NAND_BBT_BLOCK_RESERVED;
}
EXPORT_SYMBOL_GPL(nanddev_isreserved);

/**
 * nanddev_erase() - Erase a NAND portion
 * @nand: NAND device
 * @pos: position of the block to erase
 *
 * Erases the block if it's not bad.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
static int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	if (nanddev_isbad(nand, pos) || nanddev_isreserved(nand, pos)) {
		pr_warn("attempt to erase a bad/reserved block @%llx\n",
			nanddev_pos_to_offs(nand, pos));
		return -EIO;
	}

	return nand->ops->erase(nand, pos);
}

/**
 * nanddev_mtd_erase() - Generic mtd->_erase() implementation for NAND devices
 * @mtd: MTD device
 * @einfo: erase request
 *
 * This is a simple mtd->_erase() implementation iterating over all blocks
 * covered by @einfo and calling nand->ops->erase() on each of them.
 *
 * Note that mtd->_erase should not be directly assigned to this helper,
 * because there's no locking here. NAND specialized layers should instead
 * implement their own wrapper around nanddev_mtd_erase() taking the
 * appropriate lock before calling nanddev_mtd_erase().
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos, last;
	int ret;

	nanddev_offs_to_pos(nand, einfo->addr, &pos);
	nanddev_offs_to_pos(nand, einfo->addr + einfo->len - 1, &last);
	while (nanddev_pos_cmp(&pos, &last) <= 0) {
		ret = nanddev_erase(nand, &pos);
		if (ret) {
			einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);

			return ret;
		}

		nanddev_pos_next_eraseblock(nand, &pos);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nanddev_mtd_erase);

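/*
 * Example (sketch only): the locked wrapper suggested in the kernel-doc
 * above, for a hypothetical foo_nand layer serializing device access with
 * foo->lock. mtd->_erase would then point to this wrapper rather than to
 * nanddev_mtd_erase() directly.
 *
 *	static int foo_nand_mtd_erase(struct mtd_info *mtd,
 *				      struct erase_info *einfo)
 *	{
 *		struct foo_nand *foo = mtd_to_foo_nand(mtd);
 *		int ret;
 *
 *		mutex_lock(&foo->lock);
 *		ret = nanddev_mtd_erase(mtd, einfo);
 *		mutex_unlock(&foo->lock);
 *
 *		return ret;
 *	}
 */
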
/**
 * nanddev_mtd_max_bad_blocks() - Get the maximum number of bad eraseblocks on
 *				  a specific region of the NAND device
 * @mtd: MTD device
 * @offs: offset of the NAND region
 * @len: length of the NAND region
 *
 * Default implementation for mtd->_max_bad_blocks(). Only works if
 * nand->memorg.max_bad_eraseblocks_per_lun is > 0.
 *
 * Return: a positive number encoding the maximum number of bad eraseblocks
 * in a portion of memory, a negative error code otherwise.
 */
int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos, end;
	unsigned int max_bb = 0;

	if (!nand->memorg.max_bad_eraseblocks_per_lun)
		return -ENOTSUPP;

	nanddev_offs_to_pos(nand, offs, &pos);
	nanddev_offs_to_pos(nand, offs + len, &end);

	for (nanddev_offs_to_pos(nand, offs, &pos);
	     nanddev_pos_cmp(&pos, &end) < 0;
	     nanddev_pos_next_lun(nand, &pos))
		max_bb += nand->memorg.max_bad_eraseblocks_per_lun;

	return max_bb;
}
EXPORT_SYMBOL_GPL(nanddev_mtd_max_bad_blocks);

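/*
 * Example (sketch): unlike nanddev_mtd_erase(), this helper only walks
 * nand->memorg and never accesses the device, so a specialized layer can
 * usually plug it in directly when setting up its mtd_info:
 *
 *	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
 */
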
/**
 * nanddev_get_ecc_engine() - Find and get a suitable ECC engine
 * @nand: NAND device
 */
static int nanddev_get_ecc_engine(struct nand_device *nand)
{
	int engine_type;

	/* Read the user's desired ECC engine/configuration */
	of_get_nand_ecc_user_config(nand);

	engine_type = nand->ecc.user_conf.engine_type;
	if (engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
		engine_type = nand->ecc.defaults.engine_type;

	switch (engine_type) {
	case NAND_ECC_ENGINE_TYPE_NONE:
		return 0;
	case NAND_ECC_ENGINE_TYPE_SOFT:
		nand->ecc.engine = nand_ecc_get_sw_engine(nand);
		break;
	case NAND_ECC_ENGINE_TYPE_ON_DIE:
		nand->ecc.engine = nand_ecc_get_on_die_hw_engine(nand);
		break;
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		nand->ecc.engine = nand_ecc_get_on_host_hw_engine(nand);
		if (PTR_ERR(nand->ecc.engine) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		break;
	default:
		pr_err("Missing ECC engine type\n");
	}

	if (!nand->ecc.engine)
		return -EINVAL;

	return 0;
}

/**
 * nanddev_put_ecc_engine() - Detach and put the in-use ECC engine
 * @nand: NAND device
 */
static int nanddev_put_ecc_engine(struct nand_device *nand)
{
	switch (nand->ecc.ctx.conf.engine_type) {
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		nand_ecc_put_on_host_hw_engine(nand);
		break;
	case NAND_ECC_ENGINE_TYPE_NONE:
	case NAND_ECC_ENGINE_TYPE_SOFT:
	case NAND_ECC_ENGINE_TYPE_ON_DIE:
	default:
		break;
	}

	return 0;
}

/**
 * nanddev_find_ecc_configuration() - Find a suitable ECC configuration
 * @nand: NAND device
 */
static int nanddev_find_ecc_configuration(struct nand_device *nand)
{
	int ret;

	if (!nand->ecc.engine)
		return -ENOTSUPP;

	ret = nand_ecc_init_ctx(nand);
	if (ret)
		return ret;

	if (!nand_ecc_is_strong_enough(nand))
		pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
			nand->mtd.name);

	return 0;
}

/**
 * nanddev_ecc_engine_init() - Initialize an ECC engine for the chip
 * @nand: NAND device
 */
int nanddev_ecc_engine_init(struct nand_device *nand)
{
	int ret;

	/* Look for the ECC engine to use */
	ret = nanddev_get_ecc_engine(nand);
	if (ret) {
		if (ret != -EPROBE_DEFER)
			pr_err("No ECC engine found\n");

		return ret;
	}

	/* No ECC engine requested */
	if (!nand->ecc.engine)
		return 0;

	/* Configure the engine: balance user input and chip requirements */
	ret = nanddev_find_ecc_configuration(nand);
	if (ret) {
		pr_err("No suitable ECC configuration\n");
		nanddev_put_ecc_engine(nand);

		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nanddev_ecc_engine_init);

/**
 * nanddev_ecc_engine_cleanup() - Cleanup ECC engine initializations
 * @nand: NAND device
 */
void nanddev_ecc_engine_cleanup(struct nand_device *nand)
{
	if (nand->ecc.engine)
		nand_ecc_cleanup_ctx(nand);

	nanddev_put_ecc_engine(nand);
}
EXPORT_SYMBOL_GPL(nanddev_ecc_engine_cleanup);

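/*
 * Example (sketch, not part of this file): nanddev_ecc_engine_init() and
 * nanddev_ecc_engine_cleanup() are meant to be paired, typically from a
 * specialized layer's probe and remove paths, with nand->ecc.defaults
 * populated beforehand. The error label below is hypothetical.
 *
 *	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
 *
 *	ret = nanddev_ecc_engine_init(nand);
 *	if (ret)
 *		goto err_cleanup_nanddev;
 *
 *	...
 *
 *	nanddev_ecc_engine_cleanup(nand);
 */
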
/**
 * nanddev_init() - Initialize a NAND device
 * @nand: NAND device
 * @ops: NAND device operations
 * @owner: NAND device owner
 *
 * Initializes a NAND device object. Consistency checks are done on @ops and
 * @nand->memorg. Also takes care of initializing the BBT.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
		 struct module *owner)
{
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_memory_organization *memorg = nanddev_get_memorg(nand);

	if (!nand || !ops)
		return -EINVAL;

	if (!ops->erase || !ops->markbad || !ops->isbad)
		return -EINVAL;

	if (!memorg->bits_per_cell || !memorg->pagesize ||
	    !memorg->pages_per_eraseblock || !memorg->eraseblocks_per_lun ||
	    !memorg->planes_per_lun || !memorg->luns_per_target ||
	    !memorg->ntargets)
		return -EINVAL;

	nand->rowconv.eraseblock_addr_shift =
					fls(memorg->pages_per_eraseblock - 1);
	nand->rowconv.lun_addr_shift = fls(memorg->eraseblocks_per_lun - 1) +
				       nand->rowconv.eraseblock_addr_shift;

	nand->ops = ops;

	mtd->type = memorg->bits_per_cell == 1 ?
		    MTD_NANDFLASH : MTD_MLCNANDFLASH;
	mtd->flags = MTD_CAP_NANDFLASH;
	mtd->erasesize = memorg->pagesize * memorg->pages_per_eraseblock;
	mtd->writesize = memorg->pagesize;
	mtd->writebufsize = memorg->pagesize;
	mtd->oobsize = memorg->oobsize;
	mtd->size = nanddev_size(nand);
	mtd->owner = owner;

	return nanddev_bbt_init(nand);
}
EXPORT_SYMBOL_GPL(nanddev_init);

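/*
 * Example (illustrative sketch): a typical caller fills nand->memorg and
 * provides its nand_ops before letting nanddev_init() derive the mtd_info
 * geometry and initialize the BBT. The values and foo_nand_ops below are
 * hypothetical.
 *
 *	struct nand_memory_organization *memorg = nanddev_get_memorg(nand);
 *
 *	memorg->bits_per_cell = 1;
 *	memorg->pagesize = 2048;
 *	memorg->oobsize = 64;
 *	memorg->pages_per_eraseblock = 64;
 *	memorg->eraseblocks_per_lun = 1024;
 *	memorg->planes_per_lun = 1;
 *	memorg->luns_per_target = 1;
 *	memorg->ntargets = 1;
 *
 *	ret = nanddev_init(nand, &foo_nand_ops, THIS_MODULE);
 *	if (ret)
 *		return ret;
 */
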
/**
 * nanddev_cleanup() - Release resources allocated in nanddev_init()
 * @nand: NAND device
 *
 * Basically undoes what has been done in nanddev_init().
 */
void nanddev_cleanup(struct nand_device *nand)
{
	if (nanddev_bbt_is_initialized(nand))
		nanddev_bbt_cleanup(nand);
}
EXPORT_SYMBOL_GPL(nanddev_cleanup);

MODULE_DESCRIPTION("Generic NAND framework");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_LICENSE("GPL v2");