• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2016-2017 Micron Technology, Inc.
4  *
5  * Authors:
6  *	Peter Pan <peterpandong@micron.com>
7  *	Boris Brezillon <boris.brezillon@bootlin.com>
8  */
9 
10 #define pr_fmt(fmt)	"spi-nand: " fmt
11 
12 #include <linux/device.h>
13 #include <linux/jiffies.h>
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/mtd/spinand.h>
17 #include <linux/of.h>
18 #include <linux/slab.h>
19 #include <linux/string.h>
20 #include <linux/spi/spi.h>
21 #include <linux/spi/spi-mem.h>
22 
spinand_read_reg_op(struct spinand_device * spinand,u8 reg,u8 * val)23 static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
24 {
25 	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
26 						      spinand->scratchbuf);
27 	int ret;
28 
29 	ret = spi_mem_exec_op(spinand->spimem, &op);
30 	if (ret)
31 		return ret;
32 
33 	*val = *spinand->scratchbuf;
34 	return 0;
35 }
36 
spinand_write_reg_op(struct spinand_device * spinand,u8 reg,u8 val)37 int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
38 {
39 	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
40 						      spinand->scratchbuf);
41 
42 	*spinand->scratchbuf = val;
43 	return spi_mem_exec_op(spinand->spimem, &op);
44 }
45 
spinand_read_status(struct spinand_device * spinand,u8 * status)46 static int spinand_read_status(struct spinand_device *spinand, u8 *status)
47 {
48 	return spinand_read_reg_op(spinand, REG_STATUS, status);
49 }
50 
spinand_get_cfg(struct spinand_device * spinand,u8 * cfg)51 static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
52 {
53 	struct nand_device *nand = spinand_to_nand(spinand);
54 
55 	if (WARN_ON(spinand->cur_target < 0 ||
56 		    spinand->cur_target >= nand->memorg.ntargets))
57 		return -EINVAL;
58 
59 	*cfg = spinand->cfg_cache[spinand->cur_target];
60 	return 0;
61 }
62 
spinand_set_cfg(struct spinand_device * spinand,u8 cfg)63 static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
64 {
65 	struct nand_device *nand = spinand_to_nand(spinand);
66 	int ret;
67 
68 	if (WARN_ON(spinand->cur_target < 0 ||
69 		    spinand->cur_target >= nand->memorg.ntargets))
70 		return -EINVAL;
71 
72 	if (spinand->cfg_cache[spinand->cur_target] == cfg)
73 		return 0;
74 
75 	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
76 	if (ret)
77 		return ret;
78 
79 	spinand->cfg_cache[spinand->cur_target] = cfg;
80 	return 0;
81 }
82 
83 /**
84  * spinand_upd_cfg() - Update the configuration register
85  * @spinand: the spinand device
86  * @mask: the mask encoding the bits to update in the config reg
87  * @val: the new value to apply
88  *
89  * Update the configuration register.
90  *
91  * Return: 0 on success, a negative error code otherwise.
92  */
spinand_upd_cfg(struct spinand_device * spinand,u8 mask,u8 val)93 int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
94 {
95 	int ret;
96 	u8 cfg;
97 
98 	ret = spinand_get_cfg(spinand, &cfg);
99 	if (ret)
100 		return ret;
101 
102 	cfg &= ~mask;
103 	cfg |= val;
104 
105 	return spinand_set_cfg(spinand, cfg);
106 }
107 
108 /**
109  * spinand_select_target() - Select a specific NAND target/die
110  * @spinand: the spinand device
111  * @target: the target/die to select
112  *
113  * Select a new target/die. If chip only has one die, this function is a NOOP.
114  *
115  * Return: 0 on success, a negative error code otherwise.
116  */
spinand_select_target(struct spinand_device * spinand,unsigned int target)117 int spinand_select_target(struct spinand_device *spinand, unsigned int target)
118 {
119 	struct nand_device *nand = spinand_to_nand(spinand);
120 	int ret;
121 
122 	if (WARN_ON(target >= nand->memorg.ntargets))
123 		return -EINVAL;
124 
125 	if (spinand->cur_target == target)
126 		return 0;
127 
128 	if (nand->memorg.ntargets == 1) {
129 		spinand->cur_target = target;
130 		return 0;
131 	}
132 
133 	ret = spinand->select_target(spinand, target);
134 	if (ret)
135 		return ret;
136 
137 	spinand->cur_target = target;
138 	return 0;
139 }
140 
spinand_read_cfg(struct spinand_device * spinand)141 static int spinand_read_cfg(struct spinand_device *spinand)
142 {
143 	struct nand_device *nand = spinand_to_nand(spinand);
144 	unsigned int target;
145 	int ret;
146 
147 	for (target = 0; target < nand->memorg.ntargets; target++) {
148 		ret = spinand_select_target(spinand, target);
149 		if (ret)
150 			return ret;
151 
152 		/*
153 		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
154 		 * here to bypass the config cache.
155 		 */
156 		ret = spinand_read_reg_op(spinand, REG_CFG,
157 					  &spinand->cfg_cache[target]);
158 		if (ret)
159 			return ret;
160 	}
161 
162 	return 0;
163 }
164 
spinand_init_cfg_cache(struct spinand_device * spinand)165 static int spinand_init_cfg_cache(struct spinand_device *spinand)
166 {
167 	struct nand_device *nand = spinand_to_nand(spinand);
168 	struct device *dev = &spinand->spimem->spi->dev;
169 
170 	spinand->cfg_cache = devm_kcalloc(dev,
171 					  nand->memorg.ntargets,
172 					  sizeof(*spinand->cfg_cache),
173 					  GFP_KERNEL);
174 	if (!spinand->cfg_cache)
175 		return -ENOMEM;
176 
177 	return 0;
178 }
179 
spinand_init_quad_enable(struct spinand_device * spinand)180 static int spinand_init_quad_enable(struct spinand_device *spinand)
181 {
182 	bool enable = false;
183 
184 	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
185 		return 0;
186 
187 	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
188 	    spinand->op_templates.write_cache->data.buswidth == 4 ||
189 	    spinand->op_templates.update_cache->data.buswidth == 4)
190 		enable = true;
191 
192 	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
193 			       enable ? CFG_QUAD_ENABLE : 0);
194 }
195 
spinand_ecc_enable(struct spinand_device * spinand,bool enable)196 static int spinand_ecc_enable(struct spinand_device *spinand,
197 			      bool enable)
198 {
199 	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
200 			       enable ? CFG_ECC_ENABLE : 0);
201 }
202 
spinand_cont_read_enable(struct spinand_device * spinand,bool enable)203 static int spinand_cont_read_enable(struct spinand_device *spinand,
204 				    bool enable)
205 {
206 	return spinand->set_cont_read(spinand, enable);
207 }
208 
spinand_check_ecc_status(struct spinand_device * spinand,u8 status)209 static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
210 {
211 	struct nand_device *nand = spinand_to_nand(spinand);
212 
213 	if (spinand->eccinfo.get_status)
214 		return spinand->eccinfo.get_status(spinand, status);
215 
216 	switch (status & STATUS_ECC_MASK) {
217 	case STATUS_ECC_NO_BITFLIPS:
218 		return 0;
219 
220 	case STATUS_ECC_HAS_BITFLIPS:
221 		/*
222 		 * We have no way to know exactly how many bitflips have been
223 		 * fixed, so let's return the maximum possible value so that
224 		 * wear-leveling layers move the data immediately.
225 		 */
226 		return nanddev_get_ecc_conf(nand)->strength;
227 
228 	case STATUS_ECC_UNCOR_ERROR:
229 		return -EBADMSG;
230 
231 	default:
232 		break;
233 	}
234 
235 	return -EINVAL;
236 }
237 
spinand_noecc_ooblayout_ecc(struct mtd_info * mtd,int section,struct mtd_oob_region * region)238 static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
239 				       struct mtd_oob_region *region)
240 {
241 	return -ERANGE;
242 }
243 
spinand_noecc_ooblayout_free(struct mtd_info * mtd,int section,struct mtd_oob_region * region)244 static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
245 					struct mtd_oob_region *region)
246 {
247 	if (section)
248 		return -ERANGE;
249 
250 	/* Reserve 2 bytes for the BBM. */
251 	region->offset = 2;
252 	region->length = 62;
253 
254 	return 0;
255 }
256 
257 static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
258 	.ecc = spinand_noecc_ooblayout_ecc,
259 	.free = spinand_noecc_ooblayout_free,
260 };
261 
spinand_ondie_ecc_init_ctx(struct nand_device * nand)262 static int spinand_ondie_ecc_init_ctx(struct nand_device *nand)
263 {
264 	struct spinand_device *spinand = nand_to_spinand(nand);
265 	struct mtd_info *mtd = nanddev_to_mtd(nand);
266 	struct spinand_ondie_ecc_conf *engine_conf;
267 
268 	nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
269 	nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size;
270 	nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength;
271 
272 	engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
273 	if (!engine_conf)
274 		return -ENOMEM;
275 
276 	nand->ecc.ctx.priv = engine_conf;
277 
278 	if (spinand->eccinfo.ooblayout)
279 		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
280 	else
281 		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
282 
283 	return 0;
284 }
285 
spinand_ondie_ecc_cleanup_ctx(struct nand_device * nand)286 static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand)
287 {
288 	kfree(nand->ecc.ctx.priv);
289 }
290 
spinand_ondie_ecc_prepare_io_req(struct nand_device * nand,struct nand_page_io_req * req)291 static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
292 					    struct nand_page_io_req *req)
293 {
294 	struct spinand_device *spinand = nand_to_spinand(nand);
295 	bool enable = (req->mode != MTD_OPS_RAW);
296 
297 	memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand));
298 
299 	/* Only enable or disable the engine */
300 	return spinand_ecc_enable(spinand, enable);
301 }
302 
spinand_ondie_ecc_finish_io_req(struct nand_device * nand,struct nand_page_io_req * req)303 static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
304 					   struct nand_page_io_req *req)
305 {
306 	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
307 	struct spinand_device *spinand = nand_to_spinand(nand);
308 	struct mtd_info *mtd = spinand_to_mtd(spinand);
309 	int ret;
310 
311 	if (req->mode == MTD_OPS_RAW)
312 		return 0;
313 
314 	/* Nothing to do when finishing a page write */
315 	if (req->type == NAND_PAGE_WRITE)
316 		return 0;
317 
318 	/* Finish a page read: check the status, report errors/bitflips */
319 	ret = spinand_check_ecc_status(spinand, engine_conf->status);
320 	if (ret == -EBADMSG) {
321 		mtd->ecc_stats.failed++;
322 	} else if (ret > 0) {
323 		unsigned int pages;
324 
325 		/*
326 		 * Continuous reads don't allow us to get the detail,
327 		 * so we may exagerate the actual number of corrected bitflips.
328 		 */
329 		if (!req->continuous)
330 			pages = 1;
331 		else
332 			pages = req->datalen / nanddev_page_size(nand);
333 
334 		mtd->ecc_stats.corrected += ret * pages;
335 	}
336 
337 	return ret;
338 }
339 
340 static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
341 	.init_ctx = spinand_ondie_ecc_init_ctx,
342 	.cleanup_ctx = spinand_ondie_ecc_cleanup_ctx,
343 	.prepare_io_req = spinand_ondie_ecc_prepare_io_req,
344 	.finish_io_req = spinand_ondie_ecc_finish_io_req,
345 };
346 
347 static struct nand_ecc_engine spinand_ondie_ecc_engine = {
348 	.ops = &spinand_ondie_ecc_engine_ops,
349 };
350 
spinand_ondie_ecc_save_status(struct nand_device * nand,u8 status)351 static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
352 {
353 	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
354 
355 	if (nand->ecc.ctx.conf.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE &&
356 	    engine_conf)
357 		engine_conf->status = status;
358 }
359 
spinand_write_enable_op(struct spinand_device * spinand)360 static int spinand_write_enable_op(struct spinand_device *spinand)
361 {
362 	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
363 
364 	return spi_mem_exec_op(spinand->spimem, &op);
365 }
366 
spinand_load_page_op(struct spinand_device * spinand,const struct nand_page_io_req * req)367 static int spinand_load_page_op(struct spinand_device *spinand,
368 				const struct nand_page_io_req *req)
369 {
370 	struct nand_device *nand = spinand_to_nand(spinand);
371 	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
372 	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);
373 
374 	return spi_mem_exec_op(spinand->spimem, &op);
375 }
376 
spinand_read_from_cache_op(struct spinand_device * spinand,const struct nand_page_io_req * req)377 static int spinand_read_from_cache_op(struct spinand_device *spinand,
378 				      const struct nand_page_io_req *req)
379 {
380 	struct nand_device *nand = spinand_to_nand(spinand);
381 	struct mtd_info *mtd = spinand_to_mtd(spinand);
382 	struct spi_mem_dirmap_desc *rdesc;
383 	unsigned int nbytes = 0;
384 	void *buf = NULL;
385 	u16 column = 0;
386 	ssize_t ret;
387 
388 	if (req->datalen) {
389 		buf = spinand->databuf;
390 		if (!req->continuous)
391 			nbytes = nanddev_page_size(nand);
392 		else
393 			nbytes = round_up(req->dataoffs + req->datalen,
394 					  nanddev_page_size(nand));
395 		column = 0;
396 	}
397 
398 	if (req->ooblen) {
399 		nbytes += nanddev_per_page_oobsize(nand);
400 		if (!buf) {
401 			buf = spinand->oobbuf;
402 			column = nanddev_page_size(nand);
403 		}
404 	}
405 
406 	if (req->mode == MTD_OPS_RAW)
407 		rdesc = spinand->dirmaps[req->pos.plane].rdesc;
408 	else
409 		rdesc = spinand->dirmaps[req->pos.plane].rdesc_ecc;
410 
411 	if (spinand->flags & SPINAND_HAS_READ_PLANE_SELECT_BIT)
412 		column |= req->pos.plane << fls(nanddev_page_size(nand));
413 
414 	while (nbytes) {
415 		ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
416 		if (ret < 0)
417 			return ret;
418 
419 		if (!ret || ret > nbytes)
420 			return -EIO;
421 
422 		nbytes -= ret;
423 		column += ret;
424 		buf += ret;
425 
426 		/*
427 		 * Dirmap accesses are allowed to toggle the CS.
428 		 * Toggling the CS during a continuous read is forbidden.
429 		 */
430 		if (nbytes && req->continuous)
431 			return -EIO;
432 	}
433 
434 	if (req->datalen)
435 		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
436 		       req->datalen);
437 
438 	if (req->ooblen) {
439 		if (req->mode == MTD_OPS_AUTO_OOB)
440 			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
441 						    spinand->oobbuf,
442 						    req->ooboffs,
443 						    req->ooblen);
444 		else
445 			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
446 			       req->ooblen);
447 	}
448 
449 	return 0;
450 }
451 
spinand_write_to_cache_op(struct spinand_device * spinand,const struct nand_page_io_req * req)452 static int spinand_write_to_cache_op(struct spinand_device *spinand,
453 				     const struct nand_page_io_req *req)
454 {
455 	struct nand_device *nand = spinand_to_nand(spinand);
456 	struct mtd_info *mtd = spinand_to_mtd(spinand);
457 	struct spi_mem_dirmap_desc *wdesc;
458 	unsigned int nbytes, column = 0;
459 	void *buf = spinand->databuf;
460 	ssize_t ret;
461 
462 	/*
463 	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
464 	 * the cache content to 0xFF (depends on vendor implementation), so we
465 	 * must fill the page cache entirely even if we only want to program
466 	 * the data portion of the page, otherwise we might corrupt the BBM or
467 	 * user data previously programmed in OOB area.
468 	 *
469 	 * Only reset the data buffer manually, the OOB buffer is prepared by
470 	 * ECC engines ->prepare_io_req() callback.
471 	 */
472 	nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
473 	memset(spinand->databuf, 0xff, nanddev_page_size(nand));
474 
475 	if (req->datalen)
476 		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
477 		       req->datalen);
478 
479 	if (req->ooblen) {
480 		if (req->mode == MTD_OPS_AUTO_OOB)
481 			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
482 						    spinand->oobbuf,
483 						    req->ooboffs,
484 						    req->ooblen);
485 		else
486 			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
487 			       req->ooblen);
488 	}
489 
490 	if (req->mode == MTD_OPS_RAW)
491 		wdesc = spinand->dirmaps[req->pos.plane].wdesc;
492 	else
493 		wdesc = spinand->dirmaps[req->pos.plane].wdesc_ecc;
494 
495 	if (spinand->flags & SPINAND_HAS_PROG_PLANE_SELECT_BIT)
496 		column |= req->pos.plane << fls(nanddev_page_size(nand));
497 
498 	while (nbytes) {
499 		ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
500 		if (ret < 0)
501 			return ret;
502 
503 		if (!ret || ret > nbytes)
504 			return -EIO;
505 
506 		nbytes -= ret;
507 		column += ret;
508 		buf += ret;
509 	}
510 
511 	return 0;
512 }
513 
spinand_program_op(struct spinand_device * spinand,const struct nand_page_io_req * req)514 static int spinand_program_op(struct spinand_device *spinand,
515 			      const struct nand_page_io_req *req)
516 {
517 	struct nand_device *nand = spinand_to_nand(spinand);
518 	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
519 	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);
520 
521 	return spi_mem_exec_op(spinand->spimem, &op);
522 }
523 
spinand_erase_op(struct spinand_device * spinand,const struct nand_pos * pos)524 static int spinand_erase_op(struct spinand_device *spinand,
525 			    const struct nand_pos *pos)
526 {
527 	struct nand_device *nand = spinand_to_nand(spinand);
528 	unsigned int row = nanddev_pos_to_row(nand, pos);
529 	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);
530 
531 	return spi_mem_exec_op(spinand->spimem, &op);
532 }
533 
spinand_wait(struct spinand_device * spinand,unsigned long initial_delay_us,unsigned long poll_delay_us,u8 * s)534 static int spinand_wait(struct spinand_device *spinand,
535 			unsigned long initial_delay_us,
536 			unsigned long poll_delay_us,
537 			u8 *s)
538 {
539 	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
540 						      spinand->scratchbuf);
541 	u8 status;
542 	int ret;
543 
544 	ret = spi_mem_poll_status(spinand->spimem, &op, STATUS_BUSY, 0,
545 				  initial_delay_us,
546 				  poll_delay_us,
547 				  SPINAND_WAITRDY_TIMEOUT_MS);
548 	if (ret)
549 		return ret;
550 
551 	status = *spinand->scratchbuf;
552 	if (!(status & STATUS_BUSY))
553 		goto out;
554 
555 	/*
556 	 * Extra read, just in case the STATUS_READY bit has changed
557 	 * since our last check
558 	 */
559 	ret = spinand_read_status(spinand, &status);
560 	if (ret)
561 		return ret;
562 
563 out:
564 	if (s)
565 		*s = status;
566 
567 	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
568 }
569 
spinand_read_id_op(struct spinand_device * spinand,u8 naddr,u8 ndummy,u8 * buf)570 static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
571 			      u8 ndummy, u8 *buf)
572 {
573 	struct spi_mem_op op = SPINAND_READID_OP(
574 		naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
575 	int ret;
576 
577 	ret = spi_mem_exec_op(spinand->spimem, &op);
578 	if (!ret)
579 		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
580 
581 	return ret;
582 }
583 
spinand_reset_op(struct spinand_device * spinand)584 static int spinand_reset_op(struct spinand_device *spinand)
585 {
586 	struct spi_mem_op op = SPINAND_RESET_OP;
587 	int ret;
588 
589 	ret = spi_mem_exec_op(spinand->spimem, &op);
590 	if (ret)
591 		return ret;
592 
593 	return spinand_wait(spinand,
594 			    SPINAND_RESET_INITIAL_DELAY_US,
595 			    SPINAND_RESET_POLL_DELAY_US,
596 			    NULL);
597 }
598 
spinand_lock_block(struct spinand_device * spinand,u8 lock)599 static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
600 {
601 	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
602 }
603 
spinand_read_page(struct spinand_device * spinand,const struct nand_page_io_req * req)604 static int spinand_read_page(struct spinand_device *spinand,
605 			     const struct nand_page_io_req *req)
606 {
607 	struct nand_device *nand = spinand_to_nand(spinand);
608 	u8 status;
609 	int ret;
610 
611 	ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
612 	if (ret)
613 		return ret;
614 
615 	ret = spinand_load_page_op(spinand, req);
616 	if (ret)
617 		return ret;
618 
619 	ret = spinand_wait(spinand,
620 			   SPINAND_READ_INITIAL_DELAY_US,
621 			   SPINAND_READ_POLL_DELAY_US,
622 			   &status);
623 	if (ret < 0)
624 		return ret;
625 
626 	spinand_ondie_ecc_save_status(nand, status);
627 
628 	ret = spinand_read_from_cache_op(spinand, req);
629 	if (ret)
630 		return ret;
631 
632 	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
633 }
634 
spinand_write_page(struct spinand_device * spinand,const struct nand_page_io_req * req)635 static int spinand_write_page(struct spinand_device *spinand,
636 			      const struct nand_page_io_req *req)
637 {
638 	struct nand_device *nand = spinand_to_nand(spinand);
639 	u8 status;
640 	int ret;
641 
642 	ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
643 	if (ret)
644 		return ret;
645 
646 	ret = spinand_write_enable_op(spinand);
647 	if (ret)
648 		return ret;
649 
650 	ret = spinand_write_to_cache_op(spinand, req);
651 	if (ret)
652 		return ret;
653 
654 	ret = spinand_program_op(spinand, req);
655 	if (ret)
656 		return ret;
657 
658 	ret = spinand_wait(spinand,
659 			   SPINAND_WRITE_INITIAL_DELAY_US,
660 			   SPINAND_WRITE_POLL_DELAY_US,
661 			   &status);
662 	if (ret)
663 		return ret;
664 
665 	if (status & STATUS_PROG_FAILED)
666 		return -EIO;
667 
668 	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
669 }
670 
spinand_mtd_regular_page_read(struct mtd_info * mtd,loff_t from,struct mtd_oob_ops * ops,unsigned int * max_bitflips)671 static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
672 					 struct mtd_oob_ops *ops,
673 					 unsigned int *max_bitflips)
674 {
675 	struct spinand_device *spinand = mtd_to_spinand(mtd);
676 	struct nand_device *nand = mtd_to_nanddev(mtd);
677 	struct nand_io_iter iter;
678 	bool disable_ecc = false;
679 	bool ecc_failed = false;
680 	int ret;
681 
682 	if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
683 		disable_ecc = true;
684 
685 	nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
686 		if (disable_ecc)
687 			iter.req.mode = MTD_OPS_RAW;
688 
689 		ret = spinand_select_target(spinand, iter.req.pos.target);
690 		if (ret)
691 			break;
692 
693 		ret = spinand_read_page(spinand, &iter.req);
694 		if (ret < 0 && ret != -EBADMSG)
695 			break;
696 
697 		if (ret == -EBADMSG)
698 			ecc_failed = true;
699 		else
700 			*max_bitflips = max_t(unsigned int, *max_bitflips, ret);
701 
702 		ret = 0;
703 		ops->retlen += iter.req.datalen;
704 		ops->oobretlen += iter.req.ooblen;
705 	}
706 
707 	if (ecc_failed && !ret)
708 		ret = -EBADMSG;
709 
710 	return ret;
711 }
712 
spinand_mtd_continuous_page_read(struct mtd_info * mtd,loff_t from,struct mtd_oob_ops * ops,unsigned int * max_bitflips)713 static int spinand_mtd_continuous_page_read(struct mtd_info *mtd, loff_t from,
714 					    struct mtd_oob_ops *ops,
715 					    unsigned int *max_bitflips)
716 {
717 	struct spinand_device *spinand = mtd_to_spinand(mtd);
718 	struct nand_device *nand = mtd_to_nanddev(mtd);
719 	struct nand_io_iter iter;
720 	u8 status;
721 	int ret;
722 
723 	ret = spinand_cont_read_enable(spinand, true);
724 	if (ret)
725 		return ret;
726 
727 	/*
728 	 * The cache is divided into two halves. While one half of the cache has
729 	 * the requested data, the other half is loaded with the next chunk of data.
730 	 * Therefore, the host can read out the data continuously from page to page.
731 	 * Each data read must be a multiple of 4-bytes and full pages should be read;
732 	 * otherwise, the data output might get out of sequence from one read command
733 	 * to another.
734 	 */
735 	nanddev_io_for_each_block(nand, NAND_PAGE_READ, from, ops, &iter) {
736 		ret = spinand_select_target(spinand, iter.req.pos.target);
737 		if (ret)
738 			goto end_cont_read;
739 
740 		ret = nand_ecc_prepare_io_req(nand, &iter.req);
741 		if (ret)
742 			goto end_cont_read;
743 
744 		ret = spinand_load_page_op(spinand, &iter.req);
745 		if (ret)
746 			goto end_cont_read;
747 
748 		ret = spinand_wait(spinand, SPINAND_READ_INITIAL_DELAY_US,
749 				   SPINAND_READ_POLL_DELAY_US, NULL);
750 		if (ret < 0)
751 			goto end_cont_read;
752 
753 		ret = spinand_read_from_cache_op(spinand, &iter.req);
754 		if (ret)
755 			goto end_cont_read;
756 
757 		ops->retlen += iter.req.datalen;
758 
759 		ret = spinand_read_status(spinand, &status);
760 		if (ret)
761 			goto end_cont_read;
762 
763 		spinand_ondie_ecc_save_status(nand, status);
764 
765 		ret = nand_ecc_finish_io_req(nand, &iter.req);
766 		if (ret < 0)
767 			goto end_cont_read;
768 
769 		*max_bitflips = max_t(unsigned int, *max_bitflips, ret);
770 		ret = 0;
771 	}
772 
773 end_cont_read:
774 	/*
775 	 * Once all the data has been read out, the host can either pull CS#
776 	 * high and wait for tRST or manually clear the bit in the configuration
777 	 * register to terminate the continuous read operation. We have no
778 	 * guarantee the SPI controller drivers will effectively deassert the CS
779 	 * when we expect them to, so take the register based approach.
780 	 */
781 	spinand_cont_read_enable(spinand, false);
782 
783 	return ret;
784 }
785 
spinand_cont_read_init(struct spinand_device * spinand)786 static void spinand_cont_read_init(struct spinand_device *spinand)
787 {
788 	struct nand_device *nand = spinand_to_nand(spinand);
789 	enum nand_ecc_engine_type engine_type = nand->ecc.ctx.conf.engine_type;
790 
791 	/* OOBs cannot be retrieved so external/on-host ECC engine won't work */
792 	if (spinand->set_cont_read &&
793 	    (engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE ||
794 	     engine_type == NAND_ECC_ENGINE_TYPE_NONE)) {
795 		spinand->cont_read_possible = true;
796 	}
797 }
798 
spinand_use_cont_read(struct mtd_info * mtd,loff_t from,struct mtd_oob_ops * ops)799 static bool spinand_use_cont_read(struct mtd_info *mtd, loff_t from,
800 				  struct mtd_oob_ops *ops)
801 {
802 	struct nand_device *nand = mtd_to_nanddev(mtd);
803 	struct spinand_device *spinand = nand_to_spinand(nand);
804 	struct nand_pos start_pos, end_pos;
805 
806 	if (!spinand->cont_read_possible)
807 		return false;
808 
809 	/* OOBs won't be retrieved */
810 	if (ops->ooblen || ops->oobbuf)
811 		return false;
812 
813 	nanddev_offs_to_pos(nand, from, &start_pos);
814 	nanddev_offs_to_pos(nand, from + ops->len - 1, &end_pos);
815 
816 	/*
817 	 * Continuous reads never cross LUN boundaries. Some devices don't
818 	 * support crossing planes boundaries. Some devices don't even support
819 	 * crossing blocks boundaries. The common case being to read through UBI,
820 	 * we will very rarely read two consequent blocks or more, so it is safer
821 	 * and easier (can be improved) to only enable continuous reads when
822 	 * reading within the same erase block.
823 	 */
824 	if (start_pos.target != end_pos.target ||
825 	    start_pos.plane != end_pos.plane ||
826 	    start_pos.eraseblock != end_pos.eraseblock)
827 		return false;
828 
829 	return start_pos.page < end_pos.page;
830 }
831 
spinand_mtd_read(struct mtd_info * mtd,loff_t from,struct mtd_oob_ops * ops)832 static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
833 			    struct mtd_oob_ops *ops)
834 {
835 	struct spinand_device *spinand = mtd_to_spinand(mtd);
836 	struct mtd_ecc_stats old_stats;
837 	unsigned int max_bitflips = 0;
838 	int ret;
839 
840 	mutex_lock(&spinand->lock);
841 
842 	old_stats = mtd->ecc_stats;
843 
844 	if (spinand_use_cont_read(mtd, from, ops))
845 		ret = spinand_mtd_continuous_page_read(mtd, from, ops, &max_bitflips);
846 	else
847 		ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);
848 
849 	if (ops->stats) {
850 		ops->stats->uncorrectable_errors +=
851 			mtd->ecc_stats.failed - old_stats.failed;
852 		ops->stats->corrected_bitflips +=
853 			mtd->ecc_stats.corrected - old_stats.corrected;
854 	}
855 
856 	mutex_unlock(&spinand->lock);
857 
858 	return ret ? ret : max_bitflips;
859 }
860 
spinand_mtd_write(struct mtd_info * mtd,loff_t to,struct mtd_oob_ops * ops)861 static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
862 			     struct mtd_oob_ops *ops)
863 {
864 	struct spinand_device *spinand = mtd_to_spinand(mtd);
865 	struct nand_device *nand = mtd_to_nanddev(mtd);
866 	struct nand_io_iter iter;
867 	bool disable_ecc = false;
868 	int ret = 0;
869 
870 	if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
871 		disable_ecc = true;
872 
873 	mutex_lock(&spinand->lock);
874 
875 	nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
876 		if (disable_ecc)
877 			iter.req.mode = MTD_OPS_RAW;
878 
879 		ret = spinand_select_target(spinand, iter.req.pos.target);
880 		if (ret)
881 			break;
882 
883 		ret = spinand_write_page(spinand, &iter.req);
884 		if (ret)
885 			break;
886 
887 		ops->retlen += iter.req.datalen;
888 		ops->oobretlen += iter.req.ooblen;
889 	}
890 
891 	mutex_unlock(&spinand->lock);
892 
893 	return ret;
894 }
895 
spinand_isbad(struct nand_device * nand,const struct nand_pos * pos)896 static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
897 {
898 	struct spinand_device *spinand = nand_to_spinand(nand);
899 	u8 marker[2] = { };
900 	struct nand_page_io_req req = {
901 		.pos = *pos,
902 		.ooblen = sizeof(marker),
903 		.ooboffs = 0,
904 		.oobbuf.in = marker,
905 		.mode = MTD_OPS_RAW,
906 	};
907 
908 	spinand_select_target(spinand, pos->target);
909 	spinand_read_page(spinand, &req);
910 	if (marker[0] != 0xff || marker[1] != 0xff)
911 		return true;
912 
913 	return false;
914 }
915 
spinand_mtd_block_isbad(struct mtd_info * mtd,loff_t offs)916 static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
917 {
918 	struct nand_device *nand = mtd_to_nanddev(mtd);
919 	struct spinand_device *spinand = nand_to_spinand(nand);
920 	struct nand_pos pos;
921 	int ret;
922 
923 	nanddev_offs_to_pos(nand, offs, &pos);
924 	mutex_lock(&spinand->lock);
925 	ret = nanddev_isbad(nand, &pos);
926 	mutex_unlock(&spinand->lock);
927 
928 	return ret;
929 }
930 
spinand_markbad(struct nand_device * nand,const struct nand_pos * pos)931 static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
932 {
933 	struct spinand_device *spinand = nand_to_spinand(nand);
934 	u8 marker[2] = { };
935 	struct nand_page_io_req req = {
936 		.pos = *pos,
937 		.ooboffs = 0,
938 		.ooblen = sizeof(marker),
939 		.oobbuf.out = marker,
940 		.mode = MTD_OPS_RAW,
941 	};
942 	int ret;
943 
944 	ret = spinand_select_target(spinand, pos->target);
945 	if (ret)
946 		return ret;
947 
948 	ret = spinand_write_enable_op(spinand);
949 	if (ret)
950 		return ret;
951 
952 	return spinand_write_page(spinand, &req);
953 }
954 
spinand_mtd_block_markbad(struct mtd_info * mtd,loff_t offs)955 static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
956 {
957 	struct nand_device *nand = mtd_to_nanddev(mtd);
958 	struct spinand_device *spinand = nand_to_spinand(nand);
959 	struct nand_pos pos;
960 	int ret;
961 
962 	nanddev_offs_to_pos(nand, offs, &pos);
963 	mutex_lock(&spinand->lock);
964 	ret = nanddev_markbad(nand, &pos);
965 	mutex_unlock(&spinand->lock);
966 
967 	return ret;
968 }
969 
spinand_erase(struct nand_device * nand,const struct nand_pos * pos)970 static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
971 {
972 	struct spinand_device *spinand = nand_to_spinand(nand);
973 	u8 status;
974 	int ret;
975 
976 	ret = spinand_select_target(spinand, pos->target);
977 	if (ret)
978 		return ret;
979 
980 	ret = spinand_write_enable_op(spinand);
981 	if (ret)
982 		return ret;
983 
984 	ret = spinand_erase_op(spinand, pos);
985 	if (ret)
986 		return ret;
987 
988 	ret = spinand_wait(spinand,
989 			   SPINAND_ERASE_INITIAL_DELAY_US,
990 			   SPINAND_ERASE_POLL_DELAY_US,
991 			   &status);
992 
993 	if (!ret && (status & STATUS_ERASE_FAILED))
994 		ret = -EIO;
995 
996 	return ret;
997 }
998 
spinand_mtd_erase(struct mtd_info * mtd,struct erase_info * einfo)999 static int spinand_mtd_erase(struct mtd_info *mtd,
1000 			     struct erase_info *einfo)
1001 {
1002 	struct spinand_device *spinand = mtd_to_spinand(mtd);
1003 	int ret;
1004 
1005 	mutex_lock(&spinand->lock);
1006 	ret = nanddev_mtd_erase(mtd, einfo);
1007 	mutex_unlock(&spinand->lock);
1008 
1009 	return ret;
1010 }
1011 
spinand_mtd_block_isreserved(struct mtd_info * mtd,loff_t offs)1012 static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
1013 {
1014 	struct spinand_device *spinand = mtd_to_spinand(mtd);
1015 	struct nand_device *nand = mtd_to_nanddev(mtd);
1016 	struct nand_pos pos;
1017 	int ret;
1018 
1019 	nanddev_offs_to_pos(nand, offs, &pos);
1020 	mutex_lock(&spinand->lock);
1021 	ret = nanddev_isreserved(nand, &pos);
1022 	mutex_unlock(&spinand->lock);
1023 
1024 	return ret;
1025 }
1026 
spinand_create_dirmap(struct spinand_device * spinand,unsigned int plane)1027 static int spinand_create_dirmap(struct spinand_device *spinand,
1028 				 unsigned int plane)
1029 {
1030 	struct nand_device *nand = spinand_to_nand(spinand);
1031 	struct spi_mem_dirmap_info info = {
1032 		.length = nanddev_page_size(nand) +
1033 			  nanddev_per_page_oobsize(nand),
1034 	};
1035 	struct spi_mem_dirmap_desc *desc;
1036 
1037 	if (spinand->cont_read_possible)
1038 		info.length = nanddev_eraseblock_size(nand);
1039 
1040 	/* The plane number is passed in MSB just above the column address */
1041 	info.offset = plane << fls(nand->memorg.pagesize);
1042 
1043 	info.op_tmpl = *spinand->op_templates.update_cache;
1044 	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
1045 					  spinand->spimem, &info);
1046 	if (IS_ERR(desc))
1047 		return PTR_ERR(desc);
1048 
1049 	spinand->dirmaps[plane].wdesc = desc;
1050 
1051 	info.op_tmpl = *spinand->op_templates.read_cache;
1052 	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
1053 					  spinand->spimem, &info);
1054 	if (IS_ERR(desc))
1055 		return PTR_ERR(desc);
1056 
1057 	spinand->dirmaps[plane].rdesc = desc;
1058 
1059 	if (nand->ecc.engine->integration != NAND_ECC_ENGINE_INTEGRATION_PIPELINED) {
1060 		spinand->dirmaps[plane].wdesc_ecc = spinand->dirmaps[plane].wdesc;
1061 		spinand->dirmaps[plane].rdesc_ecc = spinand->dirmaps[plane].rdesc;
1062 
1063 		return 0;
1064 	}
1065 
1066 	info.op_tmpl = *spinand->op_templates.update_cache;
1067 	info.op_tmpl.data.ecc = true;
1068 	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
1069 					  spinand->spimem, &info);
1070 	if (IS_ERR(desc))
1071 		return PTR_ERR(desc);
1072 
1073 	spinand->dirmaps[plane].wdesc_ecc = desc;
1074 
1075 	info.op_tmpl = *spinand->op_templates.read_cache;
1076 	info.op_tmpl.data.ecc = true;
1077 	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
1078 					  spinand->spimem, &info);
1079 	if (IS_ERR(desc))
1080 		return PTR_ERR(desc);
1081 
1082 	spinand->dirmaps[plane].rdesc_ecc = desc;
1083 
1084 	return 0;
1085 }
1086 
spinand_create_dirmaps(struct spinand_device * spinand)1087 static int spinand_create_dirmaps(struct spinand_device *spinand)
1088 {
1089 	struct nand_device *nand = spinand_to_nand(spinand);
1090 	int i, ret;
1091 
1092 	spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
1093 					sizeof(*spinand->dirmaps) *
1094 					nand->memorg.planes_per_lun,
1095 					GFP_KERNEL);
1096 	if (!spinand->dirmaps)
1097 		return -ENOMEM;
1098 
1099 	for (i = 0; i < nand->memorg.planes_per_lun; i++) {
1100 		ret = spinand_create_dirmap(spinand, i);
1101 		if (ret)
1102 			return ret;
1103 	}
1104 
1105 	return 0;
1106 }
1107 
1108 static const struct nand_ops spinand_ops = {
1109 	.erase = spinand_erase,
1110 	.markbad = spinand_markbad,
1111 	.isbad = spinand_isbad,
1112 };
1113 
1114 static const struct spinand_manufacturer *spinand_manufacturers[] = {
1115 	&alliancememory_spinand_manufacturer,
1116 	&ato_spinand_manufacturer,
1117 	&esmt_c8_spinand_manufacturer,
1118 	&foresee_spinand_manufacturer,
1119 	&gigadevice_spinand_manufacturer,
1120 	&macronix_spinand_manufacturer,
1121 	&micron_spinand_manufacturer,
1122 	&paragon_spinand_manufacturer,
1123 	&toshiba_spinand_manufacturer,
1124 	&winbond_spinand_manufacturer,
1125 	&xtx_spinand_manufacturer,
1126 };
1127 
spinand_manufacturer_match(struct spinand_device * spinand,enum spinand_readid_method rdid_method)1128 static int spinand_manufacturer_match(struct spinand_device *spinand,
1129 				      enum spinand_readid_method rdid_method)
1130 {
1131 	u8 *id = spinand->id.data;
1132 	unsigned int i;
1133 	int ret;
1134 
1135 	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
1136 		const struct spinand_manufacturer *manufacturer =
1137 			spinand_manufacturers[i];
1138 
1139 		if (id[0] != manufacturer->id)
1140 			continue;
1141 
1142 		ret = spinand_match_and_init(spinand,
1143 					     manufacturer->chips,
1144 					     manufacturer->nchips,
1145 					     rdid_method);
1146 		if (ret < 0)
1147 			continue;
1148 
1149 		spinand->manufacturer = manufacturer;
1150 		return 0;
1151 	}
1152 	return -EOPNOTSUPP;
1153 }
1154 
spinand_id_detect(struct spinand_device * spinand)1155 static int spinand_id_detect(struct spinand_device *spinand)
1156 {
1157 	u8 *id = spinand->id.data;
1158 	int ret;
1159 
1160 	ret = spinand_read_id_op(spinand, 0, 0, id);
1161 	if (ret)
1162 		return ret;
1163 	ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
1164 	if (!ret)
1165 		return 0;
1166 
1167 	ret = spinand_read_id_op(spinand, 1, 0, id);
1168 	if (ret)
1169 		return ret;
1170 	ret = spinand_manufacturer_match(spinand,
1171 					 SPINAND_READID_METHOD_OPCODE_ADDR);
1172 	if (!ret)
1173 		return 0;
1174 
1175 	ret = spinand_read_id_op(spinand, 0, 1, id);
1176 	if (ret)
1177 		return ret;
1178 	ret = spinand_manufacturer_match(spinand,
1179 					 SPINAND_READID_METHOD_OPCODE_DUMMY);
1180 
1181 	return ret;
1182 }
1183 
spinand_manufacturer_init(struct spinand_device * spinand)1184 static int spinand_manufacturer_init(struct spinand_device *spinand)
1185 {
1186 	if (spinand->manufacturer->ops->init)
1187 		return spinand->manufacturer->ops->init(spinand);
1188 
1189 	return 0;
1190 }
1191 
spinand_manufacturer_cleanup(struct spinand_device * spinand)1192 static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
1193 {
1194 	/* Release manufacturer private data */
1195 	if (spinand->manufacturer->ops->cleanup)
1196 		return spinand->manufacturer->ops->cleanup(spinand);
1197 }
1198 
1199 static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device * spinand,const struct spinand_op_variants * variants)1200 spinand_select_op_variant(struct spinand_device *spinand,
1201 			  const struct spinand_op_variants *variants)
1202 {
1203 	struct nand_device *nand = spinand_to_nand(spinand);
1204 	unsigned int i;
1205 
1206 	for (i = 0; i < variants->nops; i++) {
1207 		struct spi_mem_op op = variants->ops[i];
1208 		unsigned int nbytes;
1209 		int ret;
1210 
1211 		nbytes = nanddev_per_page_oobsize(nand) +
1212 			 nanddev_page_size(nand);
1213 
1214 		while (nbytes) {
1215 			op.data.nbytes = nbytes;
1216 			ret = spi_mem_adjust_op_size(spinand->spimem, &op);
1217 			if (ret)
1218 				break;
1219 
1220 			if (!spi_mem_supports_op(spinand->spimem, &op))
1221 				break;
1222 
1223 			nbytes -= op.data.nbytes;
1224 		}
1225 
1226 		if (!nbytes)
1227 			return &variants->ops[i];
1228 	}
1229 
1230 	return NULL;
1231 }
1232 
1233 /**
1234  * spinand_match_and_init() - Try to find a match between a device ID and an
1235  *			      entry in a spinand_info table
1236  * @spinand: SPI NAND object
1237  * @table: SPI NAND device description table
1238  * @table_size: size of the device description table
1239  * @rdid_method: read id method to match
1240  *
1241  * Match between a device ID retrieved through the READ_ID command and an
1242  * entry in the SPI NAND description table. If a match is found, the spinand
1243  * object will be initialized with information provided by the matching
1244  * spinand_info entry.
1245  *
1246  * Return: 0 on success, a negative error code otherwise.
1247  */
spinand_match_and_init(struct spinand_device * spinand,const struct spinand_info * table,unsigned int table_size,enum spinand_readid_method rdid_method)1248 int spinand_match_and_init(struct spinand_device *spinand,
1249 			   const struct spinand_info *table,
1250 			   unsigned int table_size,
1251 			   enum spinand_readid_method rdid_method)
1252 {
1253 	u8 *id = spinand->id.data;
1254 	struct nand_device *nand = spinand_to_nand(spinand);
1255 	unsigned int i;
1256 
1257 	for (i = 0; i < table_size; i++) {
1258 		const struct spinand_info *info = &table[i];
1259 		const struct spi_mem_op *op;
1260 
1261 		if (rdid_method != info->devid.method)
1262 			continue;
1263 
1264 		if (memcmp(id + 1, info->devid.id, info->devid.len))
1265 			continue;
1266 
1267 		nand->memorg = table[i].memorg;
1268 		nanddev_set_ecc_requirements(nand, &table[i].eccreq);
1269 		spinand->eccinfo = table[i].eccinfo;
1270 		spinand->flags = table[i].flags;
1271 		spinand->id.len = 1 + table[i].devid.len;
1272 		spinand->select_target = table[i].select_target;
1273 		spinand->set_cont_read = table[i].set_cont_read;
1274 
1275 		op = spinand_select_op_variant(spinand,
1276 					       info->op_variants.read_cache);
1277 		if (!op)
1278 			return -ENOTSUPP;
1279 
1280 		spinand->op_templates.read_cache = op;
1281 
1282 		op = spinand_select_op_variant(spinand,
1283 					       info->op_variants.write_cache);
1284 		if (!op)
1285 			return -ENOTSUPP;
1286 
1287 		spinand->op_templates.write_cache = op;
1288 
1289 		op = spinand_select_op_variant(spinand,
1290 					       info->op_variants.update_cache);
1291 		spinand->op_templates.update_cache = op;
1292 
1293 		return 0;
1294 	}
1295 
1296 	return -ENOTSUPP;
1297 }
1298 
spinand_detect(struct spinand_device * spinand)1299 static int spinand_detect(struct spinand_device *spinand)
1300 {
1301 	struct device *dev = &spinand->spimem->spi->dev;
1302 	struct nand_device *nand = spinand_to_nand(spinand);
1303 	int ret;
1304 
1305 	ret = spinand_reset_op(spinand);
1306 	if (ret)
1307 		return ret;
1308 
1309 	ret = spinand_id_detect(spinand);
1310 	if (ret) {
1311 		dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
1312 			spinand->id.data);
1313 		return ret;
1314 	}
1315 
1316 	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
1317 		dev_err(dev,
1318 			"SPI NANDs with more than one die must implement ->select_target()\n");
1319 		return -EINVAL;
1320 	}
1321 
1322 	dev_info(&spinand->spimem->spi->dev,
1323 		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
1324 	dev_info(&spinand->spimem->spi->dev,
1325 		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
1326 		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
1327 		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));
1328 
1329 	return 0;
1330 }
1331 
spinand_init_flash(struct spinand_device * spinand)1332 static int spinand_init_flash(struct spinand_device *spinand)
1333 {
1334 	struct device *dev = &spinand->spimem->spi->dev;
1335 	struct nand_device *nand = spinand_to_nand(spinand);
1336 	int ret, i;
1337 
1338 	ret = spinand_read_cfg(spinand);
1339 	if (ret)
1340 		return ret;
1341 
1342 	ret = spinand_init_quad_enable(spinand);
1343 	if (ret)
1344 		return ret;
1345 
1346 	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
1347 	if (ret)
1348 		return ret;
1349 
1350 	ret = spinand_manufacturer_init(spinand);
1351 	if (ret) {
1352 		dev_err(dev,
1353 		"Failed to initialize the SPI NAND chip (err = %d)\n",
1354 		ret);
1355 		return ret;
1356 	}
1357 
1358 	/* After power up, all blocks are locked, so unlock them here. */
1359 	for (i = 0; i < nand->memorg.ntargets; i++) {
1360 		ret = spinand_select_target(spinand, i);
1361 		if (ret)
1362 			break;
1363 
1364 		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
1365 		if (ret)
1366 			break;
1367 	}
1368 
1369 	if (ret)
1370 		spinand_manufacturer_cleanup(spinand);
1371 
1372 	return ret;
1373 }
1374 
spinand_mtd_resume(struct mtd_info * mtd)1375 static void spinand_mtd_resume(struct mtd_info *mtd)
1376 {
1377 	struct spinand_device *spinand = mtd_to_spinand(mtd);
1378 	int ret;
1379 
1380 	ret = spinand_reset_op(spinand);
1381 	if (ret)
1382 		return;
1383 
1384 	ret = spinand_init_flash(spinand);
1385 	if (ret)
1386 		return;
1387 
1388 	spinand_ecc_enable(spinand, false);
1389 }
1390 
spinand_init(struct spinand_device * spinand)1391 static int spinand_init(struct spinand_device *spinand)
1392 {
1393 	struct device *dev = &spinand->spimem->spi->dev;
1394 	struct mtd_info *mtd = spinand_to_mtd(spinand);
1395 	struct nand_device *nand = mtd_to_nanddev(mtd);
1396 	int ret;
1397 
1398 	/*
1399 	 * We need a scratch buffer because the spi_mem interface requires that
1400 	 * buf passed in spi_mem_op->data.buf be DMA-able.
1401 	 */
1402 	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
1403 	if (!spinand->scratchbuf)
1404 		return -ENOMEM;
1405 
1406 	ret = spinand_detect(spinand);
1407 	if (ret)
1408 		goto err_free_bufs;
1409 
1410 	/*
1411 	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
1412 	 * may use this buffer for DMA access.
1413 	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
1414 	 */
1415 	spinand->databuf = kzalloc(nanddev_eraseblock_size(nand),
1416 				   GFP_KERNEL);
1417 	if (!spinand->databuf) {
1418 		ret = -ENOMEM;
1419 		goto err_free_bufs;
1420 	}
1421 
1422 	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);
1423 
1424 	ret = spinand_init_cfg_cache(spinand);
1425 	if (ret)
1426 		goto err_free_bufs;
1427 
1428 	ret = spinand_init_flash(spinand);
1429 	if (ret)
1430 		goto err_free_bufs;
1431 
1432 	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
1433 	if (ret)
1434 		goto err_manuf_cleanup;
1435 
1436 	/* SPI-NAND default ECC engine is on-die */
1437 	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
1438 	nand->ecc.ondie_engine = &spinand_ondie_ecc_engine;
1439 
1440 	spinand_ecc_enable(spinand, false);
1441 	ret = nanddev_ecc_engine_init(nand);
1442 	if (ret)
1443 		goto err_cleanup_nanddev;
1444 
1445 	/*
1446 	 * Continuous read can only be enabled with an on-die ECC engine, so the
1447 	 * ECC initialization must have happened previously.
1448 	 */
1449 	spinand_cont_read_init(spinand);
1450 
1451 	mtd->_read_oob = spinand_mtd_read;
1452 	mtd->_write_oob = spinand_mtd_write;
1453 	mtd->_block_isbad = spinand_mtd_block_isbad;
1454 	mtd->_block_markbad = spinand_mtd_block_markbad;
1455 	mtd->_block_isreserved = spinand_mtd_block_isreserved;
1456 	mtd->_erase = spinand_mtd_erase;
1457 	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
1458 	mtd->_resume = spinand_mtd_resume;
1459 
1460 	if (nand->ecc.engine) {
1461 		ret = mtd_ooblayout_count_freebytes(mtd);
1462 		if (ret < 0)
1463 			goto err_cleanup_ecc_engine;
1464 	}
1465 
1466 	mtd->oobavail = ret;
1467 
1468 	/* Propagate ECC information to mtd_info */
1469 	mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
1470 	mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;
1471 	mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
1472 
1473 	ret = spinand_create_dirmaps(spinand);
1474 	if (ret) {
1475 		dev_err(dev,
1476 			"Failed to create direct mappings for read/write operations (err = %d)\n",
1477 			ret);
1478 		goto err_cleanup_ecc_engine;
1479 	}
1480 
1481 	return 0;
1482 
1483 err_cleanup_ecc_engine:
1484 	nanddev_ecc_engine_cleanup(nand);
1485 
1486 err_cleanup_nanddev:
1487 	nanddev_cleanup(nand);
1488 
1489 err_manuf_cleanup:
1490 	spinand_manufacturer_cleanup(spinand);
1491 
1492 err_free_bufs:
1493 	kfree(spinand->databuf);
1494 	kfree(spinand->scratchbuf);
1495 	return ret;
1496 }
1497 
spinand_cleanup(struct spinand_device * spinand)1498 static void spinand_cleanup(struct spinand_device *spinand)
1499 {
1500 	struct nand_device *nand = spinand_to_nand(spinand);
1501 
1502 	nanddev_ecc_engine_cleanup(nand);
1503 	nanddev_cleanup(nand);
1504 	spinand_manufacturer_cleanup(spinand);
1505 	kfree(spinand->databuf);
1506 	kfree(spinand->scratchbuf);
1507 }
1508 
spinand_probe(struct spi_mem * mem)1509 static int spinand_probe(struct spi_mem *mem)
1510 {
1511 	struct spinand_device *spinand;
1512 	struct mtd_info *mtd;
1513 	int ret;
1514 
1515 	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
1516 			       GFP_KERNEL);
1517 	if (!spinand)
1518 		return -ENOMEM;
1519 
1520 	spinand->spimem = mem;
1521 	spi_mem_set_drvdata(mem, spinand);
1522 	spinand_set_of_node(spinand, mem->spi->dev.of_node);
1523 	mutex_init(&spinand->lock);
1524 	mtd = spinand_to_mtd(spinand);
1525 	mtd->dev.parent = &mem->spi->dev;
1526 
1527 	ret = spinand_init(spinand);
1528 	if (ret)
1529 		return ret;
1530 
1531 	ret = mtd_device_register(mtd, NULL, 0);
1532 	if (ret)
1533 		goto err_spinand_cleanup;
1534 
1535 	return 0;
1536 
1537 err_spinand_cleanup:
1538 	spinand_cleanup(spinand);
1539 
1540 	return ret;
1541 }
1542 
spinand_remove(struct spi_mem * mem)1543 static int spinand_remove(struct spi_mem *mem)
1544 {
1545 	struct spinand_device *spinand;
1546 	struct mtd_info *mtd;
1547 	int ret;
1548 
1549 	spinand = spi_mem_get_drvdata(mem);
1550 	mtd = spinand_to_mtd(spinand);
1551 
1552 	ret = mtd_device_unregister(mtd);
1553 	if (ret)
1554 		return ret;
1555 
1556 	spinand_cleanup(spinand);
1557 
1558 	return 0;
1559 }
1560 
1561 static const struct spi_device_id spinand_ids[] = {
1562 	{ .name = "spi-nand" },
1563 	{ /* sentinel */ },
1564 };
1565 MODULE_DEVICE_TABLE(spi, spinand_ids);
1566 
1567 #ifdef CONFIG_OF
1568 static const struct of_device_id spinand_of_ids[] = {
1569 	{ .compatible = "spi-nand" },
1570 	{ /* sentinel */ },
1571 };
1572 MODULE_DEVICE_TABLE(of, spinand_of_ids);
1573 #endif
1574 
1575 static struct spi_mem_driver spinand_drv = {
1576 	.spidrv = {
1577 		.id_table = spinand_ids,
1578 		.driver = {
1579 			.name = "spi-nand",
1580 			.of_match_table = of_match_ptr(spinand_of_ids),
1581 		},
1582 	},
1583 	.probe = spinand_probe,
1584 	.remove = spinand_remove,
1585 };
1586 module_spi_mem_driver(spinand_drv);
1587 
1588 MODULE_DESCRIPTION("SPI NAND framework");
1589 MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
1590 MODULE_LICENSE("GPL v2");
1591