/*
 * drivers/mtd/devices/goldfish_nand.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (C) 2012 Intel, Inc.
 * Copyright (C) 2013 Intel, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/io.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/platform_device.h>

#include <asm/div64.h>

#include "goldfish_nand_reg.h"

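/*
 * Driver state for the goldfish virtual NAND controller: a lock
 * serialising register access, the MMIO base, an optional shared
 * cmd_params block used for batched commands, and one mtd_info per
 * emulated NAND device (flexible array at the end of the allocation).
 */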
struct goldfish_nand {
	spinlock_t              lock;
	unsigned char __iomem  *base;
	struct cmd_params       *cmd_params;
	size_t                  mtd_count;
	struct mtd_info         mtd[0];
};

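/*
 * Issue a command through the shared cmd_params block, if the device
 * advertised that capability.  The parameters are written into the block
 * and a single *_WITH_PARAMS command is written to NAND_COMMAND; the
 * result is read back from the block.  Returns non-zero if the command
 * cannot be issued this way, in which case the caller falls back to
 * programming the individual registers.
 */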
static u32 goldfish_nand_cmd_with_params(struct mtd_info *mtd,
			enum nand_cmd cmd, u64 addr, u32 len,
			void *ptr, u32 *rv)
{
	u32 cmdp;
	struct goldfish_nand *nand = mtd->priv;
	struct cmd_params *cps = nand->cmd_params;
	unsigned char __iomem  *base = nand->base;

	if (cps == NULL)
		return -1;

	switch (cmd) {
	case NAND_CMD_ERASE:
		cmdp = NAND_CMD_ERASE_WITH_PARAMS;
		break;
	case NAND_CMD_READ:
		cmdp = NAND_CMD_READ_WITH_PARAMS;
		break;
	case NAND_CMD_WRITE:
		cmdp = NAND_CMD_WRITE_WITH_PARAMS;
		break;
	default:
		return -1;
	}
	cps->dev = mtd - nand->mtd;
	cps->addr_high = (u32)(addr >> 32);
	cps->addr_low = (u32)addr;
	cps->transfer_size = len;
	cps->data = (unsigned long)ptr;
	writel(cmdp, base + NAND_COMMAND);
	*rv = cps->result;
	return 0;
}

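/*
 * Run a single NAND command under the device lock.  The fast path uses
 * the cmd_params block; otherwise the device index, address, transfer
 * size and buffer address are programmed register by register before the
 * command is written and the result read back.  Callers interpret the
 * result either as a byte count or, for the bad-block commands, as a flag.
 */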
static u32 goldfish_nand_cmd(struct mtd_info *mtd, enum nand_cmd cmd,
				u64 addr, u32 len, void *ptr)
{
	struct goldfish_nand *nand = mtd->priv;
	u32 rv;
	unsigned long irq_flags;
	unsigned char __iomem  *base = nand->base;

	spin_lock_irqsave(&nand->lock, irq_flags);
	if (goldfish_nand_cmd_with_params(mtd, cmd, addr, len, ptr, &rv)) {
		writel(mtd - nand->mtd, base + NAND_DEV);
		writel((u32)(addr >> 32), base + NAND_ADDR_HIGH);
		writel((u32)addr, base + NAND_ADDR_LOW);
		writel(len, base + NAND_TRANSFER_SIZE);
		writel((u32)ptr, base + NAND_DATA);
#ifdef CONFIG_64BIT
		writel((u32)((u64)ptr >> 32), base + NAND_DATA_HIGH);
#endif
		writel(cmd, base + NAND_COMMAND);
		rv = readl(base + NAND_RESULT);
	}
	spin_unlock_irqrestore(&nand->lock, irq_flags);
	return rv;
}

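/*
 * MTD erase hook.  The MTD offset and length are expressed in data bytes
 * and must be page aligned; the emulated device addresses raw pages, so
 * both are rescaled by (writesize + oobsize) / writesize before the erase
 * command is issued.  The device is expected to report the full raw
 * length on success.
 */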
static int goldfish_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	loff_t ofs = instr->addr;
	u32 len = instr->len;
	u32 rem;

	if (ofs + len > mtd->size)
		goto invalid_arg;
	rem = do_div(ofs, mtd->writesize);
	if (rem)
		goto invalid_arg;
	ofs *= (mtd->writesize + mtd->oobsize);

	if (len % mtd->writesize)
		goto invalid_arg;
	len = len / mtd->writesize * (mtd->writesize + mtd->oobsize);

	if (goldfish_nand_cmd(mtd, NAND_CMD_ERASE, ofs, len, NULL) != len) {
		pr_err("goldfish_nand_erase: erase failed, start %llx, len %x, dev_size %llx, erase_size %x\n",
			ofs, len, mtd->size, mtd->erasesize);
		return -EIO;
	}

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;

invalid_arg:
	pr_err("goldfish_nand_erase: invalid erase, start %llx, len %x, dev_size %llx, erase_size %x\n",
		ofs, len, mtd->size, mtd->erasesize);
	return -EINVAL;
}

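/*
 * MTD read-with-OOB hook.  After checking that the request is page
 * aligned and fits the device, the page data (if any) is read from the
 * raw page offset and the OOB data from the region immediately after it.
 */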
static int goldfish_nand_read_oob(struct mtd_info *mtd, loff_t ofs,
				struct mtd_oob_ops *ops)
{
	u32 rem;

	if (ofs + ops->len > mtd->size)
		goto invalid_arg;
	if (ops->datbuf && ops->len && ops->len != mtd->writesize)
		goto invalid_arg;
	if (ops->ooblen + ops->ooboffs > mtd->oobsize)
		goto invalid_arg;

	rem = do_div(ofs, mtd->writesize);
	if (rem)
		goto invalid_arg;
	ofs *= (mtd->writesize + mtd->oobsize);

	if (ops->datbuf)
		ops->retlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, ofs,
						ops->len, ops->datbuf);
	ofs += mtd->writesize + ops->ooboffs;
	if (ops->oobbuf)
		ops->oobretlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, ofs,
						ops->ooblen, ops->oobbuf);
	return 0;

invalid_arg:
	pr_err("goldfish_nand_read_oob: invalid read, start %llx, len %zx, ooblen %zx, dev_size %llx, write_size %x\n",
		ofs, ops->len, ops->ooblen, mtd->size, mtd->writesize);
	return -EINVAL;
}

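/*
 * MTD write-with-OOB hook, the mirror image of goldfish_nand_read_oob():
 * page data and OOB data are written to consecutive regions of the raw
 * page after the same kind of alignment and bounds checks.
 */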
static int goldfish_nand_write_oob(struct mtd_info *mtd, loff_t ofs,
				struct mtd_oob_ops *ops)
{
	u32 rem;

	if (ofs + ops->len > mtd->size)
		goto invalid_arg;
	if (ops->len && ops->len != mtd->writesize)
		goto invalid_arg;
	if (ops->ooblen + ops->ooboffs > mtd->oobsize)
		goto invalid_arg;

	rem = do_div(ofs, mtd->writesize);
	if (rem)
		goto invalid_arg;
	ofs *= (mtd->writesize + mtd->oobsize);

	if (ops->datbuf)
		ops->retlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, ofs,
						ops->len, ops->datbuf);
	ofs += mtd->writesize + ops->ooboffs;
	if (ops->oobbuf)
		ops->oobretlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, ofs,
						ops->ooblen, ops->oobbuf);
	return 0;

invalid_arg:
	pr_err("goldfish_nand_write_oob: invalid write, start %llx, len %zx, ooblen %zx, dev_size %llx, write_size %x\n",
		ofs, ops->len, ops->ooblen, mtd->size, mtd->writesize);
	return -EINVAL;
}

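/*
 * MTD read hook for plain page data.  The data offset must be page
 * aligned and is converted to a raw page offset; the requested length is
 * passed to the device unchanged.
 */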
static int goldfish_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
				size_t *retlen, u_char *buf)
{
	u32 rem;

	if (from + len > mtd->size)
		goto invalid_arg;

	rem = do_div(from, mtd->writesize);
	if (rem)
		goto invalid_arg;
	from *= (mtd->writesize + mtd->oobsize);

	*retlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, from, len, buf);
	return 0;

invalid_arg:
	pr_err("goldfish_nand_read: invalid read, start %llx, len %zx, dev_size %llx, write_size %x\n",
		from, len, mtd->size, mtd->writesize);
	return -EINVAL;
}

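/*
 * MTD write hook for plain page data; same offset conversion as
 * goldfish_nand_read(), with the const buffer cast away for the shared
 * command helper.
 */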
static int goldfish_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
				size_t *retlen, const u_char *buf)
{
	u32 rem;

	if (to + len > mtd->size)
		goto invalid_arg;

	rem = do_div(to, mtd->writesize);
	if (rem)
		goto invalid_arg;
	to *= (mtd->writesize + mtd->oobsize);

	*retlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, to, len, (void *)buf);
	return 0;

invalid_arg:
	pr_err("goldfish_nand_write: invalid write, start %llx, len %zx, dev_size %llx, write_size %x\n",
		to, len, mtd->size, mtd->writesize);
	return -EINVAL;
}

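/*
 * MTD bad-block query.  The eraseblock offset is converted to the raw
 * offset of its first page, and the result of NAND_CMD_BLOCK_BAD_GET is
 * returned directly as the bad-block flag.
 */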
static int goldfish_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	u32 rem;

	if (ofs >= mtd->size)
		goto invalid_arg;

	rem = do_div(ofs, mtd->erasesize);
	if (rem)
		goto invalid_arg;
	ofs *= mtd->erasesize / mtd->writesize;
	ofs *= (mtd->writesize + mtd->oobsize);

	return goldfish_nand_cmd(mtd, NAND_CMD_BLOCK_BAD_GET, ofs, 0, NULL);

invalid_arg:
	pr_err("goldfish_nand_block_isbad: invalid arg, ofs %llx, dev_size %llx, write_size %x\n",
		ofs, mtd->size, mtd->writesize);
	return -EINVAL;
}

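/*
 * MTD bad-block marking.  A result other than 1 from
 * NAND_CMD_BLOCK_BAD_SET is treated as an I/O error.
 */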
static int goldfish_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	u32 rem;

	if (ofs >= mtd->size)
		goto invalid_arg;

	rem = do_div(ofs, mtd->erasesize);
	if (rem)
		goto invalid_arg;
	ofs *= mtd->erasesize / mtd->writesize;
	ofs *= (mtd->writesize + mtd->oobsize);

	if (goldfish_nand_cmd(mtd, NAND_CMD_BLOCK_BAD_SET, ofs, 0, NULL) != 1)
		return -EIO;
	return 0;

invalid_arg:
	pr_err("goldfish_nand_block_markbad: invalid arg, ofs %llx, dev_size %llx, write_size %x\n",
		ofs, mtd->size, mtd->writesize);
	return -EINVAL;
}

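/*
 * Allocate the shared cmd_params block and tell the device its physical
 * address, enabling the *_WITH_PARAMS fast path in goldfish_nand_cmd().
 */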
static int nand_setup_cmd_params(struct platform_device *pdev,
						struct goldfish_nand *nand)
{
	u64 paddr;
	unsigned char __iomem  *base = nand->base;

	nand->cmd_params = devm_kzalloc(&pdev->dev,
					sizeof(struct cmd_params), GFP_KERNEL);
	if (!nand->cmd_params)
		return -1;

	paddr = __pa(nand->cmd_params);
	writel((u32)(paddr >> 32), base + NAND_CMD_PARAMS_ADDR_HIGH);
	writel((u32)paddr, base + NAND_CMD_PARAMS_ADDR_LOW);
	return 0;
}

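/*
 * Set up one emulated NAND device: read its geometry and flags from the
 * device registers (converting raw sizes, which include OOB bytes, to
 * MTD data sizes), fetch its name, wire up the MTD operations and
 * register the resulting mtd_info.
 */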
static int goldfish_nand_init_device(struct platform_device *pdev,
					struct goldfish_nand *nand, int id)
{
	u32 name_len;
	u32 result;
	u32 flags;
	unsigned long irq_flags;
	unsigned char __iomem  *base = nand->base;
	struct mtd_info *mtd = &nand->mtd[id];
	char *name;

	spin_lock_irqsave(&nand->lock, irq_flags);
	writel(id, base + NAND_DEV);
	flags = readl(base + NAND_DEV_FLAGS);
	name_len = readl(base + NAND_DEV_NAME_LEN);
	mtd->writesize = readl(base + NAND_DEV_PAGE_SIZE);
	mtd->size = readl(base + NAND_DEV_SIZE_LOW);
	mtd->size |= (u64)readl(base + NAND_DEV_SIZE_HIGH) << 32;
	mtd->oobsize = readl(base + NAND_DEV_EXTRA_SIZE);
	mtd->oobavail = mtd->oobsize;
	mtd->erasesize = readl(base + NAND_DEV_ERASE_SIZE) /
			(mtd->writesize + mtd->oobsize) * mtd->writesize;
	do_div(mtd->size, mtd->writesize + mtd->oobsize);
	mtd->size *= mtd->writesize;
	dev_dbg(&pdev->dev,
		"goldfish nand dev%d: size %llx, page %d, extra %d, erase %d\n",
		       id, mtd->size, mtd->writesize, mtd->oobsize, mtd->erasesize);
	spin_unlock_irqrestore(&nand->lock, irq_flags);

	mtd->priv = nand;

	mtd->name = name = devm_kzalloc(&pdev->dev, name_len + 1, GFP_KERNEL);
	if (name == NULL)
		return -ENOMEM;

	result = goldfish_nand_cmd(mtd, NAND_CMD_GET_DEV_NAME, 0, name_len,
									name);
	if (result != name_len) {
		dev_err(&pdev->dev,
			"goldfish_nand_init_device failed to get dev name %d != %d\n",
			       result, name_len);
		return -ENODEV;
	}
	((char *) mtd->name)[name_len] = '\0';

	/* Setup the MTD structure */
	mtd->type = MTD_NANDFLASH;
	mtd->flags = MTD_CAP_NANDFLASH;
	if (flags & NAND_DEV_FLAG_READ_ONLY)
		mtd->flags &= ~MTD_WRITEABLE;
	if (flags & NAND_DEV_FLAG_CMD_PARAMS_CAP)
		nand_setup_cmd_params(pdev, nand);

	mtd->owner = THIS_MODULE;
	mtd->_erase = goldfish_nand_erase;
	mtd->_read = goldfish_nand_read;
	mtd->_write = goldfish_nand_write;
	mtd->_read_oob = goldfish_nand_read_oob;
	mtd->_write_oob = goldfish_nand_write_oob;
	mtd->_block_isbad = goldfish_nand_block_isbad;
	mtd->_block_markbad = goldfish_nand_block_markbad;

	if (mtd_device_register(mtd, NULL, 0))
		return -EIO;

	return 0;
}

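/*
 * Platform probe: map the controller registers, check the interface
 * version, find out how many NAND devices the emulator exposes, and
 * initialise each one.  Probing succeeds as long as at least one device
 * comes up.
 */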
static int goldfish_nand_probe(struct platform_device *pdev)
{
	u32 num_dev;
	int i;
	int err;
	u32 num_dev_working;
	u32 version;
	struct resource *r;
	struct goldfish_nand *nand;
	unsigned char __iomem  *base;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL)
		return -ENODEV;

	base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
	if (base == NULL)
		return -ENOMEM;

	version = readl(base + NAND_VERSION);
	if (version != NAND_VERSION_CURRENT) {
		dev_err(&pdev->dev,
			"goldfish_nand_init: version mismatch, got %d, expected %d\n",
				version, NAND_VERSION_CURRENT);
		return -ENODEV;
	}
	num_dev = readl(base + NAND_NUM_DEV);
	if (num_dev == 0)
		return -ENODEV;

	nand = devm_kzalloc(&pdev->dev, sizeof(*nand) +
				sizeof(struct mtd_info) * num_dev, GFP_KERNEL);
	if (nand == NULL)
		return -ENOMEM;

	spin_lock_init(&nand->lock);
	nand->base = base;
	nand->mtd_count = num_dev;
	platform_set_drvdata(pdev, nand);

	num_dev_working = 0;
	for (i = 0; i < num_dev; i++) {
		err = goldfish_nand_init_device(pdev, nand, i);
		if (err == 0)
			num_dev_working++;
	}
	if (num_dev_working == 0)
		return -ENODEV;
	return 0;
}

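/*
 * Platform remove: unregister the per-device mtd_info entries that were
 * populated during probe (entries that never got a name are skipped).
 * Memory and the MMIO mapping are devm-managed, so nothing else needs to
 * be freed here.
 */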
static int goldfish_nand_remove(struct platform_device *pdev)
{
	struct goldfish_nand *nand = platform_get_drvdata(pdev);
	int i;
	for (i = 0; i < nand->mtd_count; i++) {
		if (nand->mtd[i].name)
			mtd_device_unregister(&nand->mtd[i]);
	}
	return 0;
}

static struct platform_driver goldfish_nand_driver = {
	.probe		= goldfish_nand_probe,
	.remove		= goldfish_nand_remove,
	.driver = {
		.name = "goldfish_nand"
	}
};

module_platform_driver(goldfish_nand_driver);
MODULE_LICENSE("GPL");