/*
 * drivers/mtd/devices/goldfish_nand.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (C) 2012 Intel, Inc.
 * Copyright (C) 2013 Intel, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/io.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/vmalloc.h>
#include <linux/mtd/mtd.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/goldfish.h>
#include <asm/div64.h>
#include <linux/dma-mapping.h>

#include "goldfish_nand_reg.h"

struct goldfish_nand {
	/* lock protects access to the device registers */
	struct mutex lock;
	unsigned char __iomem *base;
	struct cmd_params *cmd_params;
	size_t mtd_count;
	struct mtd_info mtd[];
};

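/*
 * Fast path: pass all command parameters to the device through the
 * shared cmd_params buffer, so the whole command costs a single
 * register write.  Returns 0 and stores the device's result in *rv,
 * or -1 if the buffer is not set up or the command has no
 * parameter-based variant.  Must be called with nand->lock held.
 */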
static u32 goldfish_nand_cmd_with_params(struct mtd_info *mtd,
					 enum nand_cmd cmd, u64 addr, u32 len,
					 void *ptr, u32 *rv)
{
	u32 cmdp;
	struct goldfish_nand *nand = mtd->priv;
	struct cmd_params *cps = nand->cmd_params;
	unsigned char __iomem *base = nand->base;

	if (!cps)
		return -1;

	switch (cmd) {
	case NAND_CMD_ERASE:
		cmdp = NAND_CMD_ERASE_WITH_PARAMS;
		break;
	case NAND_CMD_READ:
		cmdp = NAND_CMD_READ_WITH_PARAMS;
		break;
	case NAND_CMD_WRITE:
		cmdp = NAND_CMD_WRITE_WITH_PARAMS;
		break;
	default:
		return -1;
	}
	cps->dev = mtd - nand->mtd;
	cps->addr_high = (u32)(addr >> 32);
	cps->addr_low = (u32)addr;
	cps->transfer_size = len;
	cps->data = (unsigned long)ptr;
	writel(cmdp, base + NAND_COMMAND);
	*rv = cps->result;
	return 0;
}

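/*
 * Issue a NAND command, serialized by nand->lock.  The cmd_params
 * fast path is tried first; if it is unavailable, each parameter is
 * written to its own device register before the command register is
 * poked and the result read back.
 */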
static u32 goldfish_nand_cmd(struct mtd_info *mtd, enum nand_cmd cmd,
			     u64 addr, u32 len, void *ptr)
{
	struct goldfish_nand *nand = mtd->priv;
	u32 rv;
	unsigned char __iomem *base = nand->base;

	mutex_lock(&nand->lock);
	if (goldfish_nand_cmd_with_params(mtd, cmd, addr, len, ptr, &rv)) {
		writel(mtd - nand->mtd, base + NAND_DEV);
		writel((u32)(addr >> 32), base + NAND_ADDR_HIGH);
		writel((u32)addr, base + NAND_ADDR_LOW);
		writel(len, base + NAND_TRANSFER_SIZE);
		gf_write_ptr(ptr, base + NAND_DATA, base + NAND_DATA_HIGH);
		writel(cmd, base + NAND_COMMAND);
		rv = readl(base + NAND_RESULT);
	}
	mutex_unlock(&nand->lock);
	return rv;
}

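/*
 * MTD offsets count data bytes only, while the device stores each
 * page as writesize data bytes followed by oobsize spare bytes.
 * Erase therefore rescales the page-aligned offset and length into
 * raw device coordinates before issuing the command.
 */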
static int goldfish_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	loff_t ofs = instr->addr;
	u32 len = instr->len;
	s32 rem;

	if (ofs + len > mtd->size)
		goto invalid_arg;
	ofs = div_s64_rem(ofs, mtd->writesize, &rem);
	if (rem)
		goto invalid_arg;
	ofs *= (mtd->writesize + mtd->oobsize);

	if (len % mtd->writesize)
		goto invalid_arg;
	len = len / mtd->writesize * (mtd->writesize + mtd->oobsize);

	if (goldfish_nand_cmd(mtd, NAND_CMD_ERASE, ofs, len, NULL) != len) {
		pr_err("goldfish_nand_erase: erase failed, start %llx, len %x, dev_size %llx, erase_size %x\n",
		       ofs, len, mtd->size, mtd->erasesize);
		return -EIO;
	}

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;

invalid_arg:
	pr_err("goldfish_nand_erase: invalid erase, start %llx, len %x, dev_size %llx, erase_size %x\n",
	       ofs, len, mtd->size, mtd->erasesize);
	return -EINVAL;
}

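/*
 * Read page data and/or out-of-band bytes.  Data reads are limited to
 * exactly one page; the OOB window must fit within the spare area of
 * a single page.
 */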
static int goldfish_nand_read_oob(struct mtd_info *mtd, loff_t ofs,
				  struct mtd_oob_ops *ops)
{
	s32 rem;

	if (ofs + ops->len > mtd->size)
		goto invalid_arg;
	if (ops->datbuf && ops->len && ops->len != mtd->writesize)
		goto invalid_arg;
	if (ops->ooblen + ops->ooboffs > mtd->oobsize)
		goto invalid_arg;

	ofs = div_s64_rem(ofs, mtd->writesize, &rem);
	if (rem)
		goto invalid_arg;
	ofs *= (mtd->writesize + mtd->oobsize);

	if (ops->datbuf)
		ops->retlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, ofs,
						ops->len, ops->datbuf);
	ofs += mtd->writesize + ops->ooboffs;
	if (ops->oobbuf)
		ops->oobretlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, ofs,
						   ops->ooblen, ops->oobbuf);
	return 0;

invalid_arg:
	pr_err("goldfish_nand_read_oob: invalid read, start %llx, len %zx, ooblen %zx, dev_size %llx, write_size %x\n",
	       ofs, ops->len, ops->ooblen, mtd->size, mtd->writesize);
	return -EINVAL;
}

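/*
 * Write page data and/or out-of-band bytes, mirroring the layout and
 * single-page restrictions of goldfish_nand_read_oob() above.
 */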
static int goldfish_nand_write_oob(struct mtd_info *mtd, loff_t ofs,
				   struct mtd_oob_ops *ops)
{
	s32 rem;

	if (ofs + ops->len > mtd->size)
		goto invalid_arg;
	if (ops->len && ops->len != mtd->writesize)
		goto invalid_arg;
	if (ops->ooblen + ops->ooboffs > mtd->oobsize)
		goto invalid_arg;

	ofs = div_s64_rem(ofs, mtd->writesize, &rem);
	if (rem)
		goto invalid_arg;
	ofs *= (mtd->writesize + mtd->oobsize);

	if (ops->datbuf)
		ops->retlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, ofs,
						ops->len, ops->datbuf);
	ofs += mtd->writesize + ops->ooboffs;
	if (ops->oobbuf)
		ops->oobretlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, ofs,
						   ops->ooblen, ops->oobbuf);
	return 0;

invalid_arg:
	pr_err("goldfish_nand_write_oob: invalid write, start %llx, len %zx, ooblen %zx, dev_size %llx, write_size %x\n",
	       ofs, ops->len, ops->ooblen, mtd->size, mtd->writesize);
	return -EINVAL;
}

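/*
 * Plain data read.  The start offset must be page aligned and is
 * rescaled into device coordinates; the length is passed through
 * unscaled, since only data bytes are transferred.
 */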
static int goldfish_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
			      size_t *retlen, u_char *buf)
{
	s32 rem;

	if (from + len > mtd->size)
		goto invalid_arg;

	from = div_s64_rem(from, mtd->writesize, &rem);
	if (rem)
		goto invalid_arg;
	from *= (mtd->writesize + mtd->oobsize);

	*retlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, from, len, buf);
	return 0;

invalid_arg:
	pr_err("goldfish_nand_read: invalid read, start %llx, len %zx, dev_size %llx, write_size %x\n",
	       from, len, mtd->size, mtd->writesize);
	return -EINVAL;
}

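/*
 * Plain data write.  Same page-alignment rule and offset scaling as
 * goldfish_nand_read().
 */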
static int goldfish_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
			       size_t *retlen, const u_char *buf)
{
	s32 rem;

	if (to + len > mtd->size)
		goto invalid_arg;

	to = div_s64_rem(to, mtd->writesize, &rem);
	if (rem)
		goto invalid_arg;
	to *= (mtd->writesize + mtd->oobsize);

	*retlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, to, len, (void *)buf);
	return 0;

invalid_arg:
	pr_err("goldfish_nand_write: invalid write, start %llx, len %zx, dev_size %llx, write_size %x\n",
	       to, len, mtd->size, mtd->writesize);
	return -EINVAL;
}

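/*
 * Bad-block query.  The erase-block-aligned MTD offset is converted
 * to the raw device offset of the block's first page (block index *
 * pages per block * raw page size) before asking the device for its
 * bad-block flag.
 */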
static int goldfish_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	s32 rem;

	if (ofs >= mtd->size)
		goto invalid_arg;

	ofs = div_s64_rem(ofs, mtd->erasesize, &rem);
	if (rem)
		goto invalid_arg;
	ofs *= mtd->erasesize / mtd->writesize;
	ofs *= (mtd->writesize + mtd->oobsize);

	return goldfish_nand_cmd(mtd, NAND_CMD_BLOCK_BAD_GET, ofs, 0, NULL);

invalid_arg:
	pr_err("goldfish_nand_block_isbad: invalid arg, ofs %llx, dev_size %llx, write_size %x\n",
	       ofs, mtd->size, mtd->writesize);
	return -EINVAL;
}

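/*
 * Mark an erase block bad.  Offset conversion matches
 * goldfish_nand_block_isbad(); the device returns 1 on success.
 */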
static int goldfish_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	s32 rem;

	if (ofs >= mtd->size)
		goto invalid_arg;

	ofs = div_s64_rem(ofs, mtd->erasesize, &rem);
	if (rem)
		goto invalid_arg;
	ofs *= mtd->erasesize / mtd->writesize;
	ofs *= (mtd->writesize + mtd->oobsize);

	if (goldfish_nand_cmd(mtd, NAND_CMD_BLOCK_BAD_SET, ofs, 0, NULL) != 1)
		return -EIO;
	return 0;

invalid_arg:
	pr_err("goldfish_nand_block_markbad: invalid arg, ofs %llx, dev_size %llx, write_size %x\n",
	       ofs, mtd->size, mtd->writesize);
	return -EINVAL;
}

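/*
 * Allocate the DMA-coherent cmd_params buffer shared with the device
 * and tell the device where it lives, enabling the single-write
 * command fast path.
 */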
static int nand_setup_cmd_params(struct platform_device *pdev,
				 struct goldfish_nand *nand)
{
	dma_addr_t dma_handle;
	unsigned char __iomem *base = nand->base;

	nand->cmd_params = dmam_alloc_coherent(&pdev->dev,
					       sizeof(struct cmd_params),
					       &dma_handle, GFP_KERNEL);
	if (!nand->cmd_params) {
		dev_err(&pdev->dev, "allocate buffer failed\n");
		return -ENOMEM;
	}
	writel((u32)((u64)dma_handle >> 32), base + NAND_CMD_PARAMS_ADDR_HIGH);
	writel((u32)dma_handle, base + NAND_CMD_PARAMS_ADDR_LOW);
	return 0;
}

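/*
 * Probe one emulated NAND device: read its geometry and name from the
 * device registers, derive the MTD view (data bytes only) from the
 * raw size (data + spare bytes), and register the resulting mtd_info.
 */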
static int goldfish_nand_init_device(struct platform_device *pdev,
				     struct goldfish_nand *nand, int id)
{
	u32 name_len;
	u32 result;
	u32 flags;
	unsigned char __iomem *base = nand->base;
	struct mtd_info *mtd = &nand->mtd[id];
	char *name;

	mutex_lock(&nand->lock);
	writel(id, base + NAND_DEV);
	flags = readl(base + NAND_DEV_FLAGS);
	name_len = readl(base + NAND_DEV_NAME_LEN);
	mtd->writesize = readl(base + NAND_DEV_PAGE_SIZE);
	mtd->size = readl(base + NAND_DEV_SIZE_LOW);
	mtd->size |= (u64)readl(base + NAND_DEV_SIZE_HIGH) << 32;
	mtd->oobsize = readl(base + NAND_DEV_EXTRA_SIZE);
	mtd->oobavail = mtd->oobsize;
	mtd->erasesize = readl(base + NAND_DEV_ERASE_SIZE) /
			 (mtd->writesize + mtd->oobsize) * mtd->writesize;
	mtd->size = div_s64(mtd->size, mtd->writesize + mtd->oobsize);
	mtd->size *= mtd->writesize;
	dev_dbg(&pdev->dev,
		"goldfish nand dev%d: size %llx, page %d, extra %d, erase %d\n",
		id, mtd->size, mtd->writesize,
		mtd->oobsize, mtd->erasesize);
	mutex_unlock(&nand->lock);

	mtd->priv = nand;

	name = devm_kzalloc(&pdev->dev, name_len + 1, GFP_KERNEL);
	if (!name)
		return -ENOMEM;
	mtd->name = name;

	result = goldfish_nand_cmd(mtd, NAND_CMD_GET_DEV_NAME, 0, name_len,
				   name);
	if (result != name_len) {
		dev_err(&pdev->dev,
			"goldfish_nand_init_device failed to get dev name %d != %d\n",
			result, name_len);
		return -ENODEV;
	}
	name[name_len] = '\0';

	/* Setup the MTD structure */
	mtd->type = MTD_NANDFLASH;
	mtd->flags = MTD_CAP_NANDFLASH;
	if (flags & NAND_DEV_FLAG_READ_ONLY)
		mtd->flags &= ~MTD_WRITEABLE;
	if (flags & NAND_DEV_FLAG_CMD_PARAMS_CAP)
		nand_setup_cmd_params(pdev, nand);

	mtd->owner = THIS_MODULE;
	mtd->_erase = goldfish_nand_erase;
	mtd->_read = goldfish_nand_read;
	mtd->_write = goldfish_nand_write;
	mtd->_read_oob = goldfish_nand_read_oob;
	mtd->_write_oob = goldfish_nand_write_oob;
	mtd->_block_isbad = goldfish_nand_block_isbad;
	mtd->_block_markbad = goldfish_nand_block_markbad;

	if (mtd_device_register(mtd, NULL, 0))
		return -EIO;

	return 0;
}

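/*
 * Map the control registers, check the device protocol version, then
 * initialize every advertised NAND device.  Probing succeeds as long
 * as at least one device comes up.
 */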
static int goldfish_nand_probe(struct platform_device *pdev)
{
	u32 num_dev;
	int i;
	int err;
	u32 num_dev_working;
	u32 version;
	struct resource *r;
	struct goldfish_nand *nand;
	unsigned char __iomem *base;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		return -ENODEV;

	base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
	if (!base)
		return -ENOMEM;

	version = readl(base + NAND_VERSION);
	if (version != NAND_VERSION_CURRENT) {
		dev_err(&pdev->dev,
			"goldfish_nand_init: version mismatch, got %d, expected %d\n",
			version, NAND_VERSION_CURRENT);
		return -ENODEV;
	}
	num_dev = readl(base + NAND_NUM_DEV);
	if (num_dev == 0)
		return -ENODEV;

	nand = devm_kzalloc(&pdev->dev, sizeof(*nand) +
			    sizeof(struct mtd_info) * num_dev, GFP_KERNEL);
	if (!nand)
		return -ENOMEM;

	mutex_init(&nand->lock);
	nand->base = base;
	nand->mtd_count = num_dev;
	platform_set_drvdata(pdev, nand);

	num_dev_working = 0;
	for (i = 0; i < num_dev; i++) {
		err = goldfish_nand_init_device(pdev, nand, i);
		if (err == 0)
			num_dev_working++;
	}
	if (num_dev_working == 0)
		return -ENODEV;
	return 0;
}

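/*
 * Tear down: unregister each MTD whose slot was initialized far
 * enough to have a name assigned.
 */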
static int goldfish_nand_remove(struct platform_device *pdev)
{
	struct goldfish_nand *nand = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < nand->mtd_count; i++) {
		if (nand->mtd[i].name)
			mtd_device_unregister(&nand->mtd[i]);
	}
	return 0;
}

static struct platform_driver goldfish_nand_driver = {
	.probe		= goldfish_nand_probe,
	.remove		= goldfish_nand_remove,
	.driver = {
		.name = "goldfish_nand"
	}
};

module_platform_driver(goldfish_nand_driver);
MODULE_LICENSE("GPL");