1 /*
2 * Copyright © 2009 - Maxim Levitsky
3 * SmartMedia/xD translation layer
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/random.h>
13 #include <linux/hdreg.h>
14 #include <linux/kthread.h>
15 #include <linux/freezer.h>
16 #include <linux/sysfs.h>
17 #include <linux/bitops.h>
18 #include <linux/slab.h>
19 #include <linux/mtd/nand_ecc.h>
20 #include "nand/sm_common.h"
21 #include "sm_ftl.h"
22
23
24
25 static struct workqueue_struct *cache_flush_workqueue;
26
27 static int cache_timeout = 1000;
28 module_param(cache_timeout, int, S_IRUGO);
29 MODULE_PARM_DESC(cache_timeout,
30 "Timeout (in ms) for cache flush (1000 ms default");
31
32 static int debug;
33 module_param(debug, int, S_IRUGO | S_IWUSR);
34 MODULE_PARM_DESC(debug, "Debug level (0-2)");
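/*
 * Usage note (illustrative; assumes this file is built as a module,
 * e.g. "sm_ftl"): both parameters can be given at load time, for
 * example "modprobe sm_ftl cache_timeout=2000 debug=1". Because they
 * are declared with module_param(), they also appear under
 * /sys/module/<module>/parameters/; only "debug" (S_IWUSR) is writable
 * there at runtime, while "cache_timeout" is read-only once loaded.
 */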
35
36
37 /* ------------------- sysfs attributes ---------------------------------- */
38 struct sm_sysfs_attribute {
39 struct device_attribute dev_attr;
40 char *data;
41 int len;
42 };
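/*
 * Wrapper that pairs a device_attribute with the buffer it should
 * return: sm_attr_show() below recovers the wrapper from the generic
 * attribute pointer with container_of(), so each attribute carries its
 * own data/len without any global state.
 */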
43
44 static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
45 char *buf)
46 {
47 struct sm_sysfs_attribute *sm_attr =
48 container_of(attr, struct sm_sysfs_attribute, dev_attr);
49
50 strncpy(buf, sm_attr->data, sm_attr->len);
51 return sm_attr->len;
52 }
53
54
55 #define NUM_ATTRIBUTES 1
56 #define SM_CIS_VENDOR_OFFSET 0x59
57 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
58 {
59 struct attribute_group *attr_group;
60 struct attribute **attributes;
61 struct sm_sysfs_attribute *vendor_attribute;
62 char *vendor;
63
64 vendor = kstrndup(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
65 SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET, GFP_KERNEL);
66 if (!vendor)
67 goto error1;
68
69 /* Initialize sysfs attributes */
70 vendor_attribute =
71 kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL);
72 if (!vendor_attribute)
73 goto error2;
74
75 sysfs_attr_init(&vendor_attribute->dev_attr.attr);
76
77 vendor_attribute->data = vendor;
78 vendor_attribute->len = strlen(vendor);
79 vendor_attribute->dev_attr.attr.name = "vendor";
80 vendor_attribute->dev_attr.attr.mode = S_IRUGO;
81 vendor_attribute->dev_attr.show = sm_attr_show;
82
83
84 /* Create array of pointers to the attributes */
85 attributes = kzalloc(sizeof(struct attribute *) * (NUM_ATTRIBUTES + 1),
86 GFP_KERNEL);
87 if (!attributes)
88 goto error3;
89 attributes[0] = &vendor_attribute->dev_attr.attr;
90
91 /* Finally create the attribute group */
92 attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
93 if (!attr_group)
94 goto error4;
95 attr_group->attrs = attributes;
96 return attr_group;
97 error4:
98 kfree(attributes);
99 error3:
100 kfree(vendor_attribute);
101 error2:
102 kfree(vendor);
103 error1:
104 return NULL;
105 }
106
107 static void sm_delete_sysfs_attributes(struct sm_ftl *ftl)
108 {
109 struct attribute **attributes = ftl->disk_attributes->attrs;
110 int i;
111
112 for (i = 0; attributes[i] ; i++) {
113
114 struct device_attribute *dev_attr = container_of(attributes[i],
115 struct device_attribute, attr);
116
117 struct sm_sysfs_attribute *sm_attr =
118 container_of(dev_attr,
119 struct sm_sysfs_attribute, dev_attr);
120
121 kfree(sm_attr->data);
122 kfree(sm_attr);
123 }
124
125 kfree(ftl->disk_attributes->attrs);
126 kfree(ftl->disk_attributes);
127 }
128
129
130 /* ----------------------- oob helpers -------------------------------------- */
131
132 static int sm_get_lba(uint8_t *lba)
133 {
134 /* check fixed bits */
135 if ((lba[0] & 0xF8) != 0x10)
136 return -2;
137
138 /* check parity - endianness doesn't matter */
139 if (hweight16(*(uint16_t *)lba) & 1)
140 return -2;
141
142 return (lba[1] >> 1) | ((lba[0] & 0x07) << 7);
143 }
144
145
146 /*
147 * Read LBA associated with block
148 * returns -1 if the block is erased
149 * returns -2 if an error happens
150 */
151 static int sm_read_lba(struct sm_oob *oob)
152 {
153 static const uint32_t erased_pattern[4] = {
154 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
155
156 uint16_t lba_test;
157 int lba;
158
159 /* First test for erased block */
160 if (!memcmp(oob, erased_pattern, SM_OOB_SIZE))
161 return -1;
162
163 /* Now check if the two copies of the LBA differ too much */
164 lba_test = *(uint16_t *)oob->lba_copy1 ^ *(uint16_t*)oob->lba_copy2;
165 if (lba_test && !is_power_of_2(lba_test))
166 return -2;
167
168 /* And read it */
169 lba = sm_get_lba(oob->lba_copy1);
170
171 if (lba == -2)
172 lba = sm_get_lba(oob->lba_copy2);
173
174 return lba;
175 }
176
177 static void sm_write_lba(struct sm_oob *oob, uint16_t lba)
178 {
179 uint8_t tmp[2];
180
181 WARN_ON(lba >= 1000);
182
183 tmp[0] = 0x10 | ((lba >> 7) & 0x07);
184 tmp[1] = (lba << 1) & 0xFF;
185
186 if (hweight16(*(uint16_t *)tmp) & 0x01)
187 tmp[1] |= 1;
188
189 oob->lba_copy1[0] = oob->lba_copy2[0] = tmp[0];
190 oob->lba_copy1[1] = oob->lba_copy2[1] = tmp[1];
191 }
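/*
 * Worked example of the encoding above (illustrative): for lba == 5,
 * tmp[0] = 0x10 | (5 >> 7) == 0x10 and tmp[1] = (5 << 1) & 0xFF == 0x0A.
 * The 16-bit word then has 3 bits set (odd), so the parity bit is added
 * and tmp[1] becomes 0x0B (4 bits total, even). sm_get_lba() accepts
 * 0x10/0x0B (fixed bits and parity both check out) and returns
 * (0x0B >> 1) | ((0x10 & 0x07) << 7) == 5.
 */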
192
193
194 /* Make offset from parts */
195 static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset)
196 {
197 WARN_ON(boffset & (SM_SECTOR_SIZE - 1));
198 WARN_ON(zone < 0 || zone >= ftl->zone_count);
199 WARN_ON(block >= ftl->zone_size);
200 WARN_ON(boffset >= ftl->block_size);
201
202 if (block == -1)
203 return -1;
204
205 return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset;
206 }
207
208 /* Breaks offset into parts */
209 static void sm_break_offset(struct sm_ftl *ftl, loff_t offset,
210 int *zone, int *block, int *boffset)
211 {
212 *boffset = do_div(offset, ftl->block_size);
213 *block = do_div(offset, ftl->max_lba);
214 *zone = offset >= ftl->zone_count ? -1 : offset;
215 }
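/*
 * Illustrative example, assuming the standard >= 16 MiB layout from
 * sm_get_media_info() (block_size == 32 * 512, max_lba == 1000):
 * for logical sector 2100 (byte offset 2100 << 9 == 1075200),
 * sm_break_offset() yields boffset == 1075200 % 16384 == 10240
 * (sector 20 within the block), block == (1075200 / 16384) % 1000 == 65
 * (the LBA) and zone == 0. sm_mkoffset() goes the other way, but takes
 * a *physical* block number and scales zones by SM_MAX_ZONE_SIZE, since
 * a zone occupies more physical blocks than it exposes LBAs.
 */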
216
217 /* ---------------------- low level IO ------------------------------------- */
218
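/*
 * Small-page ECC layout: each 512-byte sector is handled as two
 * SM_SMALL_PAGE (256-byte) halves, each protected by its own 3-byte
 * Hamming code (oob->ecc1/ecc2), generated by __nand_calculate_ecc()
 * in sm_write_block() and checked/corrected here on the read path.
 */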
219 static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob)
220 {
221 uint8_t ecc[3];
222
223 __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc);
224 if (__nand_correct_data(buffer, ecc, oob->ecc1, SM_SMALL_PAGE) < 0)
225 return -EIO;
226
227 buffer += SM_SMALL_PAGE;
228
229 __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc);
230 if (__nand_correct_data(buffer, ecc, oob->ecc2, SM_SMALL_PAGE) < 0)
231 return -EIO;
232 return 0;
233 }
234
235 /* Reads a sector + oob*/
236 static int sm_read_sector(struct sm_ftl *ftl,
237 int zone, int block, int boffset,
238 uint8_t *buffer, struct sm_oob *oob)
239 {
240 struct mtd_info *mtd = ftl->trans->mtd;
241 struct mtd_oob_ops ops;
242 struct sm_oob tmp_oob;
243 int ret = -EIO;
244 int try = 0;
245
246 /* The FTL table can contain -1 entries; these read back as all-0xFF data */
247 if (block == -1) {
248 memset(buffer, 0xFF, SM_SECTOR_SIZE);
249 return 0;
250 }
251
252 /* User might not need the oob, but we do for data verification */
253 if (!oob)
254 oob = &tmp_oob;
255
256 ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
257 ops.ooboffs = 0;
258 ops.ooblen = SM_OOB_SIZE;
259 ops.oobbuf = (void *)oob;
260 ops.len = SM_SECTOR_SIZE;
261 ops.datbuf = buffer;
262
263 again:
264 if (try++) {
265 /* Avoid infinite recursion on CIS reads, sm_recheck_media
266 won't help anyway */
267 if (zone == 0 && block == ftl->cis_block && boffset ==
268 ftl->cis_boffset)
269 return ret;
270
271 /* Test if media is stable */
272 if (try == 3 || sm_recheck_media(ftl))
273 return ret;
274 }
275
276 /* Unfortunately, oob read will _always_ succeed,
277 despite card removal..... */
278 ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
279
280 /* Test for unknown errors */
281 if (ret != 0 && !mtd_is_bitflip_or_eccerr(ret)) {
282 dbg("read of block %d at zone %d, failed due to error (%d)",
283 block, zone, ret);
284 goto again;
285 }
286
287 /* Do a basic test on the oob, to guard against returned garbage */
288 if (oob->reserved != 0xFFFFFFFF && !is_power_of_2(~oob->reserved))
289 goto again;
290
291 /* This should never happen, unless there is a bug in the mtd driver */
292 WARN_ON(ops.oobretlen != SM_OOB_SIZE);
293 WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);
294
295 if (!buffer)
296 return 0;
297
298 /* Test if sector marked as bad */
299 if (!sm_sector_valid(oob)) {
300 dbg("read of block %d at zone %d, failed because it is marked"
301 " as bad" , block, zone);
302 goto again;
303 }
304
305 /* Test ECC*/
306 if (mtd_is_eccerr(ret) ||
307 (ftl->smallpagenand && sm_correct_sector(buffer, oob))) {
308
309 dbg("read of block %d at zone %d, failed due to ECC error",
310 block, zone);
311 goto again;
312 }
313
314 return 0;
315 }
316
317 /* Writes a sector to media */
318 static int sm_write_sector(struct sm_ftl *ftl,
319 int zone, int block, int boffset,
320 uint8_t *buffer, struct sm_oob *oob)
321 {
322 struct mtd_oob_ops ops;
323 struct mtd_info *mtd = ftl->trans->mtd;
324 int ret;
325
326 BUG_ON(ftl->readonly);
327
328 if (zone == 0 && (block == ftl->cis_block || block == 0)) {
329 dbg("attempted to write the CIS!");
330 return -EIO;
331 }
332
333 if (ftl->unstable)
334 return -EIO;
335
336 ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
337 ops.len = SM_SECTOR_SIZE;
338 ops.datbuf = buffer;
339 ops.ooboffs = 0;
340 ops.ooblen = SM_OOB_SIZE;
341 ops.oobbuf = (void *)oob;
342
343 ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
344
345 /* Now we assume that hardware will catch write bitflip errors */
346
347 if (ret) {
348 dbg("write to block %d at zone %d, failed with error %d",
349 block, zone, ret);
350
351 sm_recheck_media(ftl);
352 return ret;
353 }
354
355 /* This should never happen, unless there is a bug in the driver */
356 WARN_ON(ops.oobretlen != SM_OOB_SIZE);
357 WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);
358
359 return 0;
360 }
361
362 /* ------------------------ block IO ------------------------------------- */
363
364 /* Write a block using data and lba, and invalid sector bitmap */
365 static int sm_write_block(struct sm_ftl *ftl, uint8_t *buf,
366 int zone, int block, int lba,
367 unsigned long invalid_bitmap)
368 {
369 struct sm_oob oob;
370 int boffset;
371 int retry = 0;
372
373 /* Initialize the oob with requested values */
374 memset(&oob, 0xFF, SM_OOB_SIZE);
375 sm_write_lba(&oob, lba);
376 restart:
377 if (ftl->unstable)
378 return -EIO;
379
380 for (boffset = 0; boffset < ftl->block_size;
381 boffset += SM_SECTOR_SIZE) {
382
383 oob.data_status = 0xFF;
384
385 if (test_bit(boffset / SM_SECTOR_SIZE, &invalid_bitmap)) {
386
387 sm_printk("sector %d of block at LBA %d of zone %d"
388 " coudn't be read, marking it as invalid",
389 boffset / SM_SECTOR_SIZE, lba, zone);
390
391 oob.data_status = 0;
392 }
393
394 if (ftl->smallpagenand) {
395 __nand_calculate_ecc(buf + boffset,
396 SM_SMALL_PAGE, oob.ecc1);
397
398 __nand_calculate_ecc(buf + boffset + SM_SMALL_PAGE,
399 SM_SMALL_PAGE, oob.ecc2);
400 }
401 if (!sm_write_sector(ftl, zone, block, boffset,
402 buf + boffset, &oob))
403 continue;
404
405 if (!retry) {
406
407 /* If the write fails, try to erase the block */
408 /* This is safe, because we never write to blocks
409 that contain valuable data.
410 This is intended to repair blocks that are marked
411 as erased but aren't fully erased */
412
413 if (sm_erase_block(ftl, zone, block, 0))
414 return -EIO;
415
416 retry = 1;
417 goto restart;
418 } else {
419 sm_mark_block_bad(ftl, zone, block);
420 return -EIO;
421 }
422 }
423 return 0;
424 }
425
426
427 /* Mark the whole block as bad. */
428 static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
429 {
430 struct sm_oob oob;
431 int boffset;
432
433 memset(&oob, 0xFF, SM_OOB_SIZE);
434 oob.block_status = 0xF0;
435
436 if (ftl->unstable)
437 return;
438
439 if (sm_recheck_media(ftl))
440 return;
441
442 sm_printk("marking block %d of zone %d as bad", block, zone);
443
444 /* We aren't checking the return value, because we don't care */
445 /* This also fails on fake xD cards, but I guess these won't expose
446 any bad blocks until they fail completely */
447 for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
448 sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
449 }
450
451 /*
452 * Erase a block within a zone
453 * If erase succeeds, it updates free block fifo, otherwise marks block as bad
454 */
455 static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
456 int put_free)
457 {
458 struct ftl_zone *zone = &ftl->zones[zone_num];
459 struct mtd_info *mtd = ftl->trans->mtd;
460 struct erase_info erase;
461
462 erase.mtd = mtd;
463 erase.callback = sm_erase_callback;
464 erase.addr = sm_mkoffset(ftl, zone_num, block, 0);
465 erase.len = ftl->block_size;
466 erase.priv = (u_long)ftl;
467
468 if (ftl->unstable)
469 return -EIO;
470
471 BUG_ON(ftl->readonly);
472
473 if (zone_num == 0 && (block == ftl->cis_block || block == 0)) {
474 sm_printk("attempted to erase the CIS!");
475 return -EIO;
476 }
477
478 if (mtd_erase(mtd, &erase)) {
479 sm_printk("erase of block %d in zone %d failed",
480 block, zone_num);
481 goto error;
482 }
483
484 if (erase.state == MTD_ERASE_PENDING)
485 wait_for_completion(&ftl->erase_completion);
486
487 if (erase.state != MTD_ERASE_DONE) {
488 sm_printk("erase of block %d in zone %d failed after wait",
489 block, zone_num);
490 goto error;
491 }
492
493 if (put_free)
494 kfifo_in(&zone->free_sectors,
495 (const unsigned char *)&block, sizeof(block));
496
497 return 0;
498 error:
499 sm_mark_block_bad(ftl, zone_num, block);
500 return -EIO;
501 }
502
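/*
 * mtd_erase() may complete asynchronously: sm_erase_block() above waits
 * on ftl->erase_completion while the erase is still MTD_ERASE_PENDING,
 * and this callback (hooked in via erase.callback) completes it once
 * the underlying driver reports that the erase has finished.
 */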
503 static void sm_erase_callback(struct erase_info *self)
504 {
505 struct sm_ftl *ftl = (struct sm_ftl *)self->priv;
506 complete(&ftl->erase_completion);
507 }
508
509 /* Thoroughly test that block is valid. */
510 static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
511 {
512 int boffset;
513 struct sm_oob oob;
514 int lbas[] = { -3, 0, 0, 0 };
515 int i = 0;
516 int test_lba;
517
518
519 /* First just check that block doesn't look fishy */
520 /* Only blocks that are valid, or that are sliced in two parts, are
521 accepted */
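/*
 * lbas[] tracks changes of the LBA value across the block's sectors:
 * lbas[0] holds a -3 sentinel that can never match a return value of
 * sm_read_lba() (-2, -1 or a valid LBA), so the first sector always
 * records its value. One change (i == 1) means a consistent block,
 * two changes mean a block sliced in two parts, three are rejected.
 */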
522 for (boffset = 0; boffset < ftl->block_size;
523 boffset += SM_SECTOR_SIZE) {
524
525 /* This shouldn't happen anyway */
526 if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
527 return -2;
528
529 test_lba = sm_read_lba(&oob);
530
531 if (lbas[i] != test_lba)
532 lbas[++i] = test_lba;
533
534 /* If we found three different LBAs, something is fishy */
535 if (i == 3)
536 return -EIO;
537 }
538
539 /* If the block is sliced (partially erased usually) erase it */
540 if (i == 2) {
541 sm_erase_block(ftl, zone, block, 1);
542 return 1;
543 }
544
545 return 0;
546 }
547
548 /* ----------------- media scanning --------------------------------- */
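/*
 * Synthetic CHS geometry, reported through sm_getgeo(): the columns are
 * device size in MiB, cylinders, heads and sectors per track, matched
 * against the media size in sm_get_media_info(). It exists mainly so
 * that partitioning tools get sane values; it does not need to cover
 * the capacity exactly.
 */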
549 static const struct chs_entry chs_table[] = {
550 { 1, 125, 4, 4 },
551 { 2, 125, 4, 8 },
552 { 4, 250, 4, 8 },
553 { 8, 250, 4, 16 },
554 { 16, 500, 4, 16 },
555 { 32, 500, 8, 16 },
556 { 64, 500, 8, 32 },
557 { 128, 500, 16, 32 },
558 { 256, 1000, 16, 32 },
559 { 512, 1015, 32, 63 },
560 { 1024, 985, 33, 63 },
561 { 2048, 985, 33, 63 },
562 { 0 },
563 };
564
565
566 static const uint8_t cis_signature[] = {
567 0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
568 };
569 /* Find out media parameters.
570 * This should ideally be based on the NAND ID, but for now the device size is enough */
571 static int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
572 {
573 int i;
574 int size_in_megs = mtd->size / (1024 * 1024);
575
576 ftl->readonly = mtd->type == MTD_ROM;
577
578 /* Manual settings for very old devices */
579 ftl->zone_count = 1;
580 ftl->smallpagenand = 0;
581
582 switch (size_in_megs) {
583 case 1:
584 /* 1 MiB flash/rom SmartMedia card (256 byte pages)*/
585 ftl->zone_size = 256;
586 ftl->max_lba = 250;
587 ftl->block_size = 8 * SM_SECTOR_SIZE;
588 ftl->smallpagenand = 1;
589
590 break;
591 case 2:
592 /* 2 MiB flash SmartMedia (256 byte pages)*/
593 if (mtd->writesize == SM_SMALL_PAGE) {
594 ftl->zone_size = 512;
595 ftl->max_lba = 500;
596 ftl->block_size = 8 * SM_SECTOR_SIZE;
597 ftl->smallpagenand = 1;
598 /* 2 MiB rom SmartMedia */
599 } else {
600
601 if (!ftl->readonly)
602 return -ENODEV;
603
604 ftl->zone_size = 256;
605 ftl->max_lba = 250;
606 ftl->block_size = 16 * SM_SECTOR_SIZE;
607 }
608 break;
609 case 4:
610 /* 4 MiB flash/rom SmartMedia device */
611 ftl->zone_size = 512;
612 ftl->max_lba = 500;
613 ftl->block_size = 16 * SM_SECTOR_SIZE;
614 break;
615 case 8:
616 /* 8 MiB flash/rom SmartMedia device */
617 ftl->zone_size = 1024;
618 ftl->max_lba = 1000;
619 ftl->block_size = 16 * SM_SECTOR_SIZE;
620 }
621
622 /* Minimum xD size is 16 MiB. Also, all xD cards have standard zone
623 sizes. SmartMedia cards exist up to 128 MiB and have the same layout */
624 if (size_in_megs >= 16) {
625 ftl->zone_count = size_in_megs / 16;
626 ftl->zone_size = 1024;
627 ftl->max_lba = 1000;
628 ftl->block_size = 32 * SM_SECTOR_SIZE;
629 }
630
631 /* Test for proper write, erase and oob sizes */
632 if (mtd->erasesize > ftl->block_size)
633 return -ENODEV;
634
635 if (mtd->writesize > SM_SECTOR_SIZE)
636 return -ENODEV;
637
638 if (ftl->smallpagenand && mtd->oobsize < SM_SMALL_OOB_SIZE)
639 return -ENODEV;
640
641 if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE)
642 return -ENODEV;
643
644 /* We use OOB */
645 if (!mtd_has_oob(mtd))
646 return -ENODEV;
647
648 /* Find geometry information */
649 for (i = 0 ; i < ARRAY_SIZE(chs_table) ; i++) {
650 if (chs_table[i].size == size_in_megs) {
651 ftl->cylinders = chs_table[i].cyl;
652 ftl->heads = chs_table[i].head;
653 ftl->sectors = chs_table[i].sec;
654 return 0;
655 }
656 }
657
658 sm_printk("media has unknown size : %dMiB", size_in_megs);
659 ftl->cylinders = 985;
660 ftl->heads = 33;
661 ftl->sectors = 63;
662 return 0;
663 }
664
665 /* Validate the CIS */
666 static int sm_read_cis(struct sm_ftl *ftl)
667 {
668 struct sm_oob oob;
669
670 if (sm_read_sector(ftl,
671 0, ftl->cis_block, ftl->cis_boffset, ftl->cis_buffer, &oob))
672 return -EIO;
673
674 if (!sm_sector_valid(&oob) || !sm_block_valid(&oob))
675 return -EIO;
676
677 if (!memcmp(ftl->cis_buffer + ftl->cis_page_offset,
678 cis_signature, sizeof(cis_signature))) {
679 return 0;
680 }
681
682 return -EIO;
683 }
684
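/*
 * The CIS lives in zone 0, in the first readable block that is not
 * marked bad, somewhere within the first (zone_size - max_lba) physical
 * blocks; within that block it sits in the first valid sector, at page
 * offset 0 or SM_SMALL_PAGE. sm_find_cis() locates it once at probe
 * time, and sm_recheck_media() re-reads it later to detect card removal
 * or unstable media.
 */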
685 /* Scan the media for the CIS */
686 static int sm_find_cis(struct sm_ftl *ftl)
687 {
688 struct sm_oob oob;
689 int block, boffset;
690 int block_found = 0;
691 int cis_found = 0;
692
693 /* Search for first valid block */
694 for (block = 0 ; block < ftl->zone_size - ftl->max_lba ; block++) {
695
696 if (sm_read_sector(ftl, 0, block, 0, NULL, &oob))
697 continue;
698
699 if (!sm_block_valid(&oob))
700 continue;
701 block_found = 1;
702 break;
703 }
704
705 if (!block_found)
706 return -EIO;
707
708 /* Search for first valid sector in this block */
709 for (boffset = 0 ; boffset < ftl->block_size;
710 boffset += SM_SECTOR_SIZE) {
711
712 if (sm_read_sector(ftl, 0, block, boffset, NULL, &oob))
713 continue;
714
715 if (!sm_sector_valid(&oob))
716 continue;
717 break;
718 }
719
720 if (boffset == ftl->block_size)
721 return -EIO;
722
723 ftl->cis_block = block;
724 ftl->cis_boffset = boffset;
725 ftl->cis_page_offset = 0;
726
727 cis_found = !sm_read_cis(ftl);
728
729 if (!cis_found) {
730 ftl->cis_page_offset = SM_SMALL_PAGE;
731 cis_found = !sm_read_cis(ftl);
732 }
733
734 if (cis_found) {
735 dbg("CIS block found at offset %x",
736 block * ftl->block_size +
737 boffset + ftl->cis_page_offset);
738 return 0;
739 }
740 return -EIO;
741 }
742
743 /* Basic test to determine if the underlying mtd device is functional */
744 static int sm_recheck_media(struct sm_ftl *ftl)
745 {
746 if (sm_read_cis(ftl)) {
747
748 if (!ftl->unstable) {
749 sm_printk("media unstable, not allowing writes");
750 ftl->unstable = 1;
751 }
752 return -EIO;
753 }
754 return 0;
755 }
756
757 /* Initialize a FTL zone */
758 static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
759 {
760 struct ftl_zone *zone = &ftl->zones[zone_num];
761 struct sm_oob oob;
762 uint16_t block;
763 int lba;
764 int i = 0;
765 int len;
766
767 dbg("initializing zone %d", zone_num);
768
769 /* Allocate memory for FTL table */
770 zone->lba_to_phys_table = kmalloc(ftl->max_lba * 2, GFP_KERNEL);
771
772 if (!zone->lba_to_phys_table)
773 return -ENOMEM;
774 memset(zone->lba_to_phys_table, -1, ftl->max_lba * 2);
775
776
777 /* Allocate memory for free sectors FIFO */
778 if (kfifo_alloc(&zone->free_sectors, ftl->zone_size * 2, GFP_KERNEL)) {
779 kfree(zone->lba_to_phys_table);
780 return -ENOMEM;
781 }
782
783 /* Now scan the zone */
784 for (block = 0 ; block < ftl->zone_size ; block++) {
785
786 /* Skip blocks up to and including the CIS block */
787 if (zone_num == 0 && block <= ftl->cis_block)
788 continue;
789
790 /* Read the oob of first sector */
791 if (sm_read_sector(ftl, zone_num, block, 0, NULL, &oob))
792 return -EIO;
793
794 /* Test to see if block is erased. It is enough to test
795 first sector, because erase happens in one shot */
796 if (sm_block_erased(&oob)) {
797 kfifo_in(&zone->free_sectors,
798 (unsigned char *)&block, 2);
799 continue;
800 }
801
802 /* If block is marked as bad, skip it */
803 /* This assumes we can trust the first sector */
804 /* However, the way the block valid status is defined ensures a
805 very low probability of failure here */
806 if (!sm_block_valid(&oob)) {
807 dbg("PH %04d <-> <marked bad>", block);
808 continue;
809 }
810
811
812 lba = sm_read_lba(&oob);
813
814 /* Invalid LBA means that block is damaged. */
815 /* We can try to erase it, or mark it as bad, but
816 let's leave that to a recovery application */
817 if (lba == -2 || lba >= ftl->max_lba) {
818 dbg("PH %04d <-> LBA %04d(bad)", block, lba);
819 continue;
820 }
821
822
823 /* If there is no collision,
824 just put the sector in the FTL table */
825 if (zone->lba_to_phys_table[lba] < 0) {
826 dbg_verbose("PH %04d <-> LBA %04d", block, lba);
827 zone->lba_to_phys_table[lba] = block;
828 continue;
829 }
830
831 sm_printk("collision"
832 " of LBA %d between blocks %d and %d in zone %d",
833 lba, zone->lba_to_phys_table[lba], block, zone_num);
834
835 /* Test that this block is valid*/
836 if (sm_check_block(ftl, zone_num, block))
837 continue;
838
839 /* Test now the old block */
840 if (sm_check_block(ftl, zone_num,
841 zone->lba_to_phys_table[lba])) {
842 zone->lba_to_phys_table[lba] = block;
843 continue;
844 }
845
846 /* If both blocks are valid and share the same LBA, it means that
847 they hold different versions of the same data. It is not
848 known which is more recent, thus just erase one of them
849 */
850 sm_printk("both blocks are valid, erasing the later");
851 sm_erase_block(ftl, zone_num, block, 1);
852 }
853
854 dbg("zone initialized");
855 zone->initialized = 1;
856
857 /* No free sectors means that the zone is heavily damaged; writes won't
858 work, but it can still be (partially) read */
859 if (!kfifo_len(&zone->free_sectors)) {
860 sm_printk("no free blocks in zone %d", zone_num);
861 return 0;
862 }
863
864 /* Randomize first block we write to */
865 get_random_bytes(&i, 2);
866 i %= (kfifo_len(&zone->free_sectors) / 2);
867
868 while (i--) {
869 len = kfifo_out(&zone->free_sectors,
870 (unsigned char *)&block, 2);
871 WARN_ON(len != 2);
872 kfifo_in(&zone->free_sectors, (const unsigned char *)&block, 2);
873 }
874 return 0;
875 }
876
877 /* Get and automatically initialize an FTL mapping for one zone */
878 static struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
879 {
880 struct ftl_zone *zone;
881 int error;
882
883 BUG_ON(zone_num >= ftl->zone_count);
884 zone = &ftl->zones[zone_num];
885
886 if (!zone->initialized) {
887 error = sm_init_zone(ftl, zone_num);
888
889 if (error)
890 return ERR_PTR(error);
891 }
892 return zone;
893 }
894
895
896 /* ----------------- cache handling ------------------------------------------*/
897
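/*
 * The cache is a write-back buffer covering exactly one logical block:
 * sm_write() collects 512-byte sectors for the current (zone, block) in
 * ftl->cache_data, with cache_data_invalid_bitmap marking sectors not
 * yet supplied. On flush, missing sectors are read back from the old
 * physical block, the whole block is written to a free block taken from
 * the zone's FIFO, the LBA mapping is updated and the old block is
 * erased. A timer queues the flush roughly cache_timeout ms after the
 * last write.
 */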
898 /* Initialize the one block cache */
899 static void sm_cache_init(struct sm_ftl *ftl)
900 {
901 ftl->cache_data_invalid_bitmap = 0xFFFFFFFF;
902 ftl->cache_clean = 1;
903 ftl->cache_zone = -1;
904 ftl->cache_block = -1;
905 /*memset(ftl->cache_data, 0xAA, ftl->block_size);*/
906 }
907
908 /* Put sector in one block cache */
909 static void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
910 {
911 memcpy(ftl->cache_data + boffset, buffer, SM_SECTOR_SIZE);
912 clear_bit(boffset / SM_SECTOR_SIZE, &ftl->cache_data_invalid_bitmap);
913 ftl->cache_clean = 0;
914 }
915
916 /* Read a sector from the cache */
917 static int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
918 {
919 if (test_bit(boffset / SM_SECTOR_SIZE,
920 &ftl->cache_data_invalid_bitmap))
921 return -1;
922
923 memcpy(buffer, ftl->cache_data + boffset, SM_SECTOR_SIZE);
924 return 0;
925 }
926
927 /* Write the cache to hardware */
928 static int sm_cache_flush(struct sm_ftl *ftl)
929 {
930 struct ftl_zone *zone;
931
932 int sector_num;
933 uint16_t write_sector;
934 int zone_num = ftl->cache_zone;
935 int block_num;
936
937 if (ftl->cache_clean)
938 return 0;
939
940 if (ftl->unstable)
941 return -EIO;
942
943 BUG_ON(zone_num < 0);
944 zone = &ftl->zones[zone_num];
945 block_num = zone->lba_to_phys_table[ftl->cache_block];
946
947
948 /* Try to read all unread areas of the cache block*/
949 for_each_set_bit(sector_num, &ftl->cache_data_invalid_bitmap,
950 ftl->block_size / SM_SECTOR_SIZE) {
951
952 if (!sm_read_sector(ftl,
953 zone_num, block_num, sector_num * SM_SECTOR_SIZE,
954 ftl->cache_data + sector_num * SM_SECTOR_SIZE, NULL))
955 clear_bit(sector_num,
956 &ftl->cache_data_invalid_bitmap);
957 }
958 restart:
959
960 if (ftl->unstable)
961 return -EIO;
962
963 /* If there are no spare blocks, */
964 /* we could still continue by erasing/writing the current block,
965 but for such worn-out media it isn't worth the trouble
966 and the danger */
967 if (kfifo_out(&zone->free_sectors,
968 (unsigned char *)&write_sector, 2) != 2) {
969 dbg("no free sectors for write!");
970 return -EIO;
971 }
972
973
974 if (sm_write_block(ftl, ftl->cache_data, zone_num, write_sector,
975 ftl->cache_block, ftl->cache_data_invalid_bitmap))
976 goto restart;
977
978 /* Update the FTL table */
979 zone->lba_to_phys_table[ftl->cache_block] = write_sector;
980
981 /* Write successful, so erase and free the old block */
982 if (block_num > 0)
983 sm_erase_block(ftl, zone_num, block_num, 1);
984
985 sm_cache_init(ftl);
986 return 0;
987 }
988
989
990 /* flush timer, runs cache_timeout ms after the last write */
991 static void sm_cache_flush_timer(unsigned long data)
992 {
993 struct sm_ftl *ftl = (struct sm_ftl *)data;
994 queue_work(cache_flush_workqueue, &ftl->flush_work);
995 }
996
997 /* cache flush work, kicked by timer */
998 static void sm_cache_flush_work(struct work_struct *work)
999 {
1000 struct sm_ftl *ftl = container_of(work, struct sm_ftl, flush_work);
1001 mutex_lock(&ftl->mutex);
1002 sm_cache_flush(ftl);
1003 mutex_unlock(&ftl->mutex);
1004 return;
1005 }
1006
1007 /* ---------------- outside interface -------------------------------------- */
1008
1009 /* outside interface: read a sector */
1010 static int sm_read(struct mtd_blktrans_dev *dev,
1011 unsigned long sect_no, char *buf)
1012 {
1013 struct sm_ftl *ftl = dev->priv;
1014 struct ftl_zone *zone;
1015 int error = 0, in_cache = 0;
1016 int zone_num, block, boffset;
1017
1018 sm_break_offset(ftl, sect_no << 9, &zone_num, &block, &boffset);
1019 mutex_lock(&ftl->mutex);
1020
1021
1022 zone = sm_get_zone(ftl, zone_num);
1023 if (IS_ERR(zone)) {
1024 error = PTR_ERR(zone);
1025 goto unlock;
1026 }
1027
1028 /* Have to look at cache first */
1029 if (ftl->cache_zone == zone_num && ftl->cache_block == block) {
1030 in_cache = 1;
1031 if (!sm_cache_get(ftl, buf, boffset))
1032 goto unlock;
1033 }
1034
1035 /* Translate the block and return if it doesn't exist in the table */
1036 block = zone->lba_to_phys_table[block];
1037
1038 if (block == -1) {
1039 memset(buf, 0xFF, SM_SECTOR_SIZE);
1040 goto unlock;
1041 }
1042
1043 if (sm_read_sector(ftl, zone_num, block, boffset, buf, NULL)) {
1044 error = -EIO;
1045 goto unlock;
1046 }
1047
1048 if (in_cache)
1049 sm_cache_put(ftl, buf, boffset);
1050 unlock:
1051 mutex_unlock(&ftl->mutex);
1052 return error;
1053 }
1054
1055 /* outside interface: write a sector */
1056 static int sm_write(struct mtd_blktrans_dev *dev,
1057 unsigned long sec_no, char *buf)
1058 {
1059 struct sm_ftl *ftl = dev->priv;
1060 struct ftl_zone *zone;
1061 int error = 0, zone_num, block, boffset;
1062
1063 BUG_ON(ftl->readonly);
1064 sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset);
1065
1066 /* No need for the cache flush to run right now */
1067 del_timer(&ftl->timer);
1068 mutex_lock(&ftl->mutex);
1069
1070 zone = sm_get_zone(ftl, zone_num);
1071 if (IS_ERR(zone)) {
1072 error = PTR_ERR(zone);
1073 goto unlock;
1074 }
1075
1076 /* If the cache holds a different block, flush it first */
1077 if (ftl->cache_block != block || ftl->cache_zone != zone_num) {
1078
1079 error = sm_cache_flush(ftl);
1080 if (error)
1081 goto unlock;
1082
1083 ftl->cache_block = block;
1084 ftl->cache_zone = zone_num;
1085 }
1086
1087 sm_cache_put(ftl, buf, boffset);
1088 unlock:
1089 mod_timer(&ftl->timer, jiffies + msecs_to_jiffies(cache_timeout));
1090 mutex_unlock(&ftl->mutex);
1091 return error;
1092 }
1093
1094 /* outside interface: flush everything */
1095 static int sm_flush(struct mtd_blktrans_dev *dev)
1096 {
1097 struct sm_ftl *ftl = dev->priv;
1098 int retval;
1099
1100 mutex_lock(&ftl->mutex);
1101 retval = sm_cache_flush(ftl);
1102 mutex_unlock(&ftl->mutex);
1103 return retval;
1104 }
1105
1106 /* outside interface: device is released */
1107 static void sm_release(struct mtd_blktrans_dev *dev)
1108 {
1109 struct sm_ftl *ftl = dev->priv;
1110
1111 mutex_lock(&ftl->mutex);
1112 del_timer_sync(&ftl->timer);
1113 cancel_work_sync(&ftl->flush_work);
1114 sm_cache_flush(ftl);
1115 mutex_unlock(&ftl->mutex);
1116 }
1117
1118 /* outside interface: get geometry */
1119 static int sm_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
1120 {
1121 struct sm_ftl *ftl = dev->priv;
1122 geo->heads = ftl->heads;
1123 geo->sectors = ftl->sectors;
1124 geo->cylinders = ftl->cylinders;
1125 return 0;
1126 }
1127
1128 /* external interface: main initialization function */
1129 static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
1130 {
1131 struct mtd_blktrans_dev *trans;
1132 struct sm_ftl *ftl;
1133
1134 /* Allocate & initialize our private structure */
1135 ftl = kzalloc(sizeof(struct sm_ftl), GFP_KERNEL);
1136 if (!ftl)
1137 goto error1;
1138
1139
1140 mutex_init(&ftl->mutex);
1141 setup_timer(&ftl->timer, sm_cache_flush_timer, (unsigned long)ftl);
1142 INIT_WORK(&ftl->flush_work, sm_cache_flush_work);
1143 init_completion(&ftl->erase_completion);
1144
1145 /* Read media information */
1146 if (sm_get_media_info(ftl, mtd)) {
1147 dbg("found unsupported mtd device, aborting");
1148 goto error2;
1149 }
1150
1151
1152 /* Allocate temporary CIS buffer for read retry support */
1153 ftl->cis_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
1154 if (!ftl->cis_buffer)
1155 goto error2;
1156
1157 /* Allocate zone array, it will be initialized on demand */
1158 ftl->zones = kzalloc(sizeof(struct ftl_zone) * ftl->zone_count,
1159 GFP_KERNEL);
1160 if (!ftl->zones)
1161 goto error3;
1162
1163 /* Allocate the cache*/
1164 ftl->cache_data = kzalloc(ftl->block_size, GFP_KERNEL);
1165
1166 if (!ftl->cache_data)
1167 goto error4;
1168
1169 sm_cache_init(ftl);
1170
1171
1172 /* Allocate upper layer structure and initialize it */
1173 trans = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
1174 if (!trans)
1175 goto error5;
1176
1177 ftl->trans = trans;
1178 trans->priv = ftl;
1179
1180 trans->tr = tr;
1181 trans->mtd = mtd;
1182 trans->devnum = -1;
1183 trans->size = (ftl->block_size * ftl->max_lba * ftl->zone_count) >> 9;
1184 trans->readonly = ftl->readonly;
1185
1186 if (sm_find_cis(ftl)) {
1187 dbg("CIS not found on mtd device, aborting");
1188 goto error6;
1189 }
1190
1191 ftl->disk_attributes = sm_create_sysfs_attributes(ftl);
1192 if (!ftl->disk_attributes)
1193 goto error6;
1194 trans->disk_attributes = ftl->disk_attributes;
1195
1196 sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d",
1197 (int)(mtd->size / (1024 * 1024)), mtd->index);
1198
1199 dbg("FTL layout:");
1200 dbg("%d zone(s), each consists of %d blocks (+%d spares)",
1201 ftl->zone_count, ftl->max_lba,
1202 ftl->zone_size - ftl->max_lba);
1203 dbg("each block consists of %d bytes",
1204 ftl->block_size);
1205
1206
1207 /* Register device*/
1208 if (add_mtd_blktrans_dev(trans)) {
1209 dbg("error in mtdblktrans layer");
1210 goto error6;
1211 }
1212 return;
1213 error6:
1214 kfree(trans);
1215 error5:
1216 kfree(ftl->cache_data);
1217 error4:
1218 kfree(ftl->zones);
1219 error3:
1220 kfree(ftl->cis_buffer);
1221 error2:
1222 kfree(ftl);
1223 error1:
1224 return;
1225 }
1226
1227 /* main interface: device {surprise,} removal */
1228 static void sm_remove_dev(struct mtd_blktrans_dev *dev)
1229 {
1230 struct sm_ftl *ftl = dev->priv;
1231 int i;
1232
1233 del_mtd_blktrans_dev(dev);
1234 ftl->trans = NULL;
1235
1236 for (i = 0 ; i < ftl->zone_count; i++) {
1237
1238 if (!ftl->zones[i].initialized)
1239 continue;
1240
1241 kfree(ftl->zones[i].lba_to_phys_table);
1242 kfifo_free(&ftl->zones[i].free_sectors);
1243 }
1244
1245 sm_delete_sysfs_attributes(ftl);
1246 kfree(ftl->cis_buffer);
1247 kfree(ftl->zones);
1248 kfree(ftl->cache_data);
1249 kfree(ftl);
1250 }
1251
1252 static struct mtd_blktrans_ops sm_ftl_ops = {
1253 .name = "smblk",
1254 .major = 0,
1255 .part_bits = SM_FTL_PARTN_BITS,
1256 .blksize = SM_SECTOR_SIZE,
1257 .getgeo = sm_getgeo,
1258
1259 .add_mtd = sm_add_mtd,
1260 .remove_dev = sm_remove_dev,
1261
1262 .readsect = sm_read,
1263 .writesect = sm_write,
1264
1265 .flush = sm_flush,
1266 .release = sm_release,
1267
1268 .owner = THIS_MODULE,
1269 };
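/*
 * Hook table for the mtd_blktrans layer: it creates a block disk named
 * after .name ("smblk") for every MTD device that sm_add_mtd() accepts,
 * routes 512-byte (.blksize) sector I/O through .readsect/.writesect,
 * and calls .flush/.release to let the FTL write back its one-block
 * cache.
 */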
1270
1271 static __init int sm_module_init(void)
1272 {
1273 int error = 0;
1274
1275 cache_flush_workqueue = create_freezable_workqueue("smflush");
1276 if (!cache_flush_workqueue)
1277 return -ENOMEM;
1278
1279 error = register_mtd_blktrans(&sm_ftl_ops);
1280 if (error)
1281 destroy_workqueue(cache_flush_workqueue);
1282 return error;
1283
1284 }
1285
1286 static void __exit sm_module_exit(void)
1287 {
1288 destroy_workqueue(cache_flush_workqueue);
1289 deregister_mtd_blktrans(&sm_ftl_ops);
1290 }
1291
1292 module_init(sm_module_init);
1293 module_exit(sm_module_exit);
1294
1295 MODULE_LICENSE("GPL");
1296 MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
1297 MODULE_DESCRIPTION("Smartmedia/xD mtd translation layer");
1298