Lines Matching full:part (occurrences of the identifier "part" in the RFD flash translation layer driver, drivers/mtd/rfd_ftl.c)
92 static int build_block_map(struct partition *part, int block_no) in build_block_map() argument
94 struct block *block = &part->blocks[block_no]; in build_block_map()
97 block->offset = part->block_size * block_no; in build_block_map()
99 if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) { in build_block_map()
106 for (i=0; i<part->data_sectors_per_block; i++) { in build_block_map()
109 entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]); in build_block_map()
122 if (entry >= part->sector_count) { in build_block_map()
126 part->mbd.mtd->name, block_no, i, entry); in build_block_map()
130 if (part->sector_map[entry] != -1) { in build_block_map()
133 part->mbd.mtd->name, entry); in build_block_map()
134 part->errors = 1; in build_block_map()
138 part->sector_map[entry] = block->offset + in build_block_map()
139 (i + part->header_sectors_per_block) * SECTOR_SIZE; in build_block_map()
144 if (block->free_sectors == part->data_sectors_per_block) in build_block_map()
145 part->reserved_block = block_no; in build_block_map()
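The build_block_map() matches above show the core of the mapping: each 16-bit header entry i names the logical sector stored in data sector i of that block, and sector_map[] records that sector's absolute flash offset. A minimal userspace sketch of the offset arithmetic (function and constant names here are illustrative, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SIZE 512u  /* assumed 512-byte sectors, as in the RFD format */

/* Offset recorded in sector_map[] for map entry i of one erase block:
 * the data area starts right after that block's header sectors. */
static unsigned long data_sector_offset(unsigned long block_offset,
                                        unsigned int header_sectors_per_block,
                                        unsigned int i)
{
    return block_offset + (i + header_sectors_per_block) * SECTOR_SIZE;
}

int main(void)
{
    /* Example: block at 0x20000 with one header sector, data sector 5. */
    printf("0x%lx\n", data_sector_offset(0x20000, 1, 5));
    return 0;
}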
150 static int scan_header(struct partition *part) in scan_header() argument
157 sectors_per_block = part->block_size / SECTOR_SIZE; in scan_header()
158 part->total_blocks = (u32)part->mbd.mtd->size / part->block_size; in scan_header()
160 if (part->total_blocks < 2) in scan_header()
164 part->header_sectors_per_block = in scan_header()
168 part->data_sectors_per_block = sectors_per_block - in scan_header()
169 part->header_sectors_per_block; in scan_header()
171 part->header_size = (HEADER_MAP_OFFSET + in scan_header()
172 part->data_sectors_per_block) * sizeof(u16); in scan_header()
174 part->cylinders = (part->data_sectors_per_block * in scan_header()
175 (part->total_blocks - 1) - 1) / SECTORS_PER_TRACK; in scan_header()
177 part->sector_count = part->cylinders * SECTORS_PER_TRACK; in scan_header()
179 part->current_block = -1; in scan_header()
180 part->reserved_block = -1; in scan_header()
181 part->is_reclaiming = 0; in scan_header()
183 part->header_cache = kmalloc(part->header_size, GFP_KERNEL); in scan_header()
184 if (!part->header_cache) in scan_header()
187 part->blocks = kcalloc(part->total_blocks, sizeof(struct block), in scan_header()
189 if (!part->blocks) in scan_header()
192 part->sector_map = vmalloc(array_size(sizeof(u_long), in scan_header()
193 part->sector_count)); in scan_header()
194 if (!part->sector_map) { in scan_header()
196 "sector map", part->mbd.mtd->name); in scan_header()
200 for (i=0; i<part->sector_count; i++) in scan_header()
201 part->sector_map[i] = -1; in scan_header()
203 for (i=0, blocks_found=0; i<part->total_blocks; i++) { in scan_header()
204 rc = mtd_read(part->mbd.mtd, i * part->block_size, in scan_header()
205 part->header_size, &retlen, in scan_header()
206 (u_char *)part->header_cache); in scan_header()
208 if (!rc && retlen != part->header_size) in scan_header()
214 if (!build_block_map(part, i)) in scan_header()
220 part->mbd.mtd->name); in scan_header()
225 if (part->reserved_block == -1) { in scan_header()
227 part->mbd.mtd->name); in scan_header()
229 part->errors = 1; in scan_header()
235 vfree(part->sector_map); in scan_header()
236 kfree(part->header_cache); in scan_header()
237 kfree(part->blocks); in scan_header()
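scan_header() derives the partition geometry from the erase-block size: per block, a header large enough for HEADER_MAP_OFFSET + data_sectors_per_block 16-bit entries, one whole block held back for reclaim, and a cylinder count rounded to 63-sector tracks. A standalone sketch of that arithmetic; the header_sectors_per_block rounding expression is reconstructed (its continuation line contains no "part" and so is not listed above), and the constant values are assumptions about the RFD format:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SIZE       512u  /* assumed */
#define SECTORS_PER_TRACK 63u   /* CHS geometry used by the driver */
#define HEADER_MAP_OFFSET 3u    /* assumed: u16 entries preceding the map */

int main(void)
{
    unsigned long mtd_size   = 2 * 1024 * 1024;  /* example: 2 MiB partition */
    unsigned int  block_size = 64 * 1024;        /* example: 64 KiB erase block */

    unsigned int sectors_per_block = block_size / SECTOR_SIZE;
    unsigned int total_blocks = mtd_size / block_size;  /* driver rejects < 2 */

    /* Header sectors: enough 512-byte sectors to hold the u16 map
     * (rounding expression reconstructed, not copied from the source). */
    unsigned int header_sectors_per_block =
        ((HEADER_MAP_OFFSET + sectors_per_block) * sizeof(uint16_t)
         + SECTOR_SIZE - 1) / SECTOR_SIZE;
    unsigned int data_sectors_per_block =
        sectors_per_block - header_sectors_per_block;

    unsigned int header_size =
        (HEADER_MAP_OFFSET + data_sectors_per_block) * sizeof(uint16_t);

    /* One block stays in reserve, and the trailing -1 (as in the driver's
     * formula) keeps the addressable count strictly below raw capacity. */
    unsigned int cylinders =
        (data_sectors_per_block * (total_blocks - 1) - 1) / SECTORS_PER_TRACK;
    unsigned int sector_count = cylinders * SECTORS_PER_TRACK;

    printf("header=%u bytes, data sectors/block=%u, cylinders=%u, sectors=%u\n",
           header_size, data_sectors_per_block, cylinders, sector_count);
    return 0;
}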
244 struct partition *part = (struct partition*)dev; in rfd_ftl_readsect() local
249 if (sector >= part->sector_count) in rfd_ftl_readsect()
252 addr = part->sector_map[sector]; in rfd_ftl_readsect()
254 rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen, in rfd_ftl_readsect()
261 "0x%lx\n", part->mbd.mtd->name, addr); in rfd_ftl_readsect()
270 static int erase_block(struct partition *part, int block) in erase_block() argument
279 erase->addr = part->blocks[block].offset; in erase_block()
280 erase->len = part->block_size; in erase_block()
282 part->blocks[block].state = BLOCK_ERASING; in erase_block()
283 part->blocks[block].free_sectors = 0; in erase_block()
285 rc = mtd_erase(part->mbd.mtd, erase); in erase_block()
289 (unsigned long long)erase->len, part->mbd.mtd->name); in erase_block()
290 part->blocks[block].state = BLOCK_FAILED; in erase_block()
291 part->blocks[block].free_sectors = 0; in erase_block()
292 part->blocks[block].used_sectors = 0; in erase_block()
297 part->blocks[block].state = BLOCK_ERASED; in erase_block()
298 part->blocks[block].free_sectors = part->data_sectors_per_block; in erase_block()
299 part->blocks[block].used_sectors = 0; in erase_block()
300 part->blocks[block].erases++; in erase_block()
302 rc = mtd_write(part->mbd.mtd, part->blocks[block].offset, in erase_block()
309 part->mbd.mtd->name, part->blocks[block].offset); in erase_block()
310 part->blocks[block].state = BLOCK_FAILED; in erase_block()
312 part->blocks[block].state = BLOCK_OK; in erase_block()
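erase_block() runs a small per-block state machine: the block is marked BLOCK_ERASING before mtd_erase(), and on success its counters are reset, its erase count bumped, and the RFD signature rewritten at the block start before it returns to BLOCK_OK; either failure path parks it in BLOCK_FAILED. A sketch of those transitions (state names follow the listing; erase_fn and write_magic_fn are hypothetical stand-ins for the MTD calls):

enum block_state { BLOCK_OK, BLOCK_ERASING, BLOCK_ERASED, BLOCK_FAILED };

struct block_info {
    enum block_state state;
    unsigned int free_sectors, used_sectors, erases;
};

/* Sketch of erase_block()'s state transitions; erase_fn() and
 * write_magic_fn() stand in for mtd_erase() and the mtd_write() that
 * restores the RFD signature at the start of the freshly erased block. */
static int erase_one(struct block_info *b, unsigned int data_sectors_per_block,
                     int (*erase_fn)(void), int (*write_magic_fn)(void))
{
    b->state = BLOCK_ERASING;
    b->free_sectors = 0;

    if (erase_fn()) {
        b->state = BLOCK_FAILED;
        b->free_sectors = 0;
        b->used_sectors = 0;
        return -1;
    }

    b->state = BLOCK_ERASED;
    b->free_sectors = data_sectors_per_block;
    b->used_sectors = 0;
    b->erases++;

    if (write_magic_fn()) {
        b->state = BLOCK_FAILED;
        return -1;
    }

    b->state = BLOCK_OK;
    return 0;
}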
321 static int move_block_contents(struct partition *part, int block_no, u_long *old_sector) in move_block_contents() argument
328 part->is_reclaiming = 1; in move_block_contents()
334 map = kmalloc(part->header_size, GFP_KERNEL); in move_block_contents()
338 rc = mtd_read(part->mbd.mtd, part->blocks[block_no].offset, in move_block_contents()
339 part->header_size, &retlen, (u_char *)map); in move_block_contents()
341 if (!rc && retlen != part->header_size) in move_block_contents()
346 "0x%lx\n", part->mbd.mtd->name, in move_block_contents()
347 part->blocks[block_no].offset); in move_block_contents()
352 for (i=0; i<part->data_sectors_per_block; i++) { in move_block_contents()
364 if (entry >= part->sector_count) in move_block_contents()
367 addr = part->blocks[block_no].offset + in move_block_contents()
368 (i + part->header_sectors_per_block) * SECTOR_SIZE; in move_block_contents()
372 if (!part->blocks[block_no].used_sectors--) { in move_block_contents()
373 rc = erase_block(part, block_no); in move_block_contents()
378 rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen, in move_block_contents()
387 part->mbd.mtd->name); in move_block_contents()
392 rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part, in move_block_contents()
404 part->is_reclaiming = 0; in move_block_contents()
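move_block_contents() re-reads the victim block's header and pushes every still-live sector back through the normal write path, so the data migrates to whichever block currently has room; entries marked free or deleted are skipped, an entry encoding logical sector 0 is translated back, and out-of-range entries are ignored. A sketch of that entry classification (the special marker values are assumptions about the RFD on-flash format):

#include <stdint.h>

#define SECTOR_FREE    0xffffu  /* assumed: erased, never written      */
#define SECTOR_DELETED 0x0000u  /* assumed: superseded copy            */
#define SECTOR_ZERO    0xfffeu  /* assumed: encodes logical sector 0   */

enum entry_kind { ENTRY_SKIP, ENTRY_LIVE };

/* Sketch: decide whether header map entry 'raw' still maps a live logical
 * sector, and if so which one, mirroring the reclaim copy loop. */
static enum entry_kind classify_entry(uint16_t raw, unsigned long sector_count,
                                      unsigned long *logical_out)
{
    unsigned long logical;

    if (raw == SECTOR_FREE || raw == SECTOR_DELETED)
        return ENTRY_SKIP;

    logical = (raw == SECTOR_ZERO) ? 0 : raw;
    if (logical >= sector_count)
        return ENTRY_SKIP;              /* corrupt entry: ignore it */

    *logical_out = logical;
    return ENTRY_LIVE;
}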
409 static int reclaim_block(struct partition *part, u_long *old_sector) in reclaim_block() argument
415 mtd_sync(part->mbd.mtd); in reclaim_block()
420 old_sector_block = *old_sector / part->block_size; in reclaim_block()
424 for (block=0; block<part->total_blocks; block++) { in reclaim_block()
427 if (block == part->reserved_block) in reclaim_block()
435 if (part->blocks[block].free_sectors) in reclaim_block()
438 this_score = part->blocks[block].used_sectors; in reclaim_block()
444 if (part->blocks[block].used_sectors == in reclaim_block()
445 part->data_sectors_per_block) in reclaim_block()
449 this_score += part->blocks[block].erases; in reclaim_block()
460 part->current_block = -1; in reclaim_block()
461 part->reserved_block = best_block; in reclaim_block()
465 part->blocks[best_block].used_sectors, in reclaim_block()
466 part->blocks[best_block].free_sectors); in reclaim_block()
468 if (part->blocks[best_block].used_sectors) in reclaim_block()
469 rc = move_block_contents(part, best_block, old_sector); in reclaim_block()
471 rc = erase_block(part, best_block); in reclaim_block()
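reclaim_block() picks the cheapest victim: the reserved block and any block that still has free sectors are skipped, a completely full block is only considered when it holds the sector being overwritten (one of its sectors is about to become stale, hence the score discount), and the erase count is added in as a crude wear-levelling term. A standalone sketch of the scoring loop (field names mirror the matches above; surrounding sync and locking are omitted):

#include <limits.h>

struct blk {
    unsigned int free_sectors, used_sectors, erases;
};

/* Sketch: return the index of the block that is cheapest to reclaim,
 * or -1 if none qualifies. 'old_block' is the block holding the sector
 * being rewritten (-1 if there is none), 'reserved' the spare block. */
static int pick_victim(const struct blk *blocks, int total_blocks,
                       unsigned int data_sectors_per_block,
                       int reserved, int old_block)
{
    int block, best_block = -1;
    int best_score = INT_MAX;

    for (block = 0; block < total_blocks; block++) {
        int score;

        if (block == reserved || blocks[block].free_sectors)
            continue;                    /* still has free space: keep it */

        score = blocks[block].used_sectors;

        if (block == old_block)
            score--;                     /* one sector is about to be stale */
        else if (blocks[block].used_sectors == data_sectors_per_block)
            continue;                    /* completely full: nothing to gain */

        score += blocks[block].erases;   /* crude wear levelling */

        if (score < best_score) {
            best_block = block;
            best_score = score;
        }
    }
    return best_block;
}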
481 static int find_free_block(struct partition *part) in find_free_block() argument
485 block = part->current_block == -1 ? in find_free_block()
486 jiffies % part->total_blocks : part->current_block; in find_free_block()
490 if (part->blocks[block].free_sectors && in find_free_block()
491 block != part->reserved_block) in find_free_block()
494 if (part->blocks[block].state == BLOCK_UNUSED) in find_free_block()
495 erase_block(part, block); in find_free_block()
497 if (++block >= part->total_blocks) in find_free_block()
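find_free_block() searches round-robin, starting from the current block or, when there is none, from a jiffies-derived index, which spreads early wear instead of always filling from block 0; the reserved block is never handed out, and a block still in the BLOCK_UNUSED state gets erased (and thereby given a header) as it is passed. A sketch of the wrap-around search order only, with the unused-block erase left out:

/* Sketch: visit indices start, start+1, ..., wrapping once around, and
 * return the first block with free space that is not the reserved one.
 * Returns -1 when a full circle finds nothing. */
static int find_free(const unsigned int *free_sectors, int total_blocks,
                     int reserved, int start)
{
    int block = start, stop = start;

    do {
        if (free_sectors[block] && block != reserved)
            return block;
        if (++block >= total_blocks)
            block = 0;
    } while (block != stop);

    return -1;
}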
505 static int find_writable_block(struct partition *part, u_long *old_sector) in find_writable_block() argument
510 block = find_free_block(part); in find_writable_block()
513 if (!part->is_reclaiming) { in find_writable_block()
514 rc = reclaim_block(part, old_sector); in find_writable_block()
518 block = find_free_block(part); in find_writable_block()
527 rc = mtd_read(part->mbd.mtd, part->blocks[block].offset, in find_writable_block()
528 part->header_size, &retlen, in find_writable_block()
529 (u_char *)part->header_cache); in find_writable_block()
531 if (!rc && retlen != part->header_size) in find_writable_block()
536 "0x%lx\n", part->mbd.mtd->name, in find_writable_block()
537 part->blocks[block].offset); in find_writable_block()
541 part->current_block = block; in find_writable_block()
547 static int mark_sector_deleted(struct partition *part, u_long old_addr) in mark_sector_deleted() argument
554 block = old_addr / part->block_size; in mark_sector_deleted()
555 offset = (old_addr % part->block_size) / SECTOR_SIZE - in mark_sector_deleted()
556 part->header_sectors_per_block; in mark_sector_deleted()
558 addr = part->blocks[block].offset + in mark_sector_deleted()
560 rc = mtd_write(part->mbd.mtd, addr, sizeof(del), &retlen, in mark_sector_deleted()
568 "0x%lx\n", part->mbd.mtd->name, addr); in mark_sector_deleted()
571 if (block == part->current_block) in mark_sector_deleted()
572 part->header_cache[offset + HEADER_MAP_OFFSET] = del; in mark_sector_deleted()
574 part->blocks[block].used_sectors--; in mark_sector_deleted()
576 if (!part->blocks[block].used_sectors && in mark_sector_deleted()
577 !part->blocks[block].free_sectors) in mark_sector_deleted()
578 rc = erase_block(part, block); in mark_sector_deleted()
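mark_sector_deleted() never touches the old data itself: it only overwrites the 16-bit header map entry describing the old copy, so the entry's block and map slot must first be recovered from the old flash address. The slot arithmetic as a standalone sketch (SECTOR_SIZE and HEADER_MAP_OFFSET values are assumptions, and the entry-address expression is reconstructed because its continuation line is not listed above):

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SIZE       512u
#define HEADER_MAP_OFFSET 3u   /* assumed: u16 entries before the map */

/* Sketch: given the flash address of a data sector, recover the block it
 * lives in, its index within that block's map, and the flash address of
 * the u16 map entry that gets overwritten with the deleted marker. */
static void locate_map_entry(unsigned long old_addr, unsigned long block_size,
                             unsigned int header_sectors_per_block,
                             unsigned long *block, unsigned long *map_index,
                             unsigned long *entry_addr)
{
    *block = old_addr / block_size;
    *map_index = (old_addr % block_size) / SECTOR_SIZE
                 - header_sectors_per_block;
    *entry_addr = *block * block_size
                  + (HEADER_MAP_OFFSET + *map_index) * sizeof(uint16_t);
}

int main(void)
{
    unsigned long block, idx, entry;

    locate_map_entry(0x20a00, 0x10000, 1, &block, &idx, &entry);
    printf("block %lu, map index %lu, entry at 0x%lx\n", block, idx, entry);
    return 0;
}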
584 static int find_free_sector(const struct partition *part, const struct block *block) in find_free_sector() argument
588 i = stop = part->data_sectors_per_block - block->free_sectors; in find_free_sector()
591 if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]) in find_free_sector()
595 if (++i == part->data_sectors_per_block) in find_free_sector()
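find_free_sector() relies on sectors being allocated in order within a block: it starts scanning the cached header at the first slot that can still be free (data_sectors_per_block - free_sectors) and wraps around once, looking for an entry that still reads as erased flash. A sketch of that scan (the free-marker value and the host-endian array are simplifications; the driver goes through le16_to_cpu()):

#include <stdint.h>

#define SECTOR_FREE 0xffffu  /* assumed: erased flash, never written */

/* Sketch: scan the header map of the current block for a free slot,
 * starting where free slots are expected to begin and wrapping once. */
static int find_free_slot(const uint16_t *map_entries,
                          unsigned int data_sectors_per_block,
                          unsigned int free_sectors)
{
    unsigned int i, stop;

    i = stop = data_sectors_per_block - free_sectors;

    do {
        if (map_entries[i] == SECTOR_FREE)
            return (int)i;
        if (++i == data_sectors_per_block)
            i = 0;
    } while (i != stop);

    return -1;
}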
605 struct partition *part = (struct partition*)dev; in do_writesect() local
613 if (part->current_block == -1 || in do_writesect()
614 !part->blocks[part->current_block].free_sectors) { in do_writesect()
616 rc = find_writable_block(part, old_addr); in do_writesect()
621 block = &part->blocks[part->current_block]; in do_writesect()
623 i = find_free_sector(part, block); in do_writesect()
630 addr = (i + part->header_sectors_per_block) * SECTOR_SIZE + in do_writesect()
632 rc = mtd_write(part->mbd.mtd, addr, SECTOR_SIZE, &retlen, in do_writesect()
640 part->mbd.mtd->name, addr); in do_writesect()
644 part->sector_map[sector] = addr; in do_writesect()
648 part->header_cache[i + HEADER_MAP_OFFSET] = entry; in do_writesect()
651 rc = mtd_write(part->mbd.mtd, addr, sizeof(entry), &retlen, in do_writesect()
659 part->mbd.mtd->name, addr); in do_writesect()
671 struct partition *part = (struct partition*)dev; in rfd_ftl_writesect() local
678 if (part->reserved_block == -1) { in rfd_ftl_writesect()
683 if (sector >= part->sector_count) { in rfd_ftl_writesect()
688 old_addr = part->sector_map[sector]; in rfd_ftl_writesect()
701 part->sector_map[sector] = -1; in rfd_ftl_writesect()
704 rc = mark_sector_deleted(part, old_addr); in rfd_ftl_writesect()
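Taken together, do_writesect() and rfd_ftl_writesect() implement out-of-place update: new data goes into a free slot of the current writable block, sector_map[] is repointed, and only then is any previous copy marked deleted; a sector written as all zeroes is simply unmapped, since unmapped sectors already read back as zeroes. A toy sketch of that ordering against in-memory stand-ins (ftl_sim, next_free and mark_deleted are illustrative names, not from the driver):

#include <stdint.h>
#include <string.h>

#define SECTOR_SIZE 512u  /* assumed */

struct ftl_sim {
    unsigned long *sector_map;  /* logical sector -> flash offset, or -1     */
    uint8_t *flash;             /* in-memory stand-in for the MTD            */
    unsigned long next_free;    /* toy allocator; the driver instead uses
                                   find_writable_block()/find_free_sector()  */
};

static void mark_deleted(unsigned long old_addr)
{
    /* Stand-in for mark_sector_deleted(): the driver overwrites the old
     * copy's u16 header map entry on flash with the deleted marker. */
    (void)old_addr;
}

/* Sketch of the update ordering: place the new copy, repoint the map,
 * then retire the old copy. Error handling and block bookkeeping omitted. */
static void write_sector(struct ftl_sim *p, unsigned long sector,
                         const uint8_t *buf)
{
    unsigned long old_addr = p->sector_map[sector];
    unsigned int i;

    for (i = 0; i < SECTOR_SIZE; i++)
        if (buf[i])
            break;

    if (i < SECTOR_SIZE) {                       /* sector has real data */
        unsigned long addr = p->next_free;
        p->next_free += SECTOR_SIZE;
        memcpy(p->flash + addr, buf, SECTOR_SIZE);
        p->sector_map[sector] = addr;
    } else {                                     /* all zeroes: just unmap */
        p->sector_map[sector] = (unsigned long)-1;
    }

    if (old_addr != (unsigned long)-1)
        mark_deleted(old_addr);
}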
712 struct partition *part = (struct partition*)dev; in rfd_ftl_getgeo() local
716 geo->cylinders = part->cylinders; in rfd_ftl_getgeo()
723 struct partition *part; in rfd_ftl_add_mtd() local
728 part = kzalloc(sizeof(struct partition), GFP_KERNEL); in rfd_ftl_add_mtd()
729 if (!part) in rfd_ftl_add_mtd()
732 part->mbd.mtd = mtd; in rfd_ftl_add_mtd()
735 part->block_size = block_size; in rfd_ftl_add_mtd()
741 part->block_size = mtd->erasesize; in rfd_ftl_add_mtd()
744 if (scan_header(part) == 0) { in rfd_ftl_add_mtd()
745 part->mbd.size = part->sector_count; in rfd_ftl_add_mtd()
746 part->mbd.tr = tr; in rfd_ftl_add_mtd()
747 part->mbd.devnum = -1; in rfd_ftl_add_mtd()
749 part->mbd.readonly = 1; in rfd_ftl_add_mtd()
750 else if (part->errors) { in rfd_ftl_add_mtd()
753 part->mbd.readonly = 1; in rfd_ftl_add_mtd()
759 if (!add_mtd_blktrans_dev((void*)part)) in rfd_ftl_add_mtd()
763 kfree(part); in rfd_ftl_add_mtd()
768 struct partition *part = (struct partition*)dev; in rfd_ftl_remove_dev() local
771 for (i=0; i<part->total_blocks; i++) { in rfd_ftl_remove_dev()
773 part->mbd.mtd->name, i, part->blocks[i].erases); in rfd_ftl_remove_dev()
777 vfree(part->sector_map); in rfd_ftl_remove_dev()
778 kfree(part->header_cache); in rfd_ftl_remove_dev()
779 kfree(part->blocks); in rfd_ftl_remove_dev()