Selected lines from fs/jffs2/scan.c matching +full:nand +full:- +full:style
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 */

	(*(noise))--; \

/* Returning an error will abort the mount - bad checksums etc. should just mark the space ... */
/* in min_free() */
	if (!jffs2_can_mark_obsolete(c) && min < c->wbuf_pagesize)
		return c->wbuf_pagesize;
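/*
 * A minimal sketch of the whole helper the two lines above come from,
 * assuming it follows the usual mainline shape: the smallest free space
 * worth writing into is two raw-inode headers, raised to the write-buffer
 * page size on flash where nodes cannot be marked obsolete in place.
 * Reconstructed for context, not quoted from this file.
 */
static uint32_t min_free(struct jffs2_sb_info *c)
{
	uint32_t min = 2 * sizeof(struct jffs2_raw_inode);

	/* on write-buffered flash we cannot dirty less than a page */
	if (!jffs2_can_mark_obsolete(c) && min < c->wbuf_pagesize)
		return c->wbuf_pagesize;

	return min;
}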
/* in file_dirty() */
	if ((ret = jffs2_scan_dirty_space(c, jeb, jeb->free_size)))
		return ret;

	jeb->dirty_size += jeb->wasted_size;
	c->dirty_size += jeb->wasted_size;
	c->wasted_size -= jeb->wasted_size;
	jeb->wasted_size = 0;
	if (VERYDIRTY(c, jeb->dirty_size)) {
		list_add(&jeb->list, &c->very_dirty_list);
	} else {
		list_add(&jeb->list, &c->dirty_list);
	}
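/*
 * Reading the file_dirty() fragment above: the block's remaining free space
 * is written off as dirty, any wasted space is folded back into the dirty
 * totals, and the block is then filed on very_dirty_list or dirty_list
 * depending on the VERYDIRTY() threshold, so the garbage collector will
 * treat it as reclaimable.
 */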
/* in jffs2_scan_medium() */
	ret = mtd_point(c->mtd, 0, c->mtd->size, &pointlen,
	if (!ret && pointlen < c->mtd->size) {
	mtd_unpoint(c->mtd, 0, pointlen);
	if (ret && ret != -EOPNOTSUPP)

	/* For NAND it's quicker to read a whole eraseblock at a time, ... */
	try_size = c->sector_size;

	flashbuf = mtd_kmalloc_up_to(c->mtd, &try_size);
		return -ENOMEM;

	ret = -ENOMEM;

	for (i=0; i<c->nr_blocks; i++) {
		struct jffs2_eraseblock *jeb = &c->blocks[i];

		ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),

		list_add(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;

		if (!jeb->dirty_size) {
			list_add(&jeb->list, &c->free_list);
			c->nr_free_blocks++;

		jffs2_dbg(1, "Adding all-dirty block at 0x%08x to erase_pending_list\n",
			  jeb->offset);
		list_add(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;

		list_add(&jeb->list, &c->clean_list);

		if (jeb->free_size > min_free(c) &&
		    (!c->nextblock || c->nextblock->free_size < jeb->free_size)) {
			if (c->nextblock) {
				ret = file_dirty(c, c->nextblock);
				jffs2_sum_reset_collected(c->summary);

				  __func__, jeb->offset);
			c->nextblock = jeb;

		/* Nothing valid - not even a clean marker. Needs erasing. */
			  jeb->offset);
		list_add(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;

		jffs2_dbg(1, "Block at 0x%08x is bad\n", jeb->offset);
		list_add(&jeb->list, &c->bad_list);
		c->bad_size += c->sector_size;
		c->free_size -= c->sector_size;

	if (c->nextblock && (c->nextblock->dirty_size)) {
		c->nextblock->wasted_size += c->nextblock->dirty_size;
		c->wasted_size += c->nextblock->dirty_size;
		c->dirty_size -= c->nextblock->dirty_size;
		c->nextblock->dirty_size = 0;

	if (!jffs2_can_mark_obsolete(c) && c->wbuf_pagesize && c->nextblock &&
	    (c->nextblock->free_size % c->wbuf_pagesize)) {
		/* ... contains data, and the end of the data isn't page-aligned, ... */
		uint32_t skip = c->nextblock->free_size % c->wbuf_pagesize;

		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);

		jffs2_scan_dirty_space(c, c->nextblock, skip);

	if (c->nr_erasing_blocks) {
		if (!c->used_size && !c->unchecked_size &&
		    ((c->nr_free_blocks+empty_blocks+bad_blocks) != c->nr_blocks || bad_blocks == c->nr_blocks)) {
			pr_notice("empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",
				  empty_blocks, bad_blocks, c->nr_blocks);
			ret = -EIO;

		spin_lock(&c->erase_completion_lock);

		spin_unlock(&c->erase_completion_lock);

	mtd_unpoint(c->mtd, 0, c->mtd->size);
/* in jffs2_fill_scan_buf() */
	jffs2_dbg(1, "mtd->read(0x%x bytes from 0x%x) returned %d\n",

	return -EIO;
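/*
 * A minimal sketch of the read helper the two lines above belong to, assuming
 * it follows the usual mainline shape: read via mtd_read(), report a failure,
 * and treat a short read as -EIO. Reconstructed for context, not quoted from
 * this file.
 */
static int jffs2_fill_scan_buf(struct jffs2_sb_info *c, void *buf,
			       uint32_t ofs, uint32_t len)
{
	size_t retlen;
	int ret;

	ret = mtd_read(c->mtd, ofs, len, &retlen, buf);
	if (ret) {
		jffs2_dbg(1, "mtd->read(0x%x bytes from 0x%x) returned %d\n",
			  len, ofs, ret);
		return ret;
	}
	if (retlen < len) {
		/* a short read leaves the scan buffer unusable */
		jffs2_dbg(1, "Read at 0x%x gave only 0x%zx bytes\n",
			  ofs, retlen);
		return -EIO;
	}
	return 0;
}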
/* in jffs2_scan_classify_jeb() */
	if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size
	    && (!jeb->first_node || !ref_next(jeb->first_node)))

	else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) {
		c->dirty_size -= jeb->dirty_size;
		c->wasted_size += jeb->dirty_size;
		jeb->wasted_size += jeb->dirty_size;
		jeb->dirty_size = 0;

	} else if (jeb->used_size || jeb->unchecked_size)
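/*
 * Reading the jffs2_scan_classify_jeb() fragment above: a block whose only
 * accounted node is the (padded) cleanmarker is one case; a block whose
 * non-used remainder is too small to count as dirty (ISDIRTY()) has that
 * remainder reclassified from dirty to wasted space; and a block with other
 * used or unchecked data falls through to the dirtier classifications.
 */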
/* in jffs2_scan_xattr_node() */
	crc = crc32(0, rx, sizeof(struct jffs2_raw_xattr) - 4);
	if (crc != je32_to_cpu(rx->node_crc)) {
		  ofs, je32_to_cpu(rx->node_crc), crc);
		if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen))))

	xid = je32_to_cpu(rx->xid);
	version = je32_to_cpu(rx->version);

		 + rx->name_len + 1 + je16_to_cpu(rx->value_len));
	if (totlen != je32_to_cpu(rx->totlen)) {
		  ofs, je32_to_cpu(rx->totlen), totlen);
		if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen))))

	if (xd->version > version) {
		raw->next_in_ino = xd->node->next_in_ino;
		xd->node->next_in_ino = raw;

		xd->version = version;
		xd->xprefix = rx->xprefix;
		xd->name_len = rx->name_len;
		xd->value_len = je16_to_cpu(rx->value_len);
		xd->data_crc = je32_to_cpu(rx->data_crc);

	jffs2_sum_add_xattr_mem(s, rx, ofs - jeb->offset);
		  ofs, xd->xid, xd->version);
/* in jffs2_scan_xref_node() */
	crc = crc32(0, rr, sizeof(*rr) - 4);
	if (crc != je32_to_cpu(rr->node_crc)) {
		  ofs, je32_to_cpu(rr->node_crc), crc);
		if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rr->totlen)))))

	if (PAD(sizeof(struct jffs2_raw_xref)) != je32_to_cpu(rr->totlen)) {
		  ofs, je32_to_cpu(rr->totlen),
		if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rr->totlen))))

		return -ENOMEM;

	/*
	 * ...
	 * ref->xid is used to store 32bit xid, xd is not used
	 * ref->ino is used to store 32bit inode-number, ic is not used
	 * ...
	 * are exclusive. In a similar way, ref->next is temporarily
	 * used to chain all xattr_ref objects. It's re-chained to
	 * ...
	 */
	ref->ino = je32_to_cpu(rr->ino);
	ref->xid = je32_to_cpu(rr->xid);
	ref->xseqno = je32_to_cpu(rr->xseqno);
	if (ref->xseqno > c->highest_xseqno)
		c->highest_xseqno = (ref->xseqno & ~XREF_DELETE_MARKER);
	ref->next = c->xref_temp;
	c->xref_temp = ref;

	jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rr->totlen)), (void *)ref);

	jffs2_sum_add_xref_mem(s, rr, ofs - jeb->offset);
		  ofs, ref->xid, ref->ino);
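/*
 * Reading the jffs2_scan_xref_node() fragment above: at scan time each xref
 * node only records raw inode and xid numbers and is parked on the temporary
 * c->xref_temp chain; the links to the real inode cache and xattr datum are
 * presumably resolved later when the xattr subsystem is built. The highest
 * xseqno seen is tracked with the delete-marker bit masked off.
 */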
/* ... the flash, XIP-style */

/* in jffs2_scan_eraseblock() */
	ofs = jeb->offset;
	prevofs = jeb->offset - 1;

	if (mtd_block_isbad(c->mtd, jeb->offset))

	sm = (void *)buf + c->sector_size - sizeof(*sm);
	if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) {
		sumptr = buf + je32_to_cpu(sm->offset);
		sumlen = c->sector_size - je32_to_cpu(sm->offset);

	/* If NAND flash, read a whole page of it. Else just the end */
	if (c->wbuf_pagesize)
		buf_len = c->wbuf_pagesize;

	err = jffs2_fill_scan_buf(c, buf + buf_size - buf_len,
				  jeb->offset + c->sector_size - buf_len,

	sm = (void *)buf + buf_size - sizeof(*sm);
	if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) {
		sumlen = c->sector_size - je32_to_cpu(sm->offset);
		sumptr = buf + buf_size - sumlen;

	/* sm->offset maybe wrong but MAGIC maybe right */
	if (sumlen > c->sector_size)

		return -ENOMEM;
	memcpy(sumptr + sumlen - buf_len, buf + buf_size - buf_len, buf_len);

				  jeb->offset + c->sector_size - sumlen,
				  sumlen - buf_len);
	buf_ofs = jeb->offset;

	/* This is the XIP case -- we're reading _directly_ from the flash chip */
	buf_len = c->sector_size;

	buf_len = EMPTY_SCAN_SIZE(c->sector_size);

	max_ofs = EMPTY_SCAN_SIZE(c->sector_size);

		  jeb->offset);
	if (c->cleanmarker_size == 0)
		return BLK_STATE_CLEANMARKER; /* don't bother with re-erase */

	jffs2_dbg(1, "Free space at %08x ends at %08x\n", jeb->offset,
		  jeb->offset + ofs);

	ofs += jeb->offset;

	dbg_summary("no summary found in jeb 0x%08x. Apply original scan.\n", jeb->offset);

	while (ofs < jeb->offset + c->sector_size) {

		pr_warn("Eep. ofs 0x%08x not word-aligned!\n", ofs);

		if (jeb->offset + c->sector_size < ofs + sizeof(*node)) {
			  jeb->offset, c->sector_size, ofs,
			if ((err = jffs2_scan_dirty_space(c, jeb, (jeb->offset + c->sector_size)-ofs)))

		buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);

		node = (struct jffs2_unknown_node *)&buf[ofs-buf_ofs];

		if (*(uint32_t *)(&buf[ofs-buf_ofs]) == 0xffffffff) {
			scan_end = min_t(uint32_t, EMPTY_SCAN_SIZE(c->sector_size)/8, buf_len);
			inbuf_ofs = ofs - buf_ofs;

			if ((err = jffs2_scan_dirty_space(c, jeb, ofs-empty_start)))

			if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) &&
			    c->cleanmarker_size && !jeb->dirty_size && !ref_next(jeb->first_node)) {
				  EMPTY_SCAN_SIZE(c->sector_size));

			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
		if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) {

		if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) {

		if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) {

		if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) {
			  je16_to_cpu(node->magic));

		crcnode.magic = node->magic;
		crcnode.nodetype = cpu_to_je16(je16_to_cpu(node->nodetype) | JFFS2_NODE_ACCURATE);
		crcnode.totlen = node->totlen;
		hdr_crc = crc32(0, &crcnode, sizeof(crcnode)-4);

		if (hdr_crc != je32_to_cpu(node->hdr_crc)) {
			  ofs, je16_to_cpu(node->magic),
			  je16_to_cpu(node->nodetype),
			  je32_to_cpu(node->totlen),
			  je32_to_cpu(node->hdr_crc),

		if (ofs + je32_to_cpu(node->totlen) > jeb->offset + c->sector_size) {
			  ofs, je32_to_cpu(node->totlen));

		if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) {
			if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
			ofs += PAD(je32_to_cpu(node->totlen));

		switch (je16_to_cpu(node->nodetype)) {

			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);

			ofs += PAD(je32_to_cpu(node->totlen));

			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				  je32_to_cpu(node->totlen), buf_len,

			ofs += PAD(je32_to_cpu(node->totlen));

			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				  je32_to_cpu(node->totlen), buf_len,

			ofs += PAD(je32_to_cpu(node->totlen));

			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				  je32_to_cpu(node->totlen), buf_len,

			ofs += PAD(je32_to_cpu(node->totlen));

			if (je32_to_cpu(node->totlen) != c->cleanmarker_size) {
				  ofs, je32_to_cpu(node->totlen),
				  c->cleanmarker_size);
			} else if (jeb->first_node) {
				  ofs, jeb->offset);

			jffs2_link_node_ref(c, jeb, ofs | REF_NORMAL, c->cleanmarker_size, NULL);
			ofs += PAD(c->cleanmarker_size);

			jffs2_sum_add_padding_mem(s, je32_to_cpu(node->totlen));
			if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
			ofs += PAD(je32_to_cpu(node->totlen));
			switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) {

				pr_notice("Read-only compatible feature node (0x%04x) found at offset 0x%08x\n",
					  je16_to_cpu(node->nodetype), ofs);
				c->flags |= JFFS2_SB_FLAG_RO;
					return -EROFS;
				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
				ofs += PAD(je32_to_cpu(node->totlen));

					  je16_to_cpu(node->nodetype), ofs);
				return -EINVAL;

					  je16_to_cpu(node->nodetype), ofs);
				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
				ofs += PAD(je32_to_cpu(node->totlen));

					  je16_to_cpu(node->nodetype), ofs);
				jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(node->totlen)), NULL);

				ofs += PAD(je32_to_cpu(node->totlen));

	if (PAD(s->sum_size + JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size) {

		  jeb->offset, jeb->free_size, jeb->dirty_size,
		  jeb->unchecked_size, jeb->used_size, jeb->wasted_size);

	if (jeb->wasted_size) {
		jeb->dirty_size += jeb->wasted_size;
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->wasted_size = 0;
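/*
 * Taken together, the jffs2_scan_eraseblock() fragments above: first look for
 * a summary node at the end of the block (reading a whole write-buffer page
 * on NAND), and otherwise fall back to the original node-by-node scan,
 * checking each header CRC (computed over the header minus the trailing
 * 4-byte hdr_crc field, with the ACCURATE bit forced on) before accounting
 * the node or marking its space dirty; at the end any wasted space is folded
 * back into the dirty totals.
 */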
/* in jffs2_scan_make_ino_cache() */
	if (ino > c->highest_ino)
		c->highest_ino = ino;

	ic->ino = ino;
	ic->nodes = (void *)ic;

	ic->pino_nlink = 1;
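/*
 * A sketch of how the jffs2_scan_make_ino_cache() lines above likely fit
 * together, following the usual mainline shape: reuse an existing inode
 * cache if one exists, track the highest inode number seen, and otherwise
 * allocate a fresh cache whose ->nodes list initially points back at itself.
 * Reconstructed for context, not quoted from this file.
 */
static struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c,
							   uint32_t ino)
{
	struct jffs2_inode_cache *ic;

	ic = jffs2_get_ino_cache(c, ino);
	if (ic)
		return ic;

	if (ino > c->highest_ino)
		c->highest_ino = ino;

	ic = jffs2_alloc_inode_cache();
	if (!ic)
		return NULL;
	memset(ic, 0, sizeof(*ic));

	ic->ino = ino;
	ic->nodes = (void *)ic;	/* empty node list terminates at the cache itself */
	jffs2_add_ino_cache(c, ic);
	if (ino == 1)
		ic->pino_nlink = 1;	/* special-case the root inode */

	return ic;
}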
/* in jffs2_scan_inode_node() */
	uint32_t crc, ino = je32_to_cpu(ri->ino);

	/*
	 * ... this node; we can do all the CRC checking etc. later. There's a
	 * tradeoff here -- ... memory, then building all our in-core data
	 * structures and freeing the extra ...
	 */

	crc = crc32(0, ri, sizeof(*ri)-8);
	if (crc != je32_to_cpu(ri->node_crc)) {
		  __func__, ofs, je32_to_cpu(ri->node_crc), crc);
			PAD(je32_to_cpu(ri->totlen)));

		return -ENOMEM;

	jffs2_link_node_ref(c, jeb, ofs | REF_UNCHECKED, PAD(je32_to_cpu(ri->totlen)), ic);

	jffs2_dbg(1, "Node is ino #%u, version %d. Range 0x%x-0x%x\n",
		  je32_to_cpu(ri->ino), je32_to_cpu(ri->version),
		  je32_to_cpu(ri->offset),
		  je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize));

	pseudo_random += je32_to_cpu(ri->version);

	jffs2_sum_add_inode_mem(s, ri, ofs - jeb->offset);
/* in jffs2_scan_dirent_node() */
	crc = crc32(0, rd, sizeof(*rd)-8);

	if (crc != je32_to_cpu(rd->node_crc)) {
		  __func__, ofs, je32_to_cpu(rd->node_crc), crc);

		if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen)))))

	pseudo_random += je32_to_cpu(rd->version);

	checkedlen = strnlen(rd->name, rd->nsize);
	if (checkedlen < rd->nsize) {

		return -ENOMEM;

	memcpy(&fd->name, rd->name, checkedlen);
	fd->name[checkedlen] = 0;

	crc = crc32(0, fd->name, checkedlen);
	if (crc != je32_to_cpu(rd->name_crc)) {
		  __func__, ofs, je32_to_cpu(rd->name_crc), crc);
		  fd->name, je32_to_cpu(rd->ino));

		if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen)))))

	ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(rd->pino));
		return -ENOMEM;

	fd->raw = jffs2_link_node_ref(c, jeb, ofs | dirent_node_state(rd),
				      PAD(je32_to_cpu(rd->totlen)), ic);

	fd->next = NULL;
	fd->version = je32_to_cpu(rd->version);
	fd->ino = je32_to_cpu(rd->ino);
	fd->nhash = full_name_hash(NULL, fd->name, checkedlen);
	fd->type = rd->type;
	jffs2_add_fd_to_list(c, fd, &ic->scan_dents);

	jffs2_sum_add_dirent_mem(s, rd, ofs - jeb->offset);
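/*
 * Reading the jffs2_scan_dirent_node() fragment above: the entry name is
 * copied with a bounded length and NUL-terminated, then verified against
 * name_crc; on a mismatch the node's space appears to be simply marked dirty.
 * Otherwise the dirent is linked into the parent's inode cache through
 * ic->scan_dents for the later build pass, with its name hash precomputed.
 */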
/* in rotate_list() */
	struct list_head *n = head->next;

	while (count--) {
		n = n->next;
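/*
 * A sketch of the complete rotate_list() the three lines above belong to,
 * assuming the usual mainline shape: detach the list head, advance `count`
 * entries, and splice the head back in so the list effectively starts at a
 * different element. Reconstructed for context, not quoted from this file.
 */
static void rotate_list(struct list_head *head, uint32_t count)
{
	struct list_head *n = head->next;

	list_del(head);
	while (count--) {
		n = n->next;
	}
	list_add(head, n);
}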
/* in jffs2_rotate_lists() */
	x = count_list(&c->clean_list);
	rotate_list((&c->clean_list), rotateby);

	x = count_list(&c->very_dirty_list);
	rotate_list((&c->very_dirty_list), rotateby);

	x = count_list(&c->dirty_list);
	rotate_list((&c->dirty_list), rotateby);

	x = count_list(&c->erasable_list);
	rotate_list((&c->erasable_list), rotateby);

	if (c->nr_erasing_blocks) {
		rotateby = pseudo_random % c->nr_erasing_blocks;
		rotate_list((&c->erase_pending_list), rotateby);

	if (c->nr_free_blocks) {
		rotateby = pseudo_random % c->nr_free_blocks;
		rotate_list((&c->free_list), rotateby);
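/*
 * Reading the jffs2_rotate_lists() fragment above: each block list is rotated
 * by an amount derived from the pseudo_random value accumulated during the
 * scan, so that successive mounts do not always hand out or garbage-collect
 * the same erase blocks first, which helps even out wear.
 */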