Searched refs:fragments (Results 1 – 7 of 7) sorted by relevance
76   unsigned int fragments;  in squashfs_fill_super() local
182  TRACE("Number of fragments %d\n", le32_to_cpu(sblk->fragments));  in squashfs_fill_super()
218  fragments = le32_to_cpu(sblk->fragments);  in squashfs_fill_super()
219  if (fragments == 0)  in squashfs_fill_super()
231  le64_to_cpu(sblk->fragment_table_start), fragments);  in squashfs_fill_super()
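The squashfs_fill_super() hits show the usual pattern for an on-disk count: convert the little-endian field with le32_to_cpu(), log it, and treat zero as "no fragment table to read". Below is a minimal stand-alone sketch of that pattern; the struct, the byte-swap helper, and main() are illustrative stand-ins, not the kernel's own definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for the kernel's __le32 type and le32_to_cpu(). */
typedef uint32_t le32;

static uint32_t le32_to_cpu_sketch(le32 v)
{
	const uint8_t *b = (const uint8_t *)&v;	/* bytes are little-endian on disk */
	return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
	       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

/* Cut-down stand-in for the on-disk superblock fields seen in the hits. */
struct sketch_super_block {
	le32 fragments;
	uint64_t fragment_table_start;
};

int main(void)
{
	struct sketch_super_block sblk;
	const uint8_t on_disk[4] = { 3, 0, 0, 0 };	/* little-endian 3 */

	memcpy(&sblk.fragments, on_disk, sizeof(on_disk));

	unsigned int fragments = le32_to_cpu_sketch(sblk.fragments);
	printf("Number of fragments %u\n", fragments);

	if (fragments == 0)
		printf("no fragments: skip reading the fragment index table\n");
	return 0;
}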
76  u64 fragment_table_start, unsigned int fragments)  in squashfs_read_fragment_index_table() argument
78  unsigned int length = SQUASHFS_FRAGMENT_INDEX_BYTES(fragments);  in squashfs_read_fragment_index_table()
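The length computed at line 78 is the size of the fragment index table: fragment entries are packed into fixed-size metadata blocks, and the index holds one 64-bit location per such block. A stand-alone sketch of that arithmetic follows; the 8 KiB metadata-block size and 16-byte entry size are restated here as assumptions rather than pulled from squashfs_fs.h.

#include <stdint.h>
#include <stdio.h>

#define METADATA_SIZE		8192u	/* assumed squashfs metadata block size */
#define FRAGMENT_ENTRY_SIZE	16u	/* assumed on-disk fragment entry size */

/* Bytes occupied by all fragment entries. */
static uint64_t fragment_bytes(unsigned int fragments)
{
	return (uint64_t)fragments * FRAGMENT_ENTRY_SIZE;
}

/* Number of metadata blocks needed to hold them, rounded up. */
static uint64_t fragment_indexes(unsigned int fragments)
{
	return (fragment_bytes(fragments) + METADATA_SIZE - 1) / METADATA_SIZE;
}

/* Length of the index table: one 64-bit block pointer per metadata block. */
static uint64_t fragment_index_bytes(unsigned int fragments)
{
	return fragment_indexes(fragments) * sizeof(uint64_t);
}

int main(void)
{
	unsigned int fragments = 1000;

	printf("%u fragments -> %llu entry bytes, %llu index blocks, %llu index-table bytes\n",
	       fragments,
	       (unsigned long long)fragment_bytes(fragments),
	       (unsigned long long)fragment_indexes(fragments),
	       (unsigned long long)fragment_index_bytes(fragments));
	return 0;
}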
40  int "Number of fragments cached" if SQUASHFS_EMBEDDED
44  By default SquashFS caches the last 3 fragments read from
46  has to re-read fragments less often from disk, at the expense
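The Kconfig help above describes a small cache of the most recently read fragments that trades memory for fewer disk reads. The sketch below illustrates that idea only; it is not the squashfs cache code, and the slot layout, round-robin replacement, and names are made up for the illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CACHE_SLOTS	3	/* keep the last 3 fragments, as in the default */
#define FRAG_SIZE	4096	/* pretend fragments unpack to 4 KiB */

struct frag_slot {
	int valid;
	uint64_t frag_index;	/* which fragment this slot holds */
	uint8_t data[FRAG_SIZE];
};

static struct frag_slot cache[CACHE_SLOTS];
static unsigned int next_victim;	/* simple round-robin replacement */

/* Stand-in for "read and decompress a fragment from disk". */
static void read_fragment_from_disk(uint64_t frag_index, uint8_t *out)
{
	memset(out, (int)(frag_index & 0xff), FRAG_SIZE);
	printf("disk read for fragment %llu\n", (unsigned long long)frag_index);
}

static const uint8_t *get_fragment(uint64_t frag_index)
{
	for (unsigned int i = 0; i < CACHE_SLOTS; i++)
		if (cache[i].valid && cache[i].frag_index == frag_index)
			return cache[i].data;	/* cache hit: no disk I/O */

	struct frag_slot *slot = &cache[next_victim];
	next_victim = (next_victim + 1) % CACHE_SLOTS;

	read_fragment_from_disk(frag_index, slot->data);
	slot->frag_index = frag_index;
	slot->valid = 1;
	return slot->data;
}

int main(void)
{
	get_fragment(7);
	get_fragment(7);	/* served from the cache, no second disk read */
	get_fragment(9);
	return 0;
}

A larger cache means fewer re-reads of shared fragment blocks at the cost of more memory held per mounted filesystem, which is the trade-off the help text points at.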
221 __le32 fragments; member
543   int fragments = 0;  in __mb_check_buddy() local
599   fragments++;  in __mb_check_buddy()
614   MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);  in __mb_check_buddy()
682   unsigned fragments = 0;  in ext4_mb_generate_buddy() local
690   fragments++;  in ext4_mb_generate_buddy()
702   grp->bb_fragments = fragments;  in ext4_mb_generate_buddy()
1727  unsigned free, fragments;  in ext4_mb_good_group() local
1736  fragments = grp->bb_fragments;  in ext4_mb_good_group()
1739  if (fragments == 0)  in ext4_mb_good_group()
1756  if ((free / fragments) >= ac->ac_g_ex.fe_len)  in ext4_mb_good_group()
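The ext4 hits fall into three places: __mb_check_buddy() and ext4_mb_generate_buddy() count the group's contiguous free extents into bb_fragments, and ext4_mb_good_group() then uses that count as a cheap quality check before scanning the group (lines 1739 and 1756). A stand-alone sketch of that last check; the struct and function names are simplified stand-ins for the ext4 ones.

#include <stdio.h>

struct group_info {
	unsigned int free;		/* free blocks in the group */
	unsigned int fragments;		/* number of contiguous free extents */
};

/* A group looks promising when the average size of its free extents
 * (free blocks / number of free extents) is at least the requested length. */
static int group_looks_good(const struct group_info *grp,
			    unsigned int requested_len)
{
	if (grp->fragments == 0)
		return 0;		/* nothing free at all */

	return (grp->free / grp->fragments) >= requested_len;
}

int main(void)
{
	struct group_info fragmented = { .free = 1024, .fragments = 512 };
	struct group_info contiguous = { .free = 1024, .fragments = 2 };

	printf("fragmented group good for 64 blocks? %d\n",
	       group_looks_good(&fragmented, 64));	/* average 2   -> no  */
	printf("contiguous group good for 64 blocks? %d\n",
	       group_looks_good(&contiguous, 64));	/* average 512 -> yes */
	return 0;
}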
49 including the linked list of node fragments (but see the notes below on
261  This allows us to map runlist fragments with the runlist lock already
660  mapping any needed runlist fragments.