/fs/ubifs/ |
D | lpt_commit.c |
    157 if (c->ltab[i].free == c->leb_size) { in alloc_lpt_leb()
    167 if (c->ltab[i].free == c->leb_size) { in alloc_lpt_leb()
    198 if (!done_lsave && offs + c->lsave_sz <= c->leb_size) { in layout_cnodes()
    206 if (offs + c->ltab_sz <= c->leb_size) { in layout_cnodes()
    222 while (offs + len > c->leb_size) { in layout_cnodes()
    224 upd_ltab(c, lnum, c->leb_size - alen, alen - offs); in layout_cnodes()
    225 dbg_chk_lpt_sz(c, 2, c->leb_size - offs); in layout_cnodes()
    265 if (offs + c->lsave_sz > c->leb_size) { in layout_cnodes()
    267 upd_ltab(c, lnum, c->leb_size - alen, alen - offs); in layout_cnodes()
    268 dbg_chk_lpt_sz(c, 2, c->leb_size - offs); in layout_cnodes()
    [all …]
|
D | lprops.c |
    402 if (lprops->free == c->leb_size) { in ubifs_categorize_lprops()
    407 if (lprops->free + lprops->dirty == c->leb_size) { in ubifs_categorize_lprops()
    557 if ((lprops->flags & LPROPS_TAKEN) && lprops->free == c->leb_size) in ubifs_change_lp()
    569 c->lst.total_used -= c->leb_size - old_spc; in ubifs_change_lp()
    577 if (free == c->leb_size) { in ubifs_change_lp()
    578 if (lprops->free != c->leb_size) in ubifs_change_lp()
    580 } else if (lprops->free == c->leb_size) in ubifs_change_lp()
    610 c->lst.total_used += c->leb_size - new_spc; in ubifs_change_lp()
    613 if ((lprops->flags & LPROPS_TAKEN) && lprops->free == c->leb_size) in ubifs_change_lp()
    790 ubifs_assert(c, lprops->free == c->leb_size); in ubifs_fast_find_empty()
    [all …]
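
The lprops.c matches above all reduce to two comparisons: a LEB whose free space equals leb_size is empty, and one whose free plus dirty space equals leb_size holds no live data and is freeable. A minimal userspace sketch of that categorization (the struct and category names are illustrative stand-ins, not the kernel's definitions):

#include <stdio.h>

/* Illustrative stand-ins for the fields compared in the matches above;
 * this is not the kernel's struct ubifs_lprops. */
struct lprops_sketch {
	int free;   /* bytes still writable in this LEB */
	int dirty;  /* bytes occupied by obsolete (garbage) nodes */
};

enum { CAT_EMPTY, CAT_FREEABLE, CAT_OTHER };

/* Mirrors the two checks seen at lprops.c lines 402 and 407. */
static int categorize(const struct lprops_sketch *lp, int leb_size)
{
	if (lp->free == leb_size)
		return CAT_EMPTY;        /* nothing written yet */
	if (lp->free + lp->dirty == leb_size)
		return CAT_FREEABLE;     /* only garbage left, can be erased */
	return CAT_OTHER;                /* still holds live data */
}

int main(void)
{
	int leb_size = 126976;   /* illustrative usable LEB size */
	struct lprops_sketch empty = { leb_size, 0 };
	struct lprops_sketch freeable = { 1024, leb_size - 1024 };
	struct lprops_sketch live = { 4096, 8192 };

	printf("%d %d %d\n", categorize(&empty, leb_size),
	       categorize(&freeable, leb_size), categorize(&live, leb_size));
	return 0;
}
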
|
D | io.c |
    233 ubifs_assert(c, !(offs & 7) && offs < c->leb_size); in ubifs_check_node()
    252 if (node_len + offs > c->leb_size) in ubifs_check_node()
    289 int safe_len = min3(node_len, c->leb_size - offs, in ubifs_check_node()
    561 ubifs_assert(c, wbuf->offs + wbuf->size <= c->leb_size); in ubifs_wbuf_sync_nolock()
    566 if (c->leb_size - wbuf->offs >= c->max_write_size) in ubifs_wbuf_sync_nolock()
    596 if (c->leb_size - wbuf->offs < c->max_write_size) in ubifs_wbuf_sync_nolock()
    597 wbuf->size = c->leb_size - wbuf->offs; in ubifs_wbuf_sync_nolock()
    609 c->leb_size - wbuf->offs, dirt); in ubifs_wbuf_sync_nolock()
    629 ubifs_assert(c, offs >= 0 && offs <= c->leb_size); in ubifs_wbuf_seek_nolock()
    637 if (c->leb_size - wbuf->offs < c->max_write_size) in ubifs_wbuf_seek_nolock()
    [all …]
|
D | sb.c |
    107 if (jnl_lebs * c->leb_size > DEFAULT_MAX_JNL) in create_default_filesystem()
    108 jnl_lebs = DEFAULT_MAX_JNL / c->leb_size; in create_default_filesystem()
    116 tmp = 2 * (c->ref_node_alsz * jnl_lebs) + c->leb_size - 1; in create_default_filesystem()
    117 log_lebs = tmp / c->leb_size; in create_default_filesystem()
    175 tmp64 = (long long)max_buds * c->leb_size; in create_default_filesystem()
    194 sup->leb_size = cpu_to_le32(c->leb_size); in create_default_filesystem()
    213 main_bytes = (long long)main_lebs * c->leb_size; in create_default_filesystem()
    399 if (le32_to_cpu(sup->leb_size) != c->leb_size) { in validate_sb()
    401 le32_to_cpu(sup->leb_size), c->leb_size); in validate_sb()
    439 max_bytes = (long long)c->leb_size * UBIFS_MIN_BUD_LEBS; in validate_sb()
    [all …]
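
create_default_filesystem() sizes on-flash areas in whole LEBs, which is why lines 116-117 add leb_size - 1 before dividing: the usual round-up (ceiling) division. A small sketch of the pattern with made-up sizes:

#include <stdio.h>

/* Ceiling division: how many whole LEBs are needed to hold `bytes`?
 * Same shape as sb.c lines 116-117: (bytes + leb_size - 1) / leb_size. */
static long long lebs_needed(long long bytes, int leb_size)
{
	return (bytes + leb_size - 1) / leb_size;
}

int main(void)
{
	int leb_size = 126976;                  /* illustrative LEB size */
	long long log_bytes = 2LL * 64 * 100;   /* stand-in for 2 * ref_node_alsz * jnl_lebs */

	printf("%lld LEB(s) for %lld bytes\n",
	       lebs_needed(log_bytes, leb_size), log_bytes);
	printf("%lld LEB(s) for exactly one LEB's worth\n",
	       lebs_needed(leb_size, leb_size));   /* 1, not 2 */
	return 0;
}
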
|
D | lpt.c |
    70 c->space_bits = fls(c->leb_size) - 3; in do_calc_lpt_geom()
    72 c->lpt_offs_bits = fls(c->leb_size - 1); in do_calc_lpt_geom()
    73 c->lpt_spc_bits = fls(c->leb_size); in do_calc_lpt_geom()
    110 while (sz > c->leb_size) { in do_calc_lpt_geom()
    112 sz -= c->leb_size; in do_calc_lpt_geom()
    134 lebs_needed = div_u64(sz + c->leb_size - 1, c->leb_size); in ubifs_calc_lpt_geom()
    141 if (c->ltab_sz > c->leb_size) { in ubifs_calc_lpt_geom()
    184 if (c->lpt_sz > c->leb_size) { in calc_dflt_lpt_geom()
    193 lebs_needed = div_u64(sz + c->leb_size - 1, c->leb_size); in calc_dflt_lpt_geom()
    203 if (c->ltab_sz > c->leb_size) { in calc_dflt_lpt_geom()
    [all …]
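
The LPT geometry code derives bit-field widths from leb_size using fls() (index of the highest set bit): fls(leb_size) bits can hold any value from 0 to leb_size, fls(leb_size - 1) bits any offset inside a LEB, and the -3 in space_bits presumably reflects 8-byte alignment of space amounts. A userspace sketch with a portable fls() substitute:

#include <stdio.h>

/* Portable stand-in for the kernel's fls(): position of the highest set
 * bit, counting from 1; fls_sketch(0) == 0. */
static int fls_sketch(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int leb_size = 126976;   /* illustrative LEB size */

	/* Same shape as lpt.c lines 70-73. */
	int space_bits    = fls_sketch(leb_size) - 3;  /* assumes 8-byte granularity */
	int lpt_offs_bits = fls_sketch(leb_size - 1);  /* offsets 0 .. leb_size-1 */
	int lpt_spc_bits  = fls_sketch(leb_size);      /* amounts 0 .. leb_size */

	printf("space_bits=%d offs_bits=%d spc_bits=%d\n",
	       space_bits, lpt_offs_bits, lpt_spc_bits);
	return 0;
}
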
|
D | log.c |
    94 h = (long long)c->lhead_lnum * c->leb_size + c->lhead_offs; in empty_log_bytes()
    95 t = (long long)c->ltail_lnum * c->leb_size; in empty_log_bytes()
    144 c->bud_bytes += c->leb_size - bud->start; in ubifs_add_bud()
    204 if (c->bud_bytes + c->leb_size - offs > c->max_bud_bytes) { in ubifs_add_bud_to_log()
    234 if (c->lhead_offs > c->leb_size - c->ref_node_alsz) { in ubifs_add_bud_to_log()
    325 c->cmt_bud_bytes += c->leb_size - bud->start; in remove_buds()
    328 c->leb_size - bud->start, c->cmt_bud_bytes); in remove_buds()
    397 if (lnum == -1 || offs == c->leb_size) in ubifs_log_start_commit()
    441 ubifs_assert(c, c->lhead_offs < c->leb_size); in ubifs_log_start_commit()
    485 c->min_log_bytes = c->leb_size; in ubifs_log_end_commit()
    [all …]
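
empty_log_bytes() treats the log as a circular buffer: lines 94-95 linearize the head and tail positions into absolute byte offsets (lnum * leb_size + offs), after which free space is a wrap-around difference. A sketch of that arithmetic in the generic circular-buffer form (not a copy of the kernel function, which also has to disambiguate the head == tail case):

#include <stdio.h>

/* Linearize a (LEB number, offset) position, as in log.c lines 94-95. */
static long long linearize(int lnum, int offs, int leb_size)
{
	return (long long)lnum * leb_size + offs;
}

/* Generic circular-buffer free space between head (write point) and
 * tail (oldest data), for a log spanning log_bytes in total. */
static long long empty_bytes_sketch(long long head, long long tail,
				    long long log_bytes)
{
	if (head >= tail)
		return log_bytes - (head - tail);
	return tail - head;
}

int main(void)
{
	int leb_size = 126976;
	long long log_bytes = 4LL * leb_size;       /* a 4-LEB log, for illustration */

	long long h = linearize(2, 512, leb_size);  /* head in LEB 2, offset 512 */
	long long t = linearize(0, 0, leb_size);    /* tail at start of LEB 0 */

	printf("empty log bytes: %lld\n", empty_bytes_sketch(h, t, log_bytes));
	return 0;
}
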
|
D | tnc_commit.c |
    141 if (gap_end == c->leb_size) { in fill_gap()
    287 c->ileb_len = c->leb_size; in layout_leb_in_gaps()
    288 gap_end = c->leb_size; in layout_leb_in_gaps()
    301 if (lp.free == c->leb_size) { in layout_leb_in_gaps()
    307 c->leb_size - c->ileb_len, in layout_leb_in_gaps()
    314 err = ubifs_change_one_lp(c, lnum, c->leb_size - c->ileb_len, dirt, in layout_leb_in_gaps()
    339 cnt -= (c->leb_size - c->ihead_offs) / c->max_idx_node_sz; in get_leb_cnt()
    342 d = c->leb_size / c->max_idx_node_sz; in get_leb_cnt()
    449 if (buf_offs + next_len > c->leb_size) in layout_in_empty_space()
    515 buf_offs + used + next_len <= c->leb_size && in layout_in_empty_space()
    [all …]
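
get_leb_cnt() (lines 339 and 342) estimates how many LEBs a commit needs by asking how many maximum-size index nodes fit: first in the space left after ihead_offs, then in whole LEBs of leb_size / max_idx_node_sz nodes each. A hedged sketch of that estimate (the rounding details and the numbers are illustrative, not taken from the kernel source):

#include <stdio.h>

/* Roughly how many LEBs are needed to write `cnt` index nodes of at most
 * `max_idx_node_sz` bytes, when the current index head LEB is already
 * filled up to `ihead_offs`?  Same shape as tnc_commit.c get_leb_cnt(). */
static int leb_cnt_sketch(int cnt, int leb_size, int ihead_offs,
			  int max_idx_node_sz)
{
	int per_leb;

	/* Nodes that still fit in the partially used index head LEB. */
	cnt -= (leb_size - ihead_offs) / max_idx_node_sz;
	if (cnt <= 0)
		return 0;

	/* Whole LEBs, rounding up. */
	per_leb = leb_size / max_idx_node_sz;
	return (cnt + per_leb - 1) / per_leb;
}

int main(void)
{
	int leb_size = 126976, max_idx_node_sz = 512, ihead_offs = 120000;

	printf("LEBs for 1000 index nodes: %d\n",
	       leb_cnt_sketch(1000, leb_size, ihead_offs, max_idx_node_sz));
	return 0;
}
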
|
D | master.c |
    196 c->lhead_offs < 0 || c->lhead_offs >= c->leb_size || in validate_master()
    203 c->zroot.offs >= c->leb_size || c->zroot.offs & 7) { in validate_master()
    221 c->ihead_offs > c->leb_size || c->ihead_offs & 7) { in validate_master()
    226 main_sz = (long long)c->main_lebs * c->leb_size; in validate_master()
    233 c->lpt_offs < 0 || c->lpt_offs + c->nnode_sz > c->leb_size) { in validate_master()
    240 c->nhead_offs > c->leb_size) { in validate_master()
    247 c->ltab_offs + c->ltab_sz > c->leb_size) { in validate_master()
    254 c->lsave_offs + c->lsave_sz > c->leb_size)) { in validate_master()
    405 c->lst.total_free += growth * (long long)c->leb_size; in ubifs_read_master()
    449 if (offs + UBIFS_MST_NODE_SZ > c->leb_size) { in ubifs_write_master()
|
D | recovery.c |
    104 sbuf = vmalloc(c->leb_size); in get_master_node()
    108 err = ubifs_leb_read(c, lnum, sbuf, 0, c->leb_size, 0); in get_master_node()
    115 len = c->leb_size; in get_master_node()
    116 while (offs + UBIFS_MST_NODE_SZ <= c->leb_size) { in get_master_node()
    160 if (offs < c->leb_size) { in get_master_node()
    170 if (offs < c->leb_size) in get_master_node()
    267 c->leb_size - offs2 - sz < sz) { in ubifs_recover_master_node()
    292 if (offs2 + sz + sz <= c->leb_size) in ubifs_recover_master_node()
    412 check_len = c->leb_size - empty_offs; in is_last_write()
    443 memset(*buf, 0xff, c->leb_size - empty_offs); in clean_buf()
    [all …]
|
D | super.c |
    513 c->leb_size = c->vi.usable_leb_size; in init_constants_early()
    515 c->half_leb_size = c->leb_size / 2; in init_constants_early()
    521 if (c->leb_size < UBIFS_MIN_LEB_SZ) { in init_constants_early()
    523 c->leb_size, UBIFS_MIN_LEB_SZ); in init_constants_early()
    581 c->ranges[UBIFS_SIG_NODE].max_len = c->leb_size - UBIFS_SB_NODE_SZ; in init_constants_early()
    587 c->ranges[UBIFS_ORPH_NODE].max_len = c->leb_size; in init_constants_early()
    617 c->leb_overhead = c->leb_size % UBIFS_MAX_DATA_NODE_SZ; in init_constants_early()
    621 if (c->max_bu_buf_len > c->leb_size) in init_constants_early()
    622 c->max_bu_buf_len = c->leb_size; in init_constants_early()
    625 c->min_log_bytes = c->leb_size; in init_constants_early()
    [all …]
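
Line 617 deserves a note: leb_overhead = leb_size % UBIFS_MAX_DATA_NODE_SZ is the tail of every LEB that cannot hold one more maximum-size data node, i.e. the per-LEB packing waste the budgeting code assumes. A quick illustration with stand-in sizes:

#include <stdio.h>

int main(void)
{
	/* Illustrative numbers, not the kernel constants. */
	int leb_size = 126976;          /* usable bytes per LEB */
	int max_data_node_sz = 4144;    /* stand-in for a maximum-size data node */

	/* As in super.c line 617: bytes at the end of every LEB that cannot
	 * hold one more maximum-size data node. */
	int leb_overhead = leb_size % max_data_node_sz;

	printf("%d data nodes per LEB, %d bytes of overhead each\n",
	       leb_size / max_data_node_sz, leb_overhead);
	return 0;
}
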
|
D | gc.c |
    77 c->leb_size - wbuf->offs - wbuf->used); in switch_gc_head()
    361 avail = c->leb_size - wbuf->offs - wbuf->used - in move_nodes()
    383 avail = c->leb_size - wbuf->offs - wbuf->used - in move_nodes()
    506 if (lp->free + lp->dirty == c->leb_size) { in ubifs_garbage_collect_leb()
    511 if (lp->free != c->leb_size) { in ubifs_garbage_collect_leb()
    520 err = ubifs_change_one_lp(c, lp->lnum, c->leb_size, in ubifs_garbage_collect_leb()
    581 err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0, in ubifs_garbage_collect_leb()
    598 err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0, 0, 0); in ubifs_garbage_collect_leb()
    745 space_before = c->leb_size - wbuf->offs - wbuf->used; in ubifs_garbage_collect()
    785 space_after = c->leb_size - wbuf->offs - wbuf->used; in ubifs_garbage_collect()
    [all …]
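
gc.c and journal.c (further down) compute the same quantity: the space still available behind a write-buffer is leb_size - wbuf->offs - wbuf->used, i.e. the LEB size minus what has already reached flash minus what is queued in the buffer. A small sketch of that bookkeeping (the struct here is an illustrative stand-in, not the kernel's struct ubifs_wbuf):

#include <stdio.h>

/* Illustrative write-buffer state: `offs` bytes already on flash in this
 * LEB, `used` more bytes buffered in RAM and not yet written out. */
struct wbuf_sketch {
	int offs;
	int used;
};

/* Space left in the LEB, as computed in gc.c and journal.c:
 * leb_size - wbuf->offs - wbuf->used. */
static int wbuf_avail(const struct wbuf_sketch *wbuf, int leb_size)
{
	return leb_size - wbuf->offs - wbuf->used;
}

int main(void)
{
	int leb_size = 126976;
	struct wbuf_sketch wbuf = { .offs = 65536, .used = 1800 };

	printf("available in head LEB: %d bytes\n", wbuf_avail(&wbuf, leb_size));
	return 0;
}
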
|
D | find.c |
    102 if (lprops->free + lprops->dirty == c->leb_size) { in scan_for_dirty_cb()
    183 lprops->free + lprops->dirty == c->leb_size)); in scan_for_dirty()
    318 (pick_free && lp->free + lp->dirty == c->leb_size)); in ubifs_find_dirty_leb()
    369 if (!data->pick_free && lprops->free == c->leb_size) in scan_for_free_cb()
    377 if (lprops->free + lprops->dirty == c->leb_size && lprops->dirty > 0) in scan_for_free_cb()
    549 *offs = c->leb_size - lprops->free; in ubifs_find_free_space()
    564 dbg_find("found LEB %d, free %d", lnum, c->leb_size - *offs); in ubifs_find_free_space()
    565 ubifs_assert(c, *offs <= c->leb_size - min_space); in ubifs_find_free_space()
    606 if (lprops->free + lprops->dirty != c->leb_size) in scan_for_idx_cb()
    639 ubifs_assert(c, lprops->free + lprops->dirty == c->leb_size); in scan_for_leb_for_idx()
    [all …]
|
D | scan.c |
    89 offs + node_len + pad_len > c->leb_size) { in ubifs_scan_a_node()
    141 err = ubifs_leb_read(c, lnum, sbuf + offs, offs, c->leb_size - offs, 0); in ubifs_start_scan()
    144 c->leb_size - offs, lnum, offs, err); in ubifs_start_scan()
    231 len = c->leb_size - offs; in ubifs_scanned_corruption()
    258 int err, len = c->leb_size - offs; in ubifs_scan()
|
D | orphan.c |
    278 ((c->leb_size - UBIFS_ORPH_NODE_SZ) / sizeof(__le64)); in avail_orphs()
    279 gap = c->leb_size - c->ohead_offs; in avail_orphs()
    298 ((c->leb_size - UBIFS_ORPH_NODE_SZ) / sizeof(__le64)); in tot_avail_orphs()
    350 gap = c->leb_size - c->ohead_offs; in write_orph_node()
    354 gap = c->leb_size; in write_orph_node()
    389 ubifs_assert(c, c->ohead_offs + len <= c->leb_size); in write_orph_node()
    821 c->orph_buf = vmalloc(c->leb_size); in ubifs_mount_orphans()
    982 buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); in dbg_scan_orphans()
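
avail_orphs() (lines 278-279) is plain capacity arithmetic: an orphan LEB stores an orphan node header plus as many 64-bit inode numbers as fit, and the current orphan head still has leb_size - ohead_offs bytes of room. A sketch of that calculation (the header size and the spare-LEB count are stand-in parameters, not the kernel constants):

#include <stdio.h>
#include <stdint.h>

/* How many more orphan inode numbers fit in the orphan area?  Mirrors the
 * shape of orphan.c avail_orphs(): whole spare LEBs plus the gap left in
 * the current orphan head LEB. */
static int avail_orphs_sketch(int spare_lebs, int leb_size, int ohead_offs,
			      int orph_node_hdr_sz)
{
	int per_leb = (leb_size - orph_node_hdr_sz) / (int)sizeof(uint64_t);
	int avail = spare_lebs * per_leb;
	int gap = leb_size - ohead_offs;

	/* The gap must still hold a node header before it can store entries. */
	if (gap >= orph_node_hdr_sz + (int)sizeof(uint64_t))
		avail += (gap - orph_node_hdr_sz) / (int)sizeof(uint64_t);
	return avail;
}

int main(void)
{
	int leb_size = 126976;
	int hdr_sz = 32;           /* illustrative orphan node header size */

	printf("orphans that still fit: %d\n",
	       avail_orphs_sketch(1, leb_size, 100000, hdr_sz));
	return 0;
}
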
|
D | replay.c |
    102 if (b->bud->start == 0 && (lp->free != c->leb_size || lp->dirty != 0)) { in set_bud_lprops()
    126 dirty -= c->leb_size - lp->free; in set_bud_lprops()
    148 b->bud->lnum, c->leb_size - b->free); in set_bud_lprops()
    829 b->free = c->leb_size - sleb->endpt; in replay_bud()
    963 lnum < c->main_first || offs > c->leb_size || in validate_ref()
    1124 if (sleb->endpt || c->lhead_offs >= c->leb_size) { in replay_log_leb()
    1196 if (c->ihead_offs != c->leb_size - free) { in ubifs_replay_journal()
|
D | misc.h | 281 int max_xattrs = (c->leb_size / 2) / UBIFS_INO_NODE_SZ; in ubifs_xattr_max_cnt()
|
D | ubifs-media.h | 659 __le32 leb_size; member
|
D | tnc_misc.c | 321 zbr->offs + zbr->len > c->leb_size || zbr->offs & 7) { in read_znode()
|
D | budget.c | 223 available -= (long long)subtract_lebs * c->leb_size; in ubifs_calc_available()
|
D | debug.c |
    335 pr_err("\tleb_size %u\n", le32_to_cpu(sup->leb_size)); in ubifs_dump_node()
    657 lp->lnum, lp->free, lp->dirty, c->leb_size - spc, spc, in ubifs_dump_lprop()
    661 lp->lnum, lp->free, lp->dirty, c->leb_size - spc, spc, in ubifs_dump_lprop()
    818 buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); in ubifs_dump_leb()
|
D | journal.c |
    118 avail = c->leb_size - wbuf->offs - wbuf->used; in reserve_space()
    167 avail = c->leb_size - wbuf->offs - wbuf->used; in reserve_space()
|
D | ubifs.h | 1352 int leb_size; member
|
D | file.c | 755 ubifs_assert(c, bu->buf_len <= c->leb_size); in ubifs_do_bulk_read()
|
D | tnc.c |
    1692 ubifs_assert(c, !(offs & 7) && offs < c->leb_size); in read_wbuf()
    1693 ubifs_assert(c, offs + len <= c->leb_size); in read_wbuf()
|