/fs/ubifs/
D | scan.c
      58  int offs, int quiet)    in ubifs_scan_a_node() argument
      66  dbg_scan("hit empty space at LEB %d:%d", lnum, offs);    in ubifs_scan_a_node()
      77  dbg_ntype(ch->node_type), lnum, offs);    in ubifs_scan_a_node()
      79  if (ubifs_check_node(c, buf, lnum, offs, quiet, 1))    in ubifs_scan_a_node()
      89  offs + node_len + pad_len > c->leb_size) {    in ubifs_scan_a_node()
      92  lnum, offs);    in ubifs_scan_a_node()
     102  offs, offs + node_len + pad_len);    in ubifs_scan_a_node()
     107  lnum, offs, ALIGN(offs + node_len + pad_len, 8));    in ubifs_scan_a_node()
     126  int offs, void *sbuf)    in ubifs_start_scan() argument
     131  dbg_scan("scan LEB %d:%d", lnum, offs);    in ubifs_start_scan()
          [all …]
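The ubifs_scan_a_node() hits above show the core scan arithmetic: a node plus its padding must fit inside the erase block, and the next scan offset is the 8-byte-aligned end of the current node (ALIGN(offs + node_len + pad_len, 8)). Below is a minimal standalone sketch of that arithmetic only; the node and pad lengths are made up and the real node headers, CRC checks and error paths are left out.

```c
/* Sketch of the scan-offset arithmetic seen in ubifs_scan_a_node():
 * a node (plus its padding) must fit in the LEB, and the scan offset
 * advances to the next 8-byte boundary.  Lengths below are hypothetical.
 */
#include <stdio.h>

#define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))

static int scan_one_node(int leb_size, int offs, int node_len, int pad_len)
{
	/* Reject a node that would run past the end of the erase block. */
	if (offs + node_len + pad_len > leb_size)
		return -1;

	/* The next node starts at the following 8-byte aligned offset. */
	return ALIGN(offs + node_len + pad_len, 8);
}

int main(void)
{
	int lens[][2] = { { 161, 0 }, { 4144, 0 }, { 37, 3 } };  /* made-up node/pad sizes */
	int offs = 0;

	for (unsigned int i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		offs = scan_one_node(131072, offs, lens[i][0], lens[i][1]);
		if (offs < 0)
			break;
		printf("next node starts at offs %d\n", offs);
	}
	return 0;
}
```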
D | io.c
      87  int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs,    in ubifs_leb_read() argument
      92  err = ubi_read(c->ubi, lnum, buf, offs, len);    in ubifs_leb_read()
      99  len, lnum, offs, err);    in ubifs_leb_read()
     105  int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs,    in ubifs_leb_write() argument
     114  err = ubi_leb_write(c->ubi, lnum, buf, offs, len);    in ubifs_leb_write()
     116  err = dbg_leb_write(c, lnum, buf, offs, len);    in ubifs_leb_write()
     119  len, lnum, offs, err);    in ubifs_leb_write()
     226  int offs, int quiet, int must_chk_crc)    in ubifs_check_node() argument
     232  ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0);    in ubifs_check_node()
     233  ubifs_assert(c, !(offs & 7) && offs < c->leb_size);    in ubifs_check_node()
          [all …]
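The ubifs_check_node() assertions capture the offset sanity rules used throughout these read/write paths: the LEB number lies inside the LEB count, and the offset is non-negative, 8-byte aligned and below the LEB size. The sketch below restates those checks as an ordinary userspace predicate; the bound values are hypothetical and this is not the kernel helper itself.

```c
/* Sketch of the LEB address checks visible in ubifs_check_node(). */
#include <stdbool.h>
#include <stdio.h>

static bool leb_addr_valid(int leb_cnt, int leb_size, int lnum, int offs)
{
	if (lnum < 0 || lnum >= leb_cnt)
		return false;
	if (offs < 0 || offs >= leb_size)
		return false;
	return (offs & 7) == 0;	/* nodes always start on 8-byte boundaries */
}

int main(void)
{
	printf("%d\n", leb_addr_valid(1024, 131072, 5, 512));	/* 1: valid */
	printf("%d\n", leb_addr_valid(1024, 131072, 5, 513));	/* 0: misaligned */
	return 0;
}
```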
D | recovery.c
     101  int err, offs, len;    in get_master_node() local
     113  offs = 0;    in get_master_node()
     116  while (offs + UBIFS_MST_NODE_SZ <= c->leb_size) {    in get_master_node()
     121  offs += sz;    in get_master_node()
     126  if (offs) {    in get_master_node()
     129  offs -= sz;    in get_master_node()
     132  ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1);    in get_master_node()
     133  if (ret != SCANNED_A_NODE && offs) {    in get_master_node()
     135  offs -= sz;    in get_master_node()
     138  ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1);    in get_master_node()
          [all …]
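get_master_node() walks forward through the master-area LEB in node-sized steps (offs += sz) and then backs up (offs -= sz) to re-examine the last node that was actually written. The sketch below shows only that forward-then-back walk; the slot size is made up, and the all-0xFF "erased" test stands in for the real "is there still a node here" check, which the listing does not show.

```c
/* Sketch of the "find the last written copy" walk in get_master_node(). */
#include <stdio.h>
#include <string.h>

#define SLOT_SZ 16	/* stand-in for UBIFS_MST_NODE_SZ */

/* Erased flash reads back as all-0xFF bytes (assumption about the medium). */
static int slot_is_empty(const unsigned char *p)
{
	for (int i = 0; i < SLOT_SZ; i++)
		if (p[i] != 0xFF)
			return 0;
	return 1;
}

static int last_written_slot(const unsigned char *leb, int leb_size)
{
	int offs = 0;

	/* Walk forward over written slots, like the offs += sz loop. */
	while (offs + SLOT_SZ <= leb_size && !slot_is_empty(leb + offs))
		offs += SLOT_SZ;

	/* Step back one slot, like offs -= sz in get_master_node(). */
	return offs ? offs - SLOT_SZ : -1;	/* -1: nothing written at all */
}

int main(void)
{
	unsigned char leb[64];

	memset(leb, 0xFF, sizeof(leb));	/* erased LEB */
	memset(leb, 0x00, 2 * SLOT_SZ);	/* pretend two copies were written */
	printf("last written slot at offs %d\n",
	       last_written_slot(leb, sizeof(leb)));	/* prints 16 */
	return 0;
}
```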
D | lpt_commit.c
     184  int lnum, offs, len, alen, done_lsave, done_ltab, err;    in layout_cnodes() local
     194  offs = c->nhead_offs;    in layout_cnodes()
     198  if (!done_lsave && offs + c->lsave_sz <= c->leb_size) {    in layout_cnodes()
     201  c->lsave_offs = offs;    in layout_cnodes()
     202  offs += c->lsave_sz;    in layout_cnodes()
     206  if (offs + c->ltab_sz <= c->leb_size) {    in layout_cnodes()
     209  c->ltab_offs = offs;    in layout_cnodes()
     210  offs += c->ltab_sz;    in layout_cnodes()
     222  while (offs + len > c->leb_size) {    in layout_cnodes()
     223  alen = ALIGN(offs, c->min_io_size);    in layout_cnodes()
          [all …]
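layout_cnodes() places each LPT structure at the current offset only while it still fits (offs + size <= c->leb_size); when something no longer fits, the used part of the LEB is rounded up to the minimum I/O unit (alen = ALIGN(offs, c->min_io_size)) and layout continues in a fresh LEB. The sketch below shows that first-fit-with-alignment step with invented sizes; it is not the real LPT commit logic.

```c
/* Sketch of the fit-or-spill placement step from layout_cnodes(). */
#include <stdio.h>

#define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))

struct layout {
	int lnum;	/* LEB currently being filled */
	int offs;	/* next free offset inside that LEB */
};

static void place(struct layout *l, int sz, int leb_size, int min_io_size)
{
	if (l->offs + sz > leb_size) {
		/* Pad the used part up to the min. I/O unit, move to next LEB. */
		int alen = ALIGN(l->offs, min_io_size);

		printf("LEB %d full after %d padded bytes\n", l->lnum, alen);
		l->lnum += 1;
		l->offs = 0;
	}
	printf("placed %d bytes at LEB %d:%d\n", sz, l->lnum, l->offs);
	l->offs += sz;
}

int main(void)
{
	struct layout l = { .lnum = 10, .offs = 0 };

	place(&l, 60000, 131072, 2048);
	place(&l, 60000, 131072, 2048);
	place(&l, 60000, 131072, 2048);	/* no longer fits, spills into LEB 11 */
	return 0;
}
```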
D | tnc.c
      61  else if (old_idx->offs < o->offs)    in do_insert_old_idx()
      63  else if (old_idx->offs > o->offs)    in do_insert_old_idx()
      97  static int insert_old_idx(struct ubifs_info *c, int lnum, int offs)    in insert_old_idx() argument
     105  old_idx->offs = offs;    in insert_old_idx()
     125  return insert_old_idx(c, zbr->lnum, zbr->offs);    in insert_old_idx_znode()
     129  c->zroot.offs);    in insert_old_idx_znode()
     150  err = insert_old_idx(c, zbr->lnum, zbr->offs);    in ins_clr_old_idx_znode()
     154  zbr->offs = 0;    in ins_clr_old_idx_znode()
     159  err = insert_old_idx(c, c->zroot.lnum, c->zroot.offs);    in ins_clr_old_idx_znode()
     163  c->zroot.offs = 0;    in ins_clr_old_idx_znode()
          [all …]
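insert_old_idx() records obsoleted index-node locations, and the cascaded comparisons in do_insert_old_idx() order them first by LEB number and then by offset within the LEB. UBIFS keeps these entries in an rbtree; the sketch below shows only the two-level ordering rule, using a plain comparator and qsort() instead of the rbtree machinery.

```c
/* Sketch of the (lnum, offs) ordering used for old-index entries in tnc.c. */
#include <stdio.h>
#include <stdlib.h>

struct old_idx {
	int lnum;
	int offs;
};

static int old_idx_cmp(const void *a, const void *b)
{
	const struct old_idx *x = a, *y = b;

	if (x->lnum != y->lnum)
		return x->lnum < y->lnum ? -1 : 1;	/* LEB number first */
	if (x->offs != y->offs)
		return x->offs < y->offs ? -1 : 1;	/* then offset in the LEB */
	return 0;
}

int main(void)
{
	struct old_idx v[] = { { 7, 4096 }, { 3, 512 }, { 7, 0 } };

	qsort(v, 3, sizeof(v[0]), old_idx_cmp);
	for (int i = 0; i < 3; i++)
		printf("LEB %d:%d\n", v[i].lnum, v[i].offs);
	return 0;
}
```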
D | log.c
     164  int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)    in ubifs_add_bud_to_log() argument
     204  if (c->bud_bytes + c->leb_size - offs > c->max_bud_bytes) {    in ubifs_add_bud_to_log()
     225  bud->start = offs;    in ubifs_add_bud_to_log()
     231  ref->offs = cpu_to_le32(bud->start);    in ubifs_add_bud_to_log()
     319  c->cmt_bud_bytes += wbuf->offs - bud->start;    in remove_buds()
     322  wbuf->offs - bud->start, c->cmt_bud_bytes);    in remove_buds()
     323  bud->start = wbuf->offs;    in remove_buds()
     395  int offs = c->jheads[i].wbuf.offs;    in ubifs_log_start_commit() local
     397  if (lnum == -1 || offs == c->leb_size)    in ubifs_log_start_commit()
     401  lnum, offs, dbg_jhead(i));    in ubifs_log_start_commit()
          [all …]
D | replay.c
      47  int offs;    member
     248  r->lnum, r->offs, r->len, r->deletion, r->sqnum);    in apply_replay_entry()
     254  err = ubifs_tnc_add_nm(c, &r->key, r->lnum, r->offs,    in apply_replay_entry()
     279  err = ubifs_tnc_add(c, &r->key, r->lnum, r->offs,    in apply_replay_entry()
     383  static int insert_node(struct ubifs_info *c, int lnum, int offs, int len,    in insert_node() argument
     390  dbg_mntk(key, "add LEB %d:%d, key ", lnum, offs);    in insert_node()
     402  r->offs = offs;    in insert_node()
     432  static int insert_dent(struct ubifs_info *c, int lnum, int offs, int len,    in insert_dent() argument
     440  dbg_mntk(key, "add LEB %d:%d, key ", lnum, offs);    in insert_dent()
     457  r->offs = offs;    in insert_dent()
          [all …]
D | tnc_commit.c
      26  struct ubifs_znode *znode, int lnum, int offs, int len)    in make_idx_node() argument
      42  br->offs = cpu_to_le32(zbr->offs);    in make_idx_node()
      58  znode->offs = offs;    in make_idx_node()
      70  zbr->offs = offs;    in make_idx_node()
      75  c->zroot.offs = offs;    in make_idx_node()
     163  static int find_old_idx(struct ubifs_info *c, int lnum, int offs)    in find_old_idx() argument
     175  else if (offs < o->offs)    in find_old_idx()
     177  else if (offs > o->offs)    in find_old_idx()
     199  int level, int lnum, int offs)    in is_idx_node_in_use() argument
     203  ret = is_idx_node_in_tnc(c, key, level, lnum, offs);    in is_idx_node_in_use()
          [all …]
D | tnc_misc.c
     269  int offs = zzbr->offs;    in read_znode() local
     278  err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs);    in read_znode()
     286  ubifs_bad_hash(c, idx, zzbr->hash, lnum, offs);    in read_znode()
     295  lnum, offs, znode->level, znode->child_cnt);    in read_znode()
     312  zbr->offs = le32_to_cpu(br->offs);    in read_znode()
     320  zbr->lnum >= c->leb_cnt || zbr->offs < 0 ||    in read_znode()
     321  zbr->offs + zbr->len > c->leb_size || zbr->offs & 7) {    in read_znode()
     392  ubifs_err(c, "bad indexing node at LEB %d:%d, error %d", lnum, offs, err);    in read_znode()
     475  zbr->lnum, zbr->offs);    in ubifs_tnc_read_node()
     478  zbr->offs);    in ubifs_tnc_read_node()
          [all …]
D | master.c
      94  int lnum, offs = 0, nodes_cnt, err;    in scan_for_master() local
     108  offs = snod->offs;    in scan_for_master()
     124  if (snod->offs != offs)    in scan_for_master()
     129  c->mst_offs = offs;    in scan_for_master()
     159  snod->type, lnum, snod->offs);    in scan_for_master()
     200  c->zroot.offs >= c->leb_size || c->zroot.offs & 7) {    in validate_master()
     356  c->zroot.offs = le32_to_cpu(c->mst_node->root_offs);    in ubifs_read_master()
     436  int err, lnum, offs, len;    in ubifs_write_master() local
     443  offs = c->mst_offs + c->mst_node_alsz;    in ubifs_write_master()
     446  if (offs + UBIFS_MST_NODE_SZ > c->leb_size) {    in ubifs_write_master()
          [all …]
D | ubifs.h
     280  int offs;    member
     306  int offs;    member
     590  int offs;    member
     687  int offs;    member
     752  int offs;    member
     789  int offs;    member
    1599  const u8 *hash, int lnum, int offs);
    1602  const u8 *hash, int lnum, int offs) {};    in ubifs_bad_hash() argument
    1710  int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs,
    1712  int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs,
          [all …]
D | journal.c
     100  int err = 0, err1, retries = 0, avail, lnum, offs, squeeze;    in reserve_space() local
     118  avail = c->leb_size - wbuf->offs - wbuf->used;    in reserve_space()
     126  lnum = ubifs_find_free_space(c, len, &offs, squeeze);    in reserve_space()
     167  avail = c->leb_size - wbuf->offs - wbuf->used;    in reserve_space()
     176  lnum, wbuf->lnum, wbuf->offs + wbuf->used;    in reserve_space()
     183  offs = 0;    in reserve_space()
     197  err = ubifs_add_bud_to_log(c, jhead, lnum, offs);    in reserve_space()
     200  err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs);    in reserve_space()
     269  int *lnum, int *offs, int sync)    in write_head() argument
     277  *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;    in write_head()
          [all …]
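reserve_space() computes the room left behind a journal head as c->leb_size - wbuf->offs - wbuf->used and, when the request does not fit, points the head at a LEB returned by ubifs_find_free_space() and restarts at offset 0. The sketch below shows only that decision; the dummy counter stands in for the real free-space search and all the bud/log bookkeeping is left out.

```c
/* Sketch of the space-reservation arithmetic in reserve_space(). */
#include <stdio.h>

struct wbuf {
	int lnum;	/* LEB the buffer writes to */
	int offs;	/* offset of the buffer within the LEB */
	int used;	/* bytes already buffered */
};

static int next_free_lnum = 100;	/* stand-in for ubifs_find_free_space() */

static void reserve(struct wbuf *w, int len, int leb_size)
{
	int avail = leb_size - w->offs - w->used;

	if (avail < len) {
		/* Does not fit behind this head: take a fresh LEB at offset 0. */
		w->lnum = next_free_lnum++;
		w->offs = 0;
		w->used = 0;
	}
	printf("%d bytes reserved at LEB %d:%d\n",
	       len, w->lnum, w->offs + w->used);
	w->used += len;
}

int main(void)
{
	struct wbuf w = { .lnum = 42, .offs = 126976, .used = 0 };

	reserve(&w, 2048, 131072);	/* fits in LEB 42 */
	reserve(&w, 4096, 131072);	/* spills to the next free LEB */
	return 0;
}
```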
D | commit.c
     164  c->mst_node->root_offs = cpu_to_le32(zroot.offs);    in do_commit()
     517  int lnum, offs, len, err = 0;    in dbg_old_index_check_init() local
     522  offs = d->old_zroot.offs;    in dbg_old_index_check_init()
     529  err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs);    in dbg_old_index_check_init()
     555  int lnum, offs, len, err = 0, last_level, child_cnt;    in dbg_check_old_index() local
     575  offs = d->old_zroot.offs;    in dbg_check_old_index()
     597  err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs);    in dbg_check_old_index()
     686  offs = le32_to_cpu(br->offs);    in dbg_check_old_index()
D | gc.c
      72  wbuf->lnum, wbuf->offs + wbuf->used, gc_lnum,    in switch_gc_head()
      73  c->leb_size - wbuf->offs - wbuf->used);    in switch_gc_head()
     260  snod->offs, 0);    in sort_nodes()
     305  int err, new_lnum = wbuf->lnum, new_offs = wbuf->offs + wbuf->used;    in move_node()
     313  snod->offs, new_lnum, new_offs,    in move_node()
     357  avail = c->leb_size - wbuf->offs - wbuf->used -    in move_nodes()
     379  avail = c->leb_size - wbuf->offs - wbuf->used -    in move_nodes()
     497  ubifs_assert(c, c->gc_lnum != -1 || wbuf->offs + wbuf->used == 0 ||    in ubifs_garbage_collect_leb()
     556  snod->offs);    in ubifs_garbage_collect_leb()
     741  space_before = c->leb_size - wbuf->offs - wbuf->used;    in ubifs_garbage_collect()
          [all …]
D | debug.c
     404  pr_err("\toffs %u\n", le32_to_cpu(ref->offs));    in ubifs_dump_node()
     510  i, le32_to_cpu(br->lnum), le32_to_cpu(br->offs),    in ubifs_dump_node()
     795  const struct ubifs_scan_leb *sleb, int offs)    in ubifs_dump_sleb() argument
     800  current->pid, sleb->lnum, offs);    in ubifs_dump_sleb()
     805  sleb->lnum, snod->offs, snod->len);    in ubifs_dump_sleb()
     836  snod->offs, snod->len);    in ubifs_dump_leb()
     862  znode, zbr->lnum, zbr->offs, zbr->len, znode->parent, znode->iip,    in ubifs_dump_znode()
     875  n, zbr->znode, zbr->lnum, zbr->offs, zbr->len,    in ubifs_dump_znode()
     880  n, zbr->znode, zbr->lnum, zbr->offs, zbr->len,    in ubifs_dump_znode()
    1210  zbr1->offs, dbg_snprintf_key(c, &key, key_buf,    in dbg_check_key_order()
          [all …]
D | lpt.c
     389  pack_bits(c, &addr, &pos, nnode->nbranch[i].offs,    in ubifs_pack_nnode()
     758  nnode->nbranch[j].offs = boffs;    in ubifs_create_dflt_lpt()
     763  nnode->nbranch[j].offs = 0;    in ubifs_create_dflt_lpt()
    1030  nnode->nbranch[i].offs = ubifs_unpack_bits(c, &addr, &pos,    in ubifs_unpack_nnode()
    1124  int offs = nnode->nbranch[i].offs;    in validate_nnode() local
    1127  if (offs != 0)    in validate_nnode()
    1133  if (offs < 0 || offs > max_offs)    in validate_nnode()
    1208  int err, lnum, offs;    in ubifs_read_nnode() local
    1213  offs = branch->offs;    in ubifs_read_nnode()
    1216  offs = c->lpt_offs;    in ubifs_read_nnode()
          [all …]
D | find.c
     481  int ubifs_find_free_space(struct ubifs_info *c, int min_space, int *offs,    in ubifs_find_free_space() argument
     549  *offs = c->leb_size - lprops->free;    in ubifs_find_free_space()
     552  if (*offs == 0) {    in ubifs_find_free_space()
     564  dbg_find("found LEB %d, free %d", lnum, c->leb_size - *offs);    in ubifs_find_free_space()
     565  ubifs_assert(c, *offs <= c->leb_size - min_space);    in ubifs_find_free_space()
/fs/crypto/
D | crypto.c
     104  unsigned int offs, gfp_t gfp_flags)    in fscrypt_crypt_block() argument
     130  sg_set_page(&dst, dest_page, len, offs);    in fscrypt_crypt_block()
     132  sg_set_page(&src, src_page, len, offs);    in fscrypt_crypt_block()
     174  unsigned int offs,    in fscrypt_encrypt_pagecache_blocks() argument
     183  (offs >> blockbits);    in fscrypt_encrypt_pagecache_blocks()
     190  if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))    in fscrypt_encrypt_pagecache_blocks()
     197  for (i = offs; i < offs + len; i += blocksize, lblk_num++) {    in fscrypt_encrypt_pagecache_blocks()
     230  unsigned int len, unsigned int offs,    in fscrypt_encrypt_block_inplace() argument
     234  len, offs, gfp_flags);    in fscrypt_encrypt_block_inplace()
     256  unsigned int offs)    in fscrypt_decrypt_pagecache_blocks() argument
          [all …]
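fscrypt_encrypt_pagecache_blocks() rejects any range whose offset or length is not a whole number of filesystem blocks (the IS_ALIGNED(len | offs, blocksize) test checks both at once, since OR-ing them only keeps the alignment of the least-aligned value) and then walks the range one block at a time while advancing the logical block number. The sketch below shows that check and loop with printf standing in for the per-block crypto; it is not the fscrypt API.

```c
/* Sketch of the per-block walk in fscrypt_encrypt_pagecache_blocks(). */
#include <stdio.h>
#include <stdbool.h>

#define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)

static bool crypt_blocks(unsigned int offs, unsigned int len,
			 unsigned int blocksize, unsigned long long lblk_num)
{
	/* Partial blocks are not allowed: offs and len must both be block multiples. */
	if (len == 0 || !IS_ALIGNED(len | offs, blocksize))
		return false;

	for (unsigned int i = offs; i < offs + len; i += blocksize, lblk_num++)
		printf("crypt block at page offset %u, lblk %llu\n", i, lblk_num);
	return true;
}

int main(void)
{
	crypt_blocks(1024, 2048, 1024, 7);		/* ok: two blocks */
	if (!crypt_blocks(512, 2048, 1024, 7))		/* offs misaligned */
		printf("rejected misaligned range\n");
	return 0;
}
```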
/fs/nilfs2/
D | dir.c
     122  unsigned int offs, rec_len;    in nilfs_check_page() local
     134  for (offs = 0; offs <= limit - NILFS_DIR_REC_LEN(1); offs += rec_len) {    in nilfs_check_page()
     135  p = (struct nilfs_dir_entry *)(kaddr + offs);    in nilfs_check_page()
     144  if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))    in nilfs_check_page()
     147  if (offs != limit)    in nilfs_check_page()
     174  dir->i_ino, error, (page->index << PAGE_SHIFT) + offs,    in nilfs_check_page()
     179  p = (struct nilfs_dir_entry *)(kaddr + offs);    in nilfs_check_page()
     182  dir->i_ino, (page->index << PAGE_SHIFT) + offs,    in nilfs_check_page()
/fs/ufs/
D | dir.c
     115  unsigned offs, rec_len;    in ufs_check_page() local
     128  for (offs = 0; offs <= limit - UFS_DIR_REC_LEN(1); offs += rec_len) {    in ufs_check_page()
     129  p = (struct ufs_dir_entry *)(kaddr + offs);    in ufs_check_page()
     138  if (((offs + rec_len - 1) ^ offs) & ~chunk_mask)    in ufs_check_page()
     144  if (offs != limit)    in ufs_check_page()
     175  dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,    in ufs_check_page()
     179  p = (struct ufs_dir_entry *)(kaddr + offs);    in ufs_check_page()
     183  dir->i_ino, (page->index<<PAGE_SHIFT)+offs);    in ufs_check_page()
/fs/ext2/
D | dir.c
     122  unsigned offs, rec_len;    in ext2_check_page() local
     134  for (offs = 0; offs <= limit - EXT2_DIR_REC_LEN(1); offs += rec_len) {    in ext2_check_page()
     135  p = (ext2_dirent *)(kaddr + offs);    in ext2_check_page()
     144  if (unlikely(((offs + rec_len - 1) ^ offs) & ~(chunk_size-1)))    in ext2_check_page()
     149  if (offs != limit)    in ext2_check_page()
     181  dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,    in ext2_check_page()
     187  p = (ext2_dirent *)(kaddr + offs);    in ext2_check_page()
     191  dir->i_ino, (page->index<<PAGE_SHIFT)+offs,    in ext2_check_page()
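The nilfs2, ufs and ext2 directory checkers above share one validation loop: offs advances by each entry's rec_len, and an entry is rejected if its first and last bytes land in different chunks, which ((offs + rec_len - 1) ^ offs) & ~(chunk_size - 1) detects in a single expression. A sketch of just that boundary test:

```c
/* Sketch of the chunk-boundary test shared by the ext2/ufs/nilfs2 directory
 * checkers: XOR the offsets of a record's first and last byte and mask off
 * the low chunk bits; the result is non-zero exactly when the two bytes fall
 * into different chunks, i.e. the record straddles a chunk boundary.
 */
#include <stdio.h>
#include <stdbool.h>

static bool crosses_chunk(unsigned offs, unsigned rec_len, unsigned chunk_size)
{
	/* chunk_size must be a power of two, as filesystem block sizes are. */
	return ((offs + rec_len - 1) ^ offs) & ~(chunk_size - 1);
}

int main(void)
{
	printf("%d\n", crosses_chunk(1000, 24, 1024));	/* 0: stays in chunk 0 */
	printf("%d\n", crosses_chunk(1016, 24, 1024));	/* 1: spills into chunk 1 */
	return 0;
}
```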
D | xattr.c
     473  size_t offs = le16_to_cpu(last->e_value_offs);    in ext2_xattr_set() local
     474  if (offs < min_offs)    in ext2_xattr_set()
     475  min_offs = offs;    in ext2_xattr_set()
     576  size_t offs = le16_to_cpu(here->e_value_offs);    in ext2_xattr_set() local
     577  char *val = (char *)header + offs;    in ext2_xattr_set()
     600  if (o < offs)    in ext2_xattr_set()
/fs/jffs2/
D | debug.h
     214  __jffs2_dbg_dump_buffer(unsigned char *buf, int len, uint32_t offs);
     250  #define jffs2_dbg_dump_buffer(buf, len, offs) \    argument
     251  __jffs2_dbg_dump_buffer(*buf, len, offs);
     261  #define jffs2_dbg_dump_buffer(buf, len, offs)    argument
D | debug.c
     737  __jffs2_dbg_dump_buffer(unsigned char *buf, int len, uint32_t offs)    in __jffs2_dbg_dump_buffer() argument
     743  offs, offs + len, len);    in __jffs2_dbg_dump_buffer()
     744  i = skip = offs % JFFS2_BUFDUMP_BYTES_PER_LINE;    in __jffs2_dbg_dump_buffer()
     745  offs = offs & ~(JFFS2_BUFDUMP_BYTES_PER_LINE - 1);    in __jffs2_dbg_dump_buffer()
     748  printk(JFFS2_DBG "%#08x: ", offs);    in __jffs2_dbg_dump_buffer()
     757  offs += JFFS2_BUFDUMP_BYTES_PER_LINE;    in __jffs2_dbg_dump_buffer()
     758  printk(JFFS2_DBG "%0#8x: ", offs);    in __jffs2_dbg_dump_buffer()
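__jffs2_dbg_dump_buffer() keeps its hex-dump rows aligned: it records how many cells of the first row fall before offs (offs % JFFS2_BUFDUMP_BYTES_PER_LINE) and rounds offs down to the start of that row, so every printed address is a row multiple. The sketch below reproduces that layout in userspace; 32 and the padding style are stand-ins, not the kernel's constants or output format.

```c
/* Sketch of the row-alignment trick from __jffs2_dbg_dump_buffer(). */
#include <stdio.h>

#define BYTES_PER_LINE 32	/* stand-in for JFFS2_BUFDUMP_BYTES_PER_LINE */

static void dump_buffer(const unsigned char *buf, int len, unsigned int offs)
{
	int i, skip = offs % BYTES_PER_LINE;	/* cells before offs on row one */

	offs &= ~(BYTES_PER_LINE - 1);		/* round down to the row start */

	while (len) {
		printf("%#08x: ", offs);
		for (i = 0; i < BYTES_PER_LINE; i++) {
			if (skip) {
				printf("   ");	/* blank cell before offs */
				skip--;
			} else if (len) {
				printf("%02x ", *buf++);
				len--;
			}
		}
		printf("\n");
		offs += BYTES_PER_LINE;
	}
}

int main(void)
{
	unsigned char data[40];

	for (int i = 0; i < 40; i++)
		data[i] = (unsigned char)i;
	dump_buffer(data, sizeof(data), 0x1234);	/* dump starts mid-row */
	return 0;
}
```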
/fs/gfs2/
D | bmap.c
    1381  unsigned int offs;    in gfs2_journaled_truncate() local
    1387  offs = oldsize & ~PAGE_MASK;    in gfs2_journaled_truncate()
    1388  if (offs && chunk > PAGE_SIZE)    in gfs2_journaled_truncate()
    1389  chunk = offs + ((chunk - offs) & PAGE_MASK);    in gfs2_journaled_truncate()
    1418  unsigned int offs = newsize & (blocksize - 1);    in trunc_start() local
    1419  if (offs) {    in trunc_start()
    1421  blocksize - offs);    in trunc_start()
    2416  unsigned int offs;    in gfs2_journaled_truncate_range() local
    2422  offs = offset & ~PAGE_MASK;    in gfs2_journaled_truncate_range()
    2423  if (offs && chunk > PAGE_SIZE)    in gfs2_journaled_truncate_range()
          [all …]
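gfs2_journaled_truncate() trims each truncation chunk so the new end of the handled range lands on a page boundary: offs is the in-page offset of the current size (oldsize & ~PAGE_MASK, where PAGE_MASK is ~(PAGE_SIZE - 1)), and a chunk larger than a page is clamped to offs plus a whole number of pages via chunk = offs + ((chunk - offs) & PAGE_MASK). The sketch below walks through that clamp once with made-up sizes; the surrounding transaction loop is omitted.

```c
/* Sketch of the chunk clamping in gfs2_journaled_truncate(). */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long oldsize = 1000000;	/* size being truncated downward */
	unsigned long chunk = 32768;		/* max bytes handled per transaction */
	unsigned long offs = oldsize & ~PAGE_MASK;	/* 1000000 % 4096 = 576 */

	/* Clamp: partial last page plus a whole number of full pages. */
	if (offs && chunk > PAGE_SIZE)
		chunk = offs + ((chunk - offs) & PAGE_MASK);

	/* 576 + round_down(32768 - 576, 4096) = 576 + 28672 = 29248 */
	printf("chunk trimmed to %lu bytes; new end %lu is page aligned: %s\n",
	       chunk, oldsize - chunk,
	       ((oldsize - chunk) & ~PAGE_MASK) == 0 ? "yes" : "no");
	return 0;
}
```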