/fs/jffs2/
scan.c
  47:   struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s);
  49:   struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s);
  288:  uint32_t ofs, uint32_t len)  [in jffs2_fill_scan_buf(), argument]
  293:  ret = jffs2_flash_read(c, ofs, len, &retlen, buf);  [in jffs2_fill_scan_buf()]
  296:  len, ofs, ret);  [in jffs2_fill_scan_buf()]
  301:  ofs, retlen);  [in jffs2_fill_scan_buf()]
  328:  struct jffs2_raw_xattr *rx, uint32_t ofs,  [in jffs2_scan_xattr_node(), argument]
  338:  ofs, je32_to_cpu(rx->node_crc), crc);  [in jffs2_scan_xattr_node()]
  351:  ofs, je32_to_cpu(rx->totlen), totlen);  [in jffs2_scan_xattr_node()]
  363:  = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, NULL);  [in jffs2_scan_xattr_node()]
  ... (more matches elided)
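The jffs2_fill_scan_buf() matches above follow a common flash-I/O pattern: read len bytes at offset ofs, then treat both a non-zero return code and a short retlen as failures. Below is a minimal userspace sketch of that pattern; flash_read_fn and fill_scan_buf() are hypothetical names standing in for jffs2_flash_read() and the kernel helper, not the actual implementation.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical flash-read callback: returns 0 on success and sets *retlen. */
    typedef int (*flash_read_fn)(uint32_t ofs, uint32_t len, size_t *retlen,
                                 unsigned char *buf);

    /* Fill a scan buffer from flash, checking the return code first and the
     * actually-read length second, as the scan code does. */
    static int fill_scan_buf(flash_read_fn read_fn, unsigned char *buf,
                             uint32_t ofs, uint32_t len)
    {
        size_t retlen = 0;
        int ret = read_fn(ofs, len, &retlen, buf);

        if (ret) {
            fprintf(stderr, "cannot read 0x%x bytes from 0x%08x: error %d\n",
                    len, ofs, ret);
            return ret;
        }
        if (retlen < len) {
            fprintf(stderr, "short read at 0x%08x: wanted 0x%x, got 0x%zx\n",
                    ofs, len, retlen);
            return -1;
        }
        return 0;
    }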
nodelist.c
  66:   if (frag && frag->ofs != size) {  [in jffs2_truncate_fragtree()]
  67:   if (frag->ofs+frag->size > size) {  [in jffs2_truncate_fragtree()]
  68:   frag->size = size - frag->ofs;  [in jffs2_truncate_fragtree()]
  72:   while (frag && frag->ofs >= size) {  [in jffs2_truncate_fragtree()]
  88:   if (frag->ofs + frag->size < size)  [in jffs2_truncate_fragtree()]
  89:   return frag->ofs + frag->size;  [in jffs2_truncate_fragtree()]
  93:   if (frag->node && (frag->ofs & (PAGE_SIZE - 1)) == 0) {  [in jffs2_truncate_fragtree()]
  95:   frag->ofs, frag->ofs + frag->size);  [in jffs2_truncate_fragtree()]
  109:  ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size);  [in jffs2_obsolete_node_frag()]
  114:  …ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size, this->node->frags);  [in jffs2_obsolete_node_frag()]
  ... (more matches elided)
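The jffs2_truncate_fragtree() matches show the two cases a truncation has to handle: a fragment that straddles the new size is trimmed (frag->size = size - frag->ofs), and fragments that start at or beyond the new size are dropped. The kernel walks an rb-tree; the sketch below does the same over a hypothetical sorted singly linked list, just to make the two cases explicit.

    #include <stdint.h>
    #include <stdlib.h>

    /* Hypothetical flat fragment list; the kernel keeps these in an rb-tree. */
    struct frag {
        uint32_t ofs;       /* file offset this fragment covers */
        uint32_t size;      /* number of bytes covered          */
        struct frag *next;  /* sorted by ofs, non-overlapping   */
    };

    /* Truncate the list to `size` bytes: shrink a straddling fragment and
     * free every fragment that starts at or beyond the new size. */
    static void truncate_frags(struct frag **head, uint32_t size)
    {
        struct frag **pp = head;

        while (*pp && (*pp)->ofs < size) {
            struct frag *f = *pp;
            if (f->ofs + f->size > size)
                f->size = size - f->ofs;  /* partially covered: trim */
            pp = &f->next;
        }
        while (*pp) {                     /* wholly past the new size: drop */
            struct frag *dead = *pp;
            *pp = dead->next;
            free(dead);
        }
    }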
read.c
  25:   int ofs, int len)  [in jffs2_read_dnode(), argument]
  71:   D1(if(ofs + len > je32_to_cpu(ri->dsize)) {  [in jffs2_read_dnode()]
  73:   len, ofs, je32_to_cpu(ri->dsize));  [in jffs2_read_dnode()]
  143:  memcpy(buf, decomprbuf+ofs, len);  [in jffs2_read_dnode()]
  177:  if (unlikely(!frag || frag->ofs > offset ||  [in jffs2_read_inode_range()]
  178:  frag->ofs + frag->size <= offset)) {  [in jffs2_read_inode_range()]
  180:  if (frag && frag->ofs > offset) {  [in jffs2_read_inode_range()]
  182:  f->inocache->ino, frag->ofs, offset);  [in jffs2_read_inode_range()]
  183:  holesize = min(holesize, frag->ofs - offset);  [in jffs2_read_inode_range()]
  192:  uint32_t holeend = min(end, frag->ofs + frag->size);  [in jffs2_read_inode_range()]
  ... (more matches elided)
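jffs2_read_inode_range() zero-fills any part of the requested range that no fragment covers, clamping the hole to the start of the next fragment (holesize = min(holesize, frag->ofs - offset)). The sketch below reproduces that hole-versus-data walk over a hypothetical sorted, non-overlapping fragment array; the kernel works against its fragtree and decompresses node data rather than copying from memory.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct frag {                 /* hypothetical contiguous extent of file data */
        uint32_t ofs, size;
        const unsigned char *data;
    };

    /* Read [offset, offset+len) from a sorted fragment array, zero-filling holes.
     * Linear scan for clarity; a tree or binary search would do the lookup. */
    static void read_range(const struct frag *frags, size_t nfrags,
                           unsigned char *buf, uint32_t offset, uint32_t len)
    {
        uint32_t end = offset + len;

        while (offset < end) {
            const struct frag *frag = NULL;
            for (size_t i = 0; i < nfrags; i++) {   /* first frag ending past offset */
                if (frags[i].ofs + frags[i].size > offset) {
                    frag = &frags[i];
                    break;
                }
            }
            if (!frag || frag->ofs > offset) {      /* hole: zero up to next frag */
                uint32_t holeend = frag ? frag->ofs : end;
                if (holeend > end)
                    holeend = end;
                memset(buf, 0, holeend - offset);
                buf += holeend - offset;
                offset = holeend;
            } else {                                /* data: copy the covered part */
                uint32_t segend = frag->ofs + frag->size;
                if (segend > end)
                    segend = end;
                memcpy(buf, frag->data + (offset - frag->ofs), segend - offset);
                buf += segend - offset;
                offset = segend;
            }
        }
    }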
readinode.c
  37:  uint32_t crc, ofs, len;  [in check_node_data(), local]
  43:  ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode);  [in check_node_data()]
  47:  int adj = ofs % c->wbuf_pagesize;  [in check_node_data()]
  53:  ref_offset(ref), tn->csize, ofs);  [in check_node_data()]
  57:  ofs += adj;  [in check_node_data()]
  62:  ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len);  [in check_node_data()]
  67:  err = mtd_point(c->mtd, ofs, len, &retlen, (void **)&buffer, NULL);  [in check_node_data()]
  70:  mtd_unpoint(c->mtd, ofs, retlen);  [in check_node_data()]
  85:  err = jffs2_flash_read(c, ofs, len, &retlen, buffer);  [in check_node_data()]
  87:  JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ofs, err);  [in check_node_data()]
  ... (more matches elided)
compr_rubin.c
  33:  unsigned int ofs;  [member]
  48:  unsigned buflen, unsigned ofs,  [in init_pushpull(), argument]
  53:  pp->ofs = ofs;  [in init_pushpull()]
  59:  if (pp->ofs >= pp->buflen - (use_reserved?0:pp->reserve))  [in pushbit()]
  63:  pp->buf[pp->ofs >> 3] |= (1<<(7-(pp->ofs & 7)));  [in pushbit()]
  65:  pp->buf[pp->ofs >> 3] &= ~(1<<(7-(pp->ofs & 7)));  [in pushbit()]
  67:  pp->ofs++;  [in pushbit()]
  74:  return pp->ofs;  [in pushedbits()]
  81:  bit = (pp->buf[pp->ofs >> 3] >> (7-(pp->ofs & 7))) & 1;  [in pullbit()]
  83:  pp->ofs++;  [in pullbit()]
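Here ofs is a bit index rather than a byte offset: ofs >> 3 selects the byte and 7 - (ofs & 7) the bit within it, most significant bit first. A self-contained sketch of the same push/pull cursor (the struct name is hypothetical and the reserve-space handling is simplified away):

    #include <stdint.h>

    /* Hypothetical bit cursor over a byte buffer; ofs counts bits, MSB-first. */
    struct bitbuf {
        uint8_t *buf;
        unsigned int buflen;   /* capacity in bits      */
        unsigned int ofs;      /* current bit position  */
    };

    static int pushbit(struct bitbuf *bb, int bit)
    {
        if (bb->ofs >= bb->buflen)
            return -1;                                        /* out of space */
        if (bit)
            bb->buf[bb->ofs >> 3] |=  (uint8_t)(1 << (7 - (bb->ofs & 7)));
        else
            bb->buf[bb->ofs >> 3] &= (uint8_t)~(1 << (7 - (bb->ofs & 7)));
        bb->ofs++;
        return 0;
    }

    static int pullbit(struct bitbuf *bb)
    {
        int bit = (bb->buf[bb->ofs >> 3] >> (7 - (bb->ofs & 7))) & 1;
        bb->ofs++;
        return bit;
    }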
gc.c
  537:   end = frag->ofs + frag->size;  [in jffs2_garbage_collect_live()]
  539:   start = frag->ofs;  [in jffs2_garbage_collect_live()]
  808:   ilen = last_frag->ofs + last_frag->size;  [in jffs2_garbage_collect_metadata()]
  1087:  ilen = frag->ofs + frag->size;  [in jffs2_garbage_collect_hole()]
  1139:  for (frag = jffs2_lookup_node_frag(&f->fragtree, fn->ofs);  [in jffs2_garbage_collect_hole()]
  1141:  if (frag->ofs > fn->size + fn->ofs)  [in jffs2_garbage_collect_hole()]
  1202:  BUG_ON(frag->ofs != start);  [in jffs2_garbage_collect_dnode()]
  1205:  while((frag = frag_prev(frag)) && frag->ofs >= min) {  [in jffs2_garbage_collect_dnode()]
  1209:  if (frag->ofs > min) {  [in jffs2_garbage_collect_dnode()]
  1211:  frag->ofs, frag->ofs+frag->size);  [in jffs2_garbage_collect_dnode()]
  ... (more matches elided)
wbuf.c
  231:  uint32_t ofs)  [in jffs2_verify_write(), argument]
  237:  ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify);  [in jffs2_verify_write()]
  244:  __func__, ofs, retlen, c->wbuf_pagesize);  [in jffs2_verify_write()]
  283:  uint32_t start, end, ofs, len;  [in jffs2_wbuf_recover(), local]
  406:  ofs = write_ofs(c);  [in jffs2_wbuf_recover()]
  418:  towrite, ofs);  [in jffs2_wbuf_recover()]
  423:  pr_notice("Faking write error at 0x%08x\n", ofs);  [in jffs2_wbuf_recover()]
  425:  mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf);  [in jffs2_wbuf_recover()]
  429:  ret = mtd_write(c->mtd, ofs, towrite, &retlen,  [in jffs2_wbuf_recover()]
  432:  if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) {  [in jffs2_wbuf_recover()]
  ... (more matches elided)
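The recovery path writes the rescued data at a fresh offset and then relies on jffs2_verify_write(), which reads the page back and compares it with what was written. A hedged sketch of that write-then-verify pattern with hypothetical device callbacks; the kernel version also covers the fault-injection ("Faking write error") path shown above.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical device callbacks; both return 0 on success and set *retlen. */
    typedef int (*dev_write_fn)(uint32_t ofs, size_t len, size_t *retlen, const uint8_t *buf);
    typedef int (*dev_read_fn)(uint32_t ofs, size_t len, size_t *retlen, uint8_t *buf);

    /* Write one page at ofs, then read it back and compare (write-verify). */
    static int write_verified(dev_write_fn wr, dev_read_fn rd, uint32_t ofs,
                              const uint8_t *buf, size_t pagesize, uint8_t *verify_buf)
    {
        size_t retlen = 0;

        if (wr(ofs, pagesize, &retlen, buf) || retlen != pagesize) {
            fprintf(stderr, "write failed at 0x%08x\n", ofs);
            return -1;
        }
        if (rd(ofs, pagesize, &retlen, verify_buf) || retlen != pagesize) {
            fprintf(stderr, "read-back failed at 0x%08x\n", ofs);
            return -1;
        }
        if (memcmp(buf, verify_buf, pagesize)) {
            fprintf(stderr, "verify mismatch at 0x%08x\n", ofs);
            return -1;
        }
        return 0;
    }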
debug.h
  192:  uint32_t ofs, int len);
  216:  __jffs2_dbg_dump_node(struct jffs2_sb_info *c, uint32_t ofs);
  227:  #define jffs2_dbg_prewrite_paranoia_check(c, ofs, len) \  [argument]
  228:  __jffs2_dbg_prewrite_paranoia_check(c, ofs, len)
  234:  #define jffs2_dbg_prewrite_paranoia_check(c, ofs, len)  [argument]
  252:  #define jffs2_dbg_dump_node(c, ofs) \  [argument]
  253:  __jffs2_dbg_dump_node(c, ofs);
  262:  #define jffs2_dbg_dump_node(c, ofs)  [argument]
summary.c
  116:  uint32_t ofs)  [in jffs2_sum_add_inode_mem(), argument]
  126:  temp->offset = cpu_to_je32(ofs); /* relative offset from the beginning of the jeb */  [in jffs2_sum_add_inode_mem()]
  134:  uint32_t ofs)  [in jffs2_sum_add_dirent_mem(), argument]
  144:  temp->offset = cpu_to_je32(ofs); /* relative from the beginning of the jeb */  [in jffs2_sum_add_dirent_mem()]
  158:  int jffs2_sum_add_xattr_mem(struct jffs2_summary *s, struct jffs2_raw_xattr *rx, uint32_t ofs)  [in jffs2_sum_add_xattr_mem(), argument]
  169:  temp->offset = cpu_to_je32(ofs);  [in jffs2_sum_add_xattr_mem()]
  176:  int jffs2_sum_add_xref_mem(struct jffs2_summary *s, struct jffs2_raw_xref *rr, uint32_t ofs)  [in jffs2_sum_add_xref_mem(), argument]
  185:  temp->offset = cpu_to_je32(ofs);  [in jffs2_sum_add_xref_mem()]
  249:  unsigned long count, uint32_t ofs)  [in jffs2_sum_add_kvec(), argument]
  260:  jeb = &c->blocks[ofs / c->sector_size];  [in jffs2_sum_add_kvec()]
  ... (more matches elided)
debug.c
  98:   if (frag->ofs & (PAGE_SIZE-1) && frag_prev(frag)  [in __jffs2_dbg_fragtree_paranoia_check_nolock()]
  105:  if ((frag->ofs+frag->size) & (PAGE_SIZE-1) && frag_next(frag)  [in __jffs2_dbg_fragtree_paranoia_check_nolock()]
  108:  ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size);  [in __jffs2_dbg_fragtree_paranoia_check_nolock()]
  126:  uint32_t ofs, int len)  [in __jffs2_dbg_prewrite_paranoia_check(), argument]
  136:  ret = jffs2_flash_read(c, ofs, len, &retlen, buf);  [in __jffs2_dbg_prewrite_paranoia_check()]
  151:  ofs, ofs + i);  [in __jffs2_dbg_prewrite_paranoia_check()]
  152:  __jffs2_dbg_dump_buffer(buf, len, ofs);  [in __jffs2_dbg_prewrite_paranoia_check()]
  713:  this->ofs, this->ofs+this->size, ref_offset(this->node->raw),  [in __jffs2_dbg_dump_fragtree_nolock()]
  718:  this->ofs, this->ofs+this->size, this, frag_left(this),  [in __jffs2_dbg_dump_fragtree_nolock()]
  720:  if (this->ofs != lastofs)  [in __jffs2_dbg_dump_fragtree_nolock()]
  ... (more matches elided)
os-linux.h
  78:   #define jffs2_flash_write(c, ofs, len, retlen, buf) jffs2_flash_direct_write(c, ofs, len, retlen, b…  [argument]
  79:   #define jffs2_flash_read(c, ofs, len, retlen, buf) (mtd_read((c)->mtd, ofs, len, retlen, buf))  [argument]
  116:  int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, const u_char…
  117:  int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf);
  194:  int jffs2_flash_direct_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
writev.c
  32:  int jffs2_flash_direct_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,  [in jffs2_flash_direct_write(), argument]
  36:  ret = mtd_write(c->mtd, ofs, len, retlen, buf);  [in jffs2_flash_direct_write()]
  45:  res = jffs2_sum_add_kvec(c, vecs, 1, (uint32_t) ofs);  [in jffs2_flash_direct_write()]
erase.c
  343:  uint32_t ofs;  [in jffs2_block_check_erase(), local]
  388:  for (ofs = jeb->offset; ofs < jeb->offset + c->sector_size; ) {  [in jffs2_block_check_erase()]
  389:  uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs);  [in jffs2_block_check_erase()]
  392:  *bad_offset = ofs;  [in jffs2_block_check_erase()]
  394:  ret = mtd_read(c->mtd, ofs, readlen, &retlen, ebuf);  [in jffs2_block_check_erase()]
  397:  ofs, ret);  [in jffs2_block_check_erase()]
  403:  ofs, readlen, retlen);  [in jffs2_block_check_erase()]
  418:  ofs += readlen;  [in jffs2_block_check_erase()]
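jffs2_block_check_erase() walks an erase block in PAGE_SIZE-sized reads, clamping the last chunk with min(), and checks that every byte reads back as 0xFF. A sketch of that chunked verification loop; the chunk size and read callback are stand-ins, not the MTD API.

    #include <stddef.h>
    #include <stdint.h>

    typedef int (*dev_read_fn)(uint32_t ofs, uint32_t len, size_t *retlen, uint8_t *buf);

    #define CHUNK_SIZE 4096u   /* stand-in for PAGE_SIZE */

    /* Check that [start, start+size) reads back as all 0xFF (freshly erased
     * flash). Returns 0 if clean, 1 with *bad_offset set to the first non-0xFF
     * byte, or -1 on an I/O error. ebuf must hold at least CHUNK_SIZE bytes. */
    static int check_erased(dev_read_fn rd, uint32_t start, uint32_t size,
                            uint8_t *ebuf, uint32_t *bad_offset)
    {
        for (uint32_t ofs = start; ofs < start + size; ) {
            uint32_t readlen = start + size - ofs;
            size_t retlen = 0;

            if (readlen > CHUNK_SIZE)
                readlen = CHUNK_SIZE;              /* clamp the final, shorter chunk */
            if (rd(ofs, readlen, &retlen, ebuf) || retlen != readlen)
                return -1;                         /* read failed or came up short */
            for (uint32_t i = 0; i < readlen; i++) {
                if (ebuf[i] != 0xff) {
                    *bad_offset = ofs + i;
                    return 1;                      /* block is not fully erased */
                }
            }
            ofs += readlen;
        }
        return 0;
    }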
summary.h
  185:  int jffs2_sum_add_inode_mem(struct jffs2_summary *s, struct jffs2_raw_inode *ri, uint32_t ofs);
  186:  int jffs2_sum_add_dirent_mem(struct jffs2_summary *s, struct jffs2_raw_dirent *rd, uint32_t ofs);
  187:  int jffs2_sum_add_xattr_mem(struct jffs2_summary *s, struct jffs2_raw_xattr *rx, uint32_t ofs);
  188:  int jffs2_sum_add_xref_mem(struct jffs2_summary *s, struct jffs2_raw_xref *rr, uint32_t ofs);
nodemgmt.c
  501:  uint32_t ofs, uint32_t len,  [in jffs2_add_physical_node_ref(), argument]
  507:  jeb = &c->blocks[ofs / c->sector_size];  [in jffs2_add_physical_node_ref()]
  510:  __func__, ofs & ~3, ofs & 3, len);  [in jffs2_add_physical_node_ref()]
  515:  if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))  [in jffs2_add_physical_node_ref()]
  516:  && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {  [in jffs2_add_physical_node_ref()]
  518:  ofs & ~3, ofs & 3);  [in jffs2_add_physical_node_ref()]
  530:  new = jffs2_link_node_ref(c, jeb, ofs, len, ic);  [in jffs2_add_physical_node_ref()]
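In jffs2_add_physical_node_ref() the low two bits of ofs are not part of the flash address: ofs & ~3 is the real, 4-byte-aligned offset and ofs & 3 is the reference state (the scan.c entry's ofs | REF_PRISTINE builds exactly such a value). A small sketch of that flag-in-low-bits packing; the enum names and values here are illustrative, not the kernel's REF_* definitions.

    #include <assert.h>
    #include <stdint.h>

    /* Reference states packed into the two low bits of a 4-byte-aligned flash
     * offset. Names and values are illustrative; see the REF_* macros in the
     * jffs2 headers for the real set. */
    enum ref_state { ST_UNCHECKED = 0, ST_OBSOLETE = 1, ST_PRISTINE = 2, ST_NORMAL = 3 };

    static inline uint32_t ref_pack(uint32_t offset, enum ref_state st)
    {
        assert((offset & 3) == 0);          /* node offsets are 4-byte aligned */
        return offset | (uint32_t)st;       /* low two bits carry the state    */
    }

    static inline uint32_t ref_offset(uint32_t packed) { return packed & ~3u; }
    static inline unsigned ref_flags(uint32_t packed)  { return packed & 3u; }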
nodelist.h
  216:  uint32_t ofs; /* The offset to which the data of this node belongs */  [member]
  274:  uint32_t ofs; /* The offset to which this fragment belongs */  [member]
  377:  uint32_t ofs, uint32_t len,
  390:  uint32_t ofs, uint32_t len,
  455:  int ofs, int len);
/fs/f2fs/
extent_cache.c
  19:  unsigned int ofs)  [in __lookup_rb_tree_fast(), argument]
  22:  if (cached_re->ofs <= ofs &&  [in __lookup_rb_tree_fast()]
  23:  cached_re->ofs + cached_re->len > ofs) {  [in __lookup_rb_tree_fast()]
  31:  unsigned int ofs)  [in __lookup_rb_tree_slow(), argument]
  39:  if (ofs < re->ofs)  [in __lookup_rb_tree_slow()]
  41:  else if (ofs >= re->ofs + re->len)  [in __lookup_rb_tree_slow()]
  50:  struct rb_entry *cached_re, unsigned int ofs)  [in f2fs_lookup_rb_tree(), argument]
  54:  re = __lookup_rb_tree_fast(cached_re, ofs);  [in f2fs_lookup_rb_tree()]
  56:  return __lookup_rb_tree_slow(root, ofs);  [in f2fs_lookup_rb_tree()]
  64:  unsigned int ofs, bool *leftmost)  [in f2fs_lookup_rb_tree_for_insert(), argument]
  ... (more matches elided)
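f2fs_lookup_rb_tree() first tests a cached entry (cached_re->ofs <= ofs && cached_re->ofs + cached_re->len > ofs) and only then descends the rb-tree, going left when ofs < re->ofs and right when ofs >= re->ofs + re->len. The sketch below shows the same interval lookup over a hypothetical sorted array so the fast path and the descent are easy to compare; it is not the kernel code.

    #include <stddef.h>

    struct extent { unsigned int ofs, len; };   /* hypothetical [ofs, ofs+len) entry */

    static int covers(const struct extent *e, unsigned int ofs)
    {
        return e && e->ofs <= ofs && e->ofs + e->len > ofs;
    }

    /* Find the extent covering ofs: try the cached hint first, then binary-search
     * a sorted, non-overlapping array (standing in for the rb-tree walk). */
    static const struct extent *lookup_extent(const struct extent *sorted, size_t n,
                                              const struct extent *cached,
                                              unsigned int ofs)
    {
        if (covers(cached, ofs))
            return cached;                        /* fast path: hint still valid */

        size_t lo = 0, hi = n;
        while (lo < hi) {
            size_t mid = lo + (hi - lo) / 2;
            if (ofs < sorted[mid].ofs)
                hi = mid;                         /* go left  */
            else if (ofs >= sorted[mid].ofs + sorted[mid].len)
                lo = mid + 1;                     /* go right */
            else
                return &sorted[mid];              /* covering extent found */
        }
        return NULL;                              /* no extent covers ofs */
    }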
node.h
  268:  nid_t ino, unsigned int ofs, bool reset)  [in fill_node_footer(), argument]
  282:  rn->footer.flag = cpu_to_le32((ofs << OFFSET_BIT_SHIFT) |  [in fill_node_footer()]
  344:  unsigned int ofs = ofs_of_node(node_page);  [in IS_DNODE(), local]
  346:  if (f2fs_has_xattr_block(ofs))  [in IS_DNODE()]
  349:  if (ofs == 3 || ofs == 4 + NIDS_PER_BLOCK ||  [in IS_DNODE()]
  350:  ofs == 5 + 2 * NIDS_PER_BLOCK)  [in IS_DNODE()]
  352:  if (ofs >= 6 + 2 * NIDS_PER_BLOCK) {  [in IS_DNODE()]
  353:  ofs -= 6 + 2 * NIDS_PER_BLOCK;  [in IS_DNODE()]
  354:  if (!((long int)ofs % (NIDS_PER_BLOCK + 1)))  [in IS_DNODE()]
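IS_DNODE() decides purely from the node offset whether a node page is a direct node: offsets 3, 4 + NIDS_PER_BLOCK and 5 + 2 * NIDS_PER_BLOCK are index nodes, and inside the double-indirect region every (NIDS_PER_BLOCK + 1)-th node is again an index node. The sketch below mirrors only that arithmetic; the NIDS_PER_BLOCK value is illustrative and the xattr-block special case from line 346 is omitted.

    #include <stdbool.h>

    #define NIDS_PER_BLOCK 1018   /* illustrative; the real value depends on block size */

    /* Is the node at logical offset ofs in an inode's node tree a direct node
     * (one that holds data block addresses) rather than an index node? */
    static bool is_direct_node(unsigned int ofs)
    {
        /* Offsets 3, 4 + N and 5 + 2N are the indirect / double-indirect index nodes. */
        if (ofs == 3 || ofs == 4 + NIDS_PER_BLOCK || ofs == 5 + 2 * NIDS_PER_BLOCK)
            return false;
        /* Inside the double-indirect area, every (N + 1)-th node is an index node. */
        if (ofs >= 6 + 2 * NIDS_PER_BLOCK) {
            ofs -= 6 + 2 * NIDS_PER_BLOCK;
            if (ofs % (NIDS_PER_BLOCK + 1) == 0)
                return false;
        }
        return true;
    }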
/fs/overlayfs/
util.c
  24:  struct ovl_fs *ofs = dentry->d_sb->s_fs_info;  [in ovl_want_write(), local]
  25:  return mnt_want_write(ofs->upper_mnt);  [in ovl_want_write()]
  30:  struct ovl_fs *ofs = dentry->d_sb->s_fs_info;  [in ovl_drop_write(), local]
  31:  mnt_drop_write(ofs->upper_mnt);  [in ovl_drop_write()]
  36:  struct ovl_fs *ofs = dentry->d_sb->s_fs_info;  [in ovl_workdir(), local]
  37:  return ofs->workdir;  [in ovl_workdir()]
  42:  struct ovl_fs *ofs = sb->s_fs_info;  [in ovl_override_creds(), local]
  44:  if (!ofs->config.override_creds)  [in ovl_override_creds()]
  46:  return override_creds(ofs->creator_cred);  [in ovl_override_creds()]
  63:  struct ovl_fs *ofs = sb->s_fs_info;  [in ovl_same_sb(), local]
  ... (more matches elided)
super.c
  280:  struct ovl_fs *ofs = dentry->d_sb->s_fs_info;  [in ovl_statfs(), local]
  289:  buf->f_namelen = ofs->namelen;  [in ovl_statfs()]
  647:  static int ovl_check_namelen(struct path *path, struct ovl_fs *ofs,  [in ovl_check_namelen(), argument]
  656:  ofs->namelen = max(ofs->namelen, statfs.f_namelen);  [in ovl_check_namelen()]
  662:  struct ovl_fs *ofs, int *stack_depth, bool *remote)  [in ovl_lower_dir(), argument]
  670:  err = ovl_check_namelen(path, ofs, name);  [in ovl_lower_dir()]
  683:  if (ofs->config.index && !ovl_can_decode_fh(path->dentry->d_sb)) {  [in ovl_lower_dir()]
  684:  ofs->config.index = false;  [in ovl_lower_dir()]
/fs/ntfs/
aops.c
  90:    int ofs;  [in ntfs_end_buffer_async_read(), local]
  93:    ofs = 0;  [in ntfs_end_buffer_async_read()]
  95:    ofs = init_size - file_ofs;  [in ntfs_end_buffer_async_read()]
  98:    memset(kaddr + bh_offset(bh) + ofs, 0,  [in ntfs_end_buffer_async_read()]
  99:    bh->b_size - ofs);  [in ntfs_end_buffer_async_read()]
  1125:  unsigned int ofs;  [in ntfs_write_mst_block(), local]
  1131:  ofs = bh_offset(tbh);  [in ntfs_write_mst_block()]
  1137:  mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)  [in ntfs_write_mst_block()]
  1142:  (MFT_RECORD*)(kaddr + ofs), &tni)) {  [in ntfs_write_mst_block()]
  1167:  err2 = pre_write_mst_fixup((NTFS_RECORD*)(kaddr + ofs),  [in ntfs_write_mst_block()]
  ... (more matches elided)
mft.c
  55:    unsigned ofs;  [in map_mft_record_page(), local]
  66:    ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_MASK;  [in map_mft_record_page()]
  74:    if (index > end_index || (i_size & ~PAGE_MASK) < ofs +  [in map_mft_record_page()]
  89:    ofs)))) {  [in map_mft_record_page()]
  91:    ni->page_ofs = ofs;  [in map_mft_record_page()]
  92:    return page_address(page) + ofs;  [in map_mft_record_page()]
  1135:  s64 pass_end, ll, data_pos, pass_start, ofs, bit;  [in ntfs_mft_bitmap_find_and_alloc_free_rec_nolock(), local]
  1181:  ofs = data_pos >> 3;  [in ntfs_mft_bitmap_find_and_alloc_free_rec_nolock()]
  1182:  page_ofs = ofs & ~PAGE_MASK;  [in ntfs_mft_bitmap_find_and_alloc_free_rec_nolock()]
  1184:  ll = ((pass_end + 7) >> 3) - ofs;  [in ntfs_mft_bitmap_find_and_alloc_free_rec_nolock()]
  ... (more matches elided)
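map_mft_record_page() turns an MFT record number into a page index plus the byte offset of the record within that page: shift the record number by the record-size bits, then mask with the page size (in the kernel, & ~PAGE_MASK keeps exactly the in-page bits). A sketch with illustrative record and page sizes:

    #include <stdint.h>

    #define RECORD_SIZE_BITS 10u                 /* illustrative: 1 KiB records */
    #define PAGE_SHIFT_BITS  12u                 /* illustrative: 4 KiB pages   */
    #define PAGE_BYTES       (1u << PAGE_SHIFT_BITS)

    /* Convert a record number into the page that holds it and the byte offset
     * of the record inside that page. Masking with (PAGE_BYTES - 1) is the same
     * operation as the kernel's `& ~PAGE_MASK`. */
    static void record_location(uint64_t recno, uint64_t *page_index, uint32_t *ofs)
    {
        uint64_t byte_pos = recno << RECORD_SIZE_BITS;

        *page_index = byte_pos >> PAGE_SHIFT_BITS;       /* which page             */
        *ofs = (uint32_t)(byte_pos & (PAGE_BYTES - 1));  /* offset within the page */
    }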
file.c
  1217:  int ofs = 0;  [in ntfs_prepare_pages_for_non_resident_write(), local]
  1220:  ofs = initialized_size - bh_pos;  [in ntfs_prepare_pages_for_non_resident_write()]
  1221:  zero_user_segment(page, bh_offset(bh) + ofs,  [in ntfs_prepare_pages_for_non_resident_write()]
  1700:  unsigned ofs, struct iov_iter *i, size_t bytes)  [in ntfs_copy_from_user_iter(), argument]
  1708:  len = PAGE_SIZE - ofs;  [in ntfs_copy_from_user_iter()]
  1711:  copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs,  [in ntfs_copy_from_user_iter()]
  1720:  ofs = 0;  [in ntfs_copy_from_user_iter()]
  1795:  unsigned ofs, do_pages, u;  [in ntfs_perform_write(), local]
  1799:  ofs = pos & ~PAGE_MASK;  [in ntfs_perform_write()]
  1800:  bytes = PAGE_SIZE - ofs;  [in ntfs_perform_write()]
  ... (more matches elided)
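ntfs_perform_write() and ntfs_copy_from_user_iter() split a write at page boundaries: ofs = pos & ~PAGE_MASK is the offset within the first page, the first chunk is PAGE_SIZE - ofs bytes, and every later page starts at ofs = 0. A sketch of that chunking loop with an illustrative page size and a caller-supplied emit callback:

    #include <stddef.h>
    #include <stdint.h>

    #define PAGE_BYTES 4096u   /* illustrative page size */

    /* Walk a write of `count` bytes starting at file position `pos`, splitting it
     * at page boundaries: the first chunk covers the remainder of the page that
     * contains pos, later chunks start at in-page offset 0. */
    static void for_each_page_chunk(uint64_t pos, size_t count,
                                    void (*emit)(uint64_t page_index,
                                                 unsigned ofs, size_t bytes))
    {
        unsigned ofs = (unsigned)(pos & (PAGE_BYTES - 1));  /* offset within first page */

        while (count) {
            size_t bytes = PAGE_BYTES - ofs;                /* room left in this page */
            if (bytes > count)
                bytes = count;
            emit(pos / PAGE_BYTES, ofs, bytes);
            pos += bytes;
            count -= bytes;
            ofs = 0;                                        /* later pages start at 0 */
        }
    }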
/fs/omfs/
dir.c
  25:   const char *name, int namelen, int *ofs)  [in omfs_get_bucket(), argument]
  30:   *ofs = OMFS_DIR_START + bucket * 8;  [in omfs_get_bucket()]
  71:   int ofs;  [in omfs_find_entry(), local]
  74:   bh = omfs_get_bucket(dir, name, namelen, &ofs);  [in omfs_find_entry()]
  78:   block = be64_to_cpu(*((__be64 *) &bh->b_data[ofs]));  [in omfs_find_entry()]
  120:  int ofs;  [in omfs_add_link(), local]
  123:  bh = omfs_get_bucket(dir, name, namelen, &ofs);  [in omfs_add_link()]
  127:  entry = (__be64 *) &bh->b_data[ofs];  [in omfs_add_link()]
  166:  int ofs;  [in omfs_delete_entry(), local]
  170:  bh = omfs_get_bucket(dir, name, namelen, &ofs);  [in omfs_delete_entry()]
  ... (more matches elided)
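omfs_get_bucket() hashes the name into a bucket and returns the byte offset of that bucket's slot inside the directory block, OMFS_DIR_START + bucket * 8; the callers then read or rewrite the big-endian 64-bit chain pointer stored at that offset. The sketch below uses a hypothetical hash function and illustrative constants for the header size and bucket count; only the offset arithmetic and the big-endian read mirror the matches above.

    #include <stddef.h>
    #include <stdint.h>

    #define DIR_START   0x1b8   /* illustrative header size before the bucket table */
    #define NUM_BUCKETS 176     /* illustrative bucket count */

    /* Hypothetical name hash; omfs uses its own hash function. */
    static unsigned int name_hash(const char *name, size_t namelen)
    {
        unsigned int h = 0;
        for (size_t i = 0; i < namelen; i++)
            h = h * 31 + (unsigned char)name[i];
        return h % NUM_BUCKETS;
    }

    /* Each bucket slot is an 8-byte big-endian block pointer, so the slot for a
     * name sits at DIR_START + bucket * 8 within the directory block. */
    static size_t bucket_offset(const char *name, size_t namelen)
    {
        return DIR_START + (size_t)name_hash(name, namelen) * 8;
    }

    /* Read the big-endian 64-bit head pointer stored at that offset. */
    static uint64_t bucket_head(const unsigned char *dir_block, size_t ofs)
    {
        uint64_t v = 0;
        for (int i = 0; i < 8; i++)
            v = (v << 8) | dir_block[ofs + i];
        return v;
    }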
/fs/cifs/
link.c
  128:  unsigned int ofs;  [in format_mf_symlink(), local]
  150:  ofs = CIFS_MF_SYMLINK_LINK_OFFSET;  [in format_mf_symlink()]
  151:  memcpy(buf + ofs, link_str, link_len);  [in format_mf_symlink()]
  153:  ofs += link_len;  [in format_mf_symlink()]
  154:  if (ofs < CIFS_MF_SYMLINK_FILE_SIZE) {  [in format_mf_symlink()]
  155:  buf[ofs] = '\n';  [in format_mf_symlink()]
  156:  ofs++;  [in format_mf_symlink()]
  159:  while (ofs < CIFS_MF_SYMLINK_FILE_SIZE) {  [in format_mf_symlink()]
  160:  buf[ofs] = ' ';  [in format_mf_symlink()]
  161:  ofs++;  [in format_mf_symlink()]
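format_mf_symlink() writes the link target at a fixed offset, terminates it with a newline, and space-pads the remainder so the on-disk file always has the same size. A sketch of that fixed-size layout with illustrative values for the offset and file size; the real CIFS_MF_SYMLINK_* constants differ.

    #include <stddef.h>
    #include <string.h>

    #define LINK_OFFSET 44     /* illustrative header size before the target string  */
    #define FILE_SIZE   256    /* illustrative fixed on-disk size of the symlink file */

    /* Place the link target at a fixed offset, terminate it with '\n', and pad
     * the remainder with spaces so the file always has the same size.
     * Returns -1 if the target does not fit. */
    static int format_symlink_body(char *buf, const char *link_str, size_t link_len)
    {
        size_t ofs = LINK_OFFSET;

        if (ofs + link_len + 1 > FILE_SIZE)
            return -1;                        /* target too long for the fixed layout */

        memcpy(buf + ofs, link_str, link_len);
        ofs += link_len;
        buf[ofs++] = '\n';                    /* newline marks the end of the target  */
        while (ofs < FILE_SIZE)
            buf[ofs++] = ' ';                 /* space-pad to the fixed file size     */
        return 0;
    }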