/fs/ntfs3/ |
D | run.c |
      21  CLST vcn; /* Virtual cluster number. */   member
      34  bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)   in run_lookup() argument
      49  if (vcn < r->vcn) {   in run_lookup()
      54  if (vcn < r->vcn + r->len) {   in run_lookup()
      60  if (vcn >= r->vcn + r->len) {   in run_lookup()
      65  if (vcn >= r->vcn) {   in run_lookup()
      74  if (vcn < r->vcn) {   in run_lookup()
      78  } else if (vcn >= r->vcn + r->len) {   in run_lookup()
     104  CLST end = r->vcn + r->len;   in run_consolidate()
     108  if (n->vcn > end)   in run_consolidate()
          [all …]
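
run_lookup() above narrows in on the run that contains a given VCN: the runs_tree is kept as a sorted array of extents, each covering [vcn, vcn + len), and the comparisons at lines 49-78 implement a binary-search style probe. Below is a minimal user-space sketch of that kind of lookup over a plain sorted array; struct run and lookup_run are invented names, not the kernel's types.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t CLST;          /* cluster number, as in the driver */

struct run {                    /* simplified stand-in for one extent */
	CLST vcn;               /* starting virtual cluster number    */
	CLST lcn;               /* mapped logical cluster number      */
	CLST len;               /* length in clusters                 */
};

/*
 * Minimal model of run_lookup(): binary search for the run containing
 * 'vcn'.  On success *index is the matching run; on failure it is the
 * insertion point for a new run starting at 'vcn'.
 */
bool lookup_run(const struct run *runs, size_t count, CLST vcn, size_t *index)
{
	size_t lo = 0, hi = count;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;
		const struct run *r = &runs[mid];

		if (vcn < r->vcn)
			hi = mid;               /* target lies to the left   */
		else if (vcn >= r->vcn + r->len)
			lo = mid + 1;           /* target lies to the right  */
		else {
			*index = mid;           /* vcn falls inside this run */
			return true;
		}
	}
	*index = lo;
	return false;
}
```
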
|
D | attrib.c |
      88  struct runs_tree *run, const CLST *vcn)   in attr_load_runs() argument
      99  if (vcn && (evcn < *vcn || *vcn < svcn))   in attr_load_runs()
     109  vcn ? *vcn : svcn, Add2Ptr(attr, run_off),   in attr_load_runs()
     121  CLST vcn, CLST len, CLST *done, bool trim)   in run_deallocate_ex() argument
     124  CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;   in run_deallocate_ex()
     130  if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {   in run_deallocate_ex()
     155  vcn_next = vcn + clen;   in run_deallocate_ex()
     156  if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||   in run_deallocate_ex()
     157  vcn != vcn_next) {   in run_deallocate_ex()
     174  CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,   in attr_allocate_clusters() argument
          [all …]
|
D | attrlist.c |
     188  u8 name_len, const CLST *vcn)   in al_find_ex() argument
     207  le_vcn = le64_to_cpu(le->vcn);   in al_find_ex()
     222  if (!vcn)   in al_find_ex()
     225  if (*vcn == le_vcn)   in al_find_ex()
     228  if (*vcn < le_vcn)   in al_find_ex()
     245  u8 name_len, CLST vcn)   in al_find_le_to_insert() argument
     260  if (!le->vcn) {   in al_find_le_to_insert()
     274  if (le64_to_cpu(le->vcn) >= vcn)   in al_find_le_to_insert()
     334  le->vcn = cpu_to_le64(svcn);   in al_add_le()
     388  bool al_delete_le(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST vcn,   in al_delete_le() argument
          [all …]
|
D | file.c |
     119  CLST vcn, lcn, clen;   in ntfs_extend_initialized_size() local
     123  vcn = pos >> bits;   in ntfs_extend_initialized_size()
     125  err = attr_data_get_block(ni, vcn, 0, &lcn, &clen,   in ntfs_extend_initialized_size()
     131  loff_t vbo = (loff_t)vcn << bits;   in ntfs_extend_initialized_size()
     279  void ntfs_sparse_cluster(struct inode *inode, struct page *page0, CLST vcn,   in ntfs_sparse_cluster() argument
     284  u64 vbo = (u64)vcn << sbi->cluster_bits;   in ntfs_sparse_cluster()
     384  CLST vcn = from >> sbi->cluster_bits;   in ntfs_file_mmap() local
     388  for (; vcn < end; vcn += len) {   in ntfs_file_mmap()
     389  err = attr_data_get_block(ni, vcn, 1, &lcn,   in ntfs_file_mmap()
     396  ntfs_sparse_cluster(inode, NULL, vcn, 1);   in ntfs_file_mmap()
          [all …]
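
ntfs_extend_initialized_size() above converts a byte position to a VCN with pos >> bits and converts back with (loff_t)vcn << bits, where the cast widens the cluster number before the left shift. The stand-alone sketch below walks through the same arithmetic with an assumed 4 KiB cluster size (cluster_bits = 12); all names are illustrative, not the driver's.

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned cluster_bits = 12;          /* 4 KiB clusters (assumed) */
	uint64_t pos = 0x12345678;                 /* arbitrary byte position  */

	uint64_t vcn = pos >> cluster_bits;        /* virtual cluster number   */
	uint64_t vbo = vcn << cluster_bits;        /* cluster-aligned byte off */
	uint64_t off = pos & ((1u << cluster_bits) - 1); /* offset in cluster  */

	printf("pos=%#llx -> vcn=%llu, vbo=%#llx, offset=%#llx\n",
	       (unsigned long long)pos, (unsigned long long)vcn,
	       (unsigned long long)vbo, (unsigned long long)off);
	return 0;
}
```
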
|
D | ntfs_fs.h |
     415  struct runs_tree *run, const CLST *vcn);
     417  CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
     428  int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
     434  CLST vcn);
     460  u8 name_len, const CLST *vcn);
     465  bool al_delete_le(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST vcn,
     493  void ntfs_sparse_cluster(struct inode *inode, struct page *page0, CLST vcn,
     515  u8 name_len, const CLST *vcn,
     521  const __le16 *name, u8 name_len, CLST vcn,
     778  bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn,
          [all …]
|
D | frecord.c |
     187  const __le16 *name, u8 name_len, const CLST *vcn,   in ni_find_attr() argument
     205  le = al_find_ex(ni, le_o ? *le_o : NULL, type, name, name_len, vcn);   in ni_find_attr()
     223  if (vcn && *vcn)   in ni_find_attr()
     225  } else if (!vcn) {   in ni_find_attr()
     228  } else if (le64_to_cpu(attr->nres.svcn) > *vcn ||   in ni_find_attr()
     229  *vcn > le64_to_cpu(attr->nres.evcn)) {   in ni_find_attr()
     281  const __le16 *name, u8 name_len, CLST vcn,   in ni_load_attr() argument
     304  if (vcn) {   in ni_load_attr()
     307  if (!next || le64_to_cpu(next->vcn) > vcn)   in ni_load_attr()
     325  if (le64_to_cpu(attr->nres.svcn) <= vcn &&   in ni_load_attr()
          [all …]
|
D | fsntfs.c |
     783  CLST zone_limit, zone_max, lcn, vcn, len;   in ntfs_refresh_zone() local
     802  vcn = bytes_to_cluster(sbi,   in ntfs_refresh_zone()
     805  if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))   in ntfs_refresh_zone()
    1107  CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;   in ntfs_sb_write_run() local
    1111  if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))   in ntfs_sb_write_run()
    1131  vcn_next = vcn + clen;   in ntfs_sb_write_run()
    1132  if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||   in ntfs_sb_write_run()
    1133  vcn != vcn_next)   in ntfs_sb_write_run()
    1174  CLST vcn_next, vcn = vbo >> cluster_bits;   in ntfs_read_run_nb() local
    1190  } else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {   in ntfs_read_run_nb()
          [all …]
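
ntfs_sb_write_run() starts at vcn = vbo >> cluster_bits, looks up the first run, and then keeps fetching the next run entry while insisting that it begin exactly at vcn + clen, i.e. that the byte range is contiguously mapped. Below is a minimal model of that walk under the assumption that the runs sit in a plain array; the struct, the walk_runs() name, and the boolean error handling are invented for illustration.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t CLST;

struct run { CLST vcn, lcn, len; };       /* simplified run-list entry */

/*
 * Process 'count' clusters starting at 'vcn', where runs[idx] is the run
 * containing 'vcn' (as a run_lookup_entry()-style call would return).
 * Fails if the next run does not start at vcn + clen (a mapping gap).
 */
bool walk_runs(const struct run *runs, size_t nruns, size_t idx,
	       CLST vcn, CLST count)
{
	while (count) {
		CLST off  = vcn - runs[idx].vcn;      /* offset into this run */
		CLST clen = runs[idx].len - off;      /* clusters usable here */

		if (clen > count)
			clen = count;

		printf("I/O: %llu clusters at lcn %llu\n",
		       (unsigned long long)clen,
		       (unsigned long long)(runs[idx].lcn + off));

		count -= clen;
		if (!count)
			break;

		CLST vcn_next = vcn + clen;
		idx++;
		if (idx >= nruns || runs[idx].vcn != vcn_next)
			return false;            /* not contiguously mapped */
		vcn = vcn_next;
	}
	return true;
}
```
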
|
D | fslog.c |
     154  __le64 vcn; // 0x10: Vcn of dirty page   member
     639  u32 target_attr, u64 vcn)   in find_dp() argument
     645  u64 dp_vcn = le64_to_cpu(dp->vcn);   in find_dp()
     647  if (dp->target_attr == ta && vcn >= dp_vcn &&   in find_dp()
     648  vcn < dp_vcn + le32_to_cpu(dp->lcns_follow)) {   in find_dp()
    3738  u64 size, vcn, undo_next_lsn;   in log_replay() local
    4206  memmove(&dp->vcn, &dp0->vcn_low,   in log_replay()
    4228  next->vcn == dp->vcn) {   in log_replay()
    4501  dp->vcn = cpu_to_le64(t64 & ~((u64)t32 - 1));   in log_replay()
    4513  le64_to_cpu(dp->vcn));   in log_replay()
          [all …]
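
The dirty page entries handled by fslog.c during $LogFile replay record which open attribute they belong to, the VCN of the first dirty cluster, and how many clusters follow; find_dp() returns the entry whose range contains the requested VCN. Below is a minimal user-space model of that containment test; the structure and function names are invented, not the driver's on-disk layout.

```c
#include <stddef.h>
#include <stdint.h>

struct dirty_page {              /* simplified model of a dirty page entry */
	uint32_t target_attr;    /* open attribute the pages belong to     */
	uint64_t vcn;            /* first VCN covered by this entry        */
	uint32_t lcns_follow;    /* number of clusters the entry covers    */
};

/* Return the entry covering (target_attr, vcn), or NULL if none does. */
const struct dirty_page *find_dirty_page(const struct dirty_page *dp,
					 size_t count, uint32_t target_attr,
					 uint64_t vcn)
{
	for (size_t i = 0; i < count; i++) {
		if (dp[i].target_attr == target_attr &&
		    vcn >= dp[i].vcn &&
		    vcn < dp[i].vcn + dp[i].lcns_follow)
			return &dp[i];
	}
	return NULL;
}
```
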
|
D | ntfs.h |
     517  __le64 vcn; // 0x08: Starting VCN of this attribute.   member
     633  static inline void de_set_vbn_le(struct NTFS_DE *e, __le64 vcn)   in de_set_vbn_le() argument
     637  *v = vcn;   in de_set_vbn_le()
     640  static inline void de_set_vbn(struct NTFS_DE *e, CLST vcn)   in de_set_vbn() argument
     644  *v = cpu_to_le64(vcn);   in de_set_vbn()
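
The two setters exist because callers sometimes already hold the little-endian value read from disk (de_set_vbn_le stores it as-is) and sometimes a CPU-order CLST that still needs cpu_to_le64() (de_set_vbn). The sketch below mirrors that distinction in user space with glibc's htole64() standing in for cpu_to_le64(); placing the value in the last eight bytes of the entry follows the NTFS index-entry convention visible in the fs/ntfs lookup code further down, and the names here are invented.

```c
#include <endian.h>             /* htole64(): stand-in for cpu_to_le64() */
#include <stdint.h>
#include <string.h>

/* Caller already has the on-disk little-endian representation: copy as-is. */
void put_subnode_vbn_le(uint8_t *entry, size_t entry_size, uint64_t le_vbn)
{
	memcpy(entry + entry_size - sizeof(le_vbn), &le_vbn, sizeof(le_vbn));
}

/* Caller has a CPU-order cluster number: convert before storing. */
void put_subnode_vbn(uint8_t *entry, size_t entry_size, uint64_t vbn)
{
	uint64_t le_vbn = htole64(vbn);

	memcpy(entry + entry_size - sizeof(le_vbn), &le_vbn, sizeof(le_vbn));
}
```
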
|
D | index.c |
     210  CLST vcn = off >> sbi->cluster_bits;   in bmp_buf_get() local
     220  &vcn, &bbuf->mi);   in bmp_buf_get()
     379  CLST lcn, clen, vcn, vcn_next;   in scan_nres_bitmap() local
     390  vcn = vbo >> sbi->cluster_bits;   in scan_nres_bitmap()
     393  ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx);   in scan_nres_bitmap()
     403  name->name_len, run, vcn);   in scan_nres_bitmap()
     408  ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx);   in scan_nres_bitmap()
     449  vcn_next = vcn + clen;   in scan_nres_bitmap()
     451  ok = run_get_entry(run, ++idx, &vcn, &lcn, &clen) && vcn == vcn_next;   in scan_nres_bitmap()
     453  vcn = vcn_next;   in scan_nres_bitmap()
|
D | inode.c |
     126  if (le && le->vcn) {   in ntfs_read_mft()
     557  CLST vcn, lcn, len;   in ntfs_get_block_vbo() local
     586  vcn = vbo >> cluster_bits;   in ntfs_get_block_vbo()
     590  err = attr_data_get_block(ni, vcn, 1, &lcn, &len, create ? &new : NULL);   in ntfs_get_block_vbo()
     611  ntfs_sparse_cluster(inode, page, vcn, len);   in ntfs_get_block_vbo()
    1218  CLST vcn;   in ntfs_create_inode() local
    1544  &vcn);   in ntfs_create_inode()
    1548  if (vcn != clst) {   in ntfs_create_inode()
|
/fs/ntfs/ |
D | runlist.c |
     151  if ((dst->vcn + dst->length) != src->vcn)   in ntfs_are_rl_mergeable()
     238  dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn;   in ntfs_rl_append()
     242  dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length;   in ntfs_rl_append()
     286  disc = (src[0].vcn > 0);   in ntfs_rl_insert()
     296  disc = (src[0].vcn > dst[loc - 1].vcn + merged_length);   in ntfs_rl_insert()
     325  dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length;   in ntfs_rl_insert()
     328  dst[marker].length = dst[marker + 1].vcn - dst[marker].vcn;   in ntfs_rl_insert()
     333  dst[loc].vcn = dst[loc - 1].vcn + dst[loc - 1].length;   in ntfs_rl_insert()
     334  dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn;   in ntfs_rl_insert()
     336  dst[loc].vcn = 0;   in ntfs_rl_insert()
          [all …]
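
The fs/ntfs runlist is an array of elements, each mapping a VCN range onto an LCN range. ntfs_are_rl_mergeable() treats two runs as adjacent on the VCN side when the first ends exactly where the second starts, and the append/insert paths recompute neighbouring vcn and length fields from that invariant. A small sketch of the adjacency test and the resulting merge; the kernel helper also has to consider the LCN side and sparse (hole) runs, which is omitted here, and the names below are invented.

```c
#include <stdbool.h>
#include <stdint.h>

typedef int64_t VCN;
typedef int64_t LCN;

typedef struct {                /* simplified runlist_element           */
	VCN vcn;                /* starting virtual cluster number      */
	LCN lcn;                /* starting logical (on-disk) cluster   */
	int64_t length;         /* run length in clusters               */
} rl_elem;

/* VCN-side adjacency: dst ends exactly where src begins. */
bool rl_vcn_adjacent(const rl_elem *dst, const rl_elem *src)
{
	return dst->vcn + dst->length == src->vcn;
}

/* Merge src into dst once the caller has verified the runs are mergeable. */
void rl_merge(rl_elem *dst, const rl_elem *src)
{
	dst->length += src->length;     /* dst->vcn and dst->lcn stay put */
}
```
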
|
D | index.c |
     108  VCN vcn, old_vcn;   in ntfs_index_lookup() local
     248  vcn = sle64_to_cpup((sle64*)((u8*)ie + le16_to_cpu(ie->length) - 8));   in ntfs_index_lookup()
     264  page = ntfs_map_page(ia_mapping, vcn <<   in ntfs_index_lookup()
     276  ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<   in ntfs_index_lookup()
     288  (long long)vcn, idx_ni->mft_no);   in ntfs_index_lookup()
     291  if (sle64_to_cpu(ia->index_block_vcn) != vcn) {   in ntfs_index_lookup()
     297  (unsigned long long)vcn, idx_ni->mft_no);   in ntfs_index_lookup()
     305  "driver bug.", (unsigned long long)vcn,   in ntfs_index_lookup()
     316  "driver.", (unsigned long long)vcn,   in ntfs_index_lookup()
     324  (unsigned long long)vcn, idx_ni->mft_no);   in ntfs_index_lookup()
          [all …]
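
When an index entry has a sub-node, ntfs_index_lookup() reads the child block's VCN from the last eight bytes of the entry and then maps the page that contains that index block (vcn << cluster_size_bits >> PAGE_SHIFT). The sketch below redoes those two calculations on a plain little-endian buffer; types and names are simplified, le64toh() stands in for sle64_to_cpup(), and the offset of the block inside the page is left out.

```c
#include <endian.h>
#include <stdint.h>
#include <string.h>

/* Read the sub-node VCN stored in the last 8 bytes of an index entry. */
int64_t index_entry_subnode_vcn(const uint8_t *ie, uint16_t ie_length)
{
	uint64_t le_vcn;

	memcpy(&le_vcn, ie + ie_length - sizeof(le_vcn), sizeof(le_vcn));
	return (int64_t)le64toh(le_vcn);        /* on-disk LE -> CPU order */
}

/* Page-cache index of the page holding the index block that starts at vcn. */
uint64_t index_block_page(int64_t vcn, unsigned cluster_size_bits,
			  unsigned page_shift)
{
	return ((uint64_t)vcn << cluster_size_bits) >> page_shift;
}
```
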
|
D | dir.c |
      80  VCN vcn, old_vcn;   in ntfs_lookup_inode_by_name() local
     292  vcn = sle64_to_cpup((sle64*)((u8*)ie + le16_to_cpu(ie->length) - 8));   in ntfs_lookup_inode_by_name()
     308  page = ntfs_map_page(ia_mapping, vcn <<   in ntfs_lookup_inode_by_name()
     320  ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<   in ntfs_lookup_inode_by_name()
     332  (unsigned long long)vcn, dir_ni->mft_no);   in ntfs_lookup_inode_by_name()
     335  if (sle64_to_cpu(ia->index_block_vcn) != vcn) {   in ntfs_lookup_inode_by_name()
     341  (unsigned long long)vcn, dir_ni->mft_no);   in ntfs_lookup_inode_by_name()
     350  (unsigned long long)vcn, dir_ni->mft_no,   in ntfs_lookup_inode_by_name()
     360  "driver.", (unsigned long long)vcn,   in ntfs_lookup_inode_by_name()
     368  (unsigned long long)vcn, dir_ni->mft_no);   in ntfs_lookup_inode_by_name()
          [all …]
|
D | logfile.c |
     714  VCN vcn, end_vcn;   in ntfs_empty_logfile() local
     738  vcn = 0;   in ntfs_empty_logfile()
     746  if (unlikely(!rl || vcn < rl->vcn || !rl->length)) {   in ntfs_empty_logfile()
     748  err = ntfs_map_runlist_nolock(log_ni, vcn, NULL);   in ntfs_empty_logfile()
     755  BUG_ON(!rl || vcn < rl->vcn || !rl->length);   in ntfs_empty_logfile()
     758  while (rl->length && vcn >= rl[1].vcn)   in ntfs_empty_logfile()
     771  vcn = rl->vcn;   in ntfs_empty_logfile()
     782  if (rl[1].vcn > end_vcn)   in ntfs_empty_logfile()
     783  len = end_vcn - rl->vcn;   in ntfs_empty_logfile()
     819  } while ((++rl)->vcn < end_vcn);   in ntfs_empty_logfile()
|
D | aops.c |
     167  VCN vcn;   in ntfs_read_block() local
     242  vcn = (VCN)iblock << blocksize_bits >>   in ntfs_read_block()
     253  while (rl->length && rl[1].vcn <= vcn)   in ntfs_read_block()
     255  lcn = ntfs_rl_vcn_to_lcn(rl, vcn);   in ntfs_read_block()
     283  err = ntfs_map_runlist(ni, vcn);   in ntfs_read_block()
     308  ni->type, (unsigned long long)vcn,   in ntfs_read_block()
     532  VCN vcn;   in ntfs_write_block() local
     699  vcn = (VCN)block << blocksize_bits;   in ntfs_write_block()
     700  vcn_ofs = vcn & vol->cluster_size_mask;   in ntfs_write_block()
     701  vcn >>= vol->cluster_size_bits;   in ntfs_write_block()
          [all …]
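
ntfs_read_block() turns the logical block number into a VCN by going through a byte offset (iblock << blocksize_bits >> cluster_size_bits), resolves the VCN with ntfs_rl_vcn_to_lcn(), and maps the missing runlist fragment on demand. The worked sketch below follows the same arithmetic end to end with assumed sizes (512-byte blocks, 4 KiB clusters) and a made-up LCN in place of a real runlist lookup.

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned blocksize_bits    = 9;   /* 512-byte blocks (assumed) */
	const unsigned cluster_size_bits = 12;  /* 4 KiB clusters (assumed)  */

	uint64_t iblock  = 1234;                               /* file block */
	uint64_t byte    = iblock << blocksize_bits;
	uint64_t vcn     = byte >> cluster_size_bits;          /* VCN        */
	uint64_t vcn_ofs = byte & ((1u << cluster_size_bits) - 1);

	uint64_t lcn = 5000;          /* pretend the runlist mapped vcn here */
	uint64_t dev_block = ((lcn << cluster_size_bits) + vcn_ofs) >>
			     blocksize_bits;

	printf("iblock %llu -> vcn %llu (+%llu bytes) -> device block %llu\n",
	       (unsigned long long)iblock, (unsigned long long)vcn,
	       (unsigned long long)vcn_ofs, (unsigned long long)dev_block);
	return 0;
}
```
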
|
D | attrib.c |
      70  int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)   in ntfs_map_runlist_nolock() argument
      84  (unsigned long long)vcn);   in ntfs_map_runlist_nolock()
     120  if (vcn >= allocated_size_vcn || (a->type == ni->type &&   in ntfs_map_runlist_nolock()
     125  <= vcn && end_vcn >= vcn))   in ntfs_map_runlist_nolock()
     153  CASE_SENSITIVE, vcn, NULL, 0, ctx);   in ntfs_map_runlist_nolock()
     169  if (unlikely(vcn && vcn >= end_vcn)) {   in ntfs_map_runlist_nolock()
     284  int ntfs_map_runlist(ntfs_inode *ni, VCN vcn)   in ntfs_map_runlist() argument
     290  if (likely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) <=   in ntfs_map_runlist()
     292  err = ntfs_map_runlist_nolock(ni, vcn, NULL);   in ntfs_map_runlist()
     327  LCN ntfs_attr_vcn_to_lcn_nolock(ntfs_inode *ni, const VCN vcn,   in ntfs_attr_vcn_to_lcn_nolock() argument
          [all …]
|
D | attrib.h |
      49  extern int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn,
      51  extern int ntfs_map_runlist(ntfs_inode *ni, VCN vcn);
      53  extern LCN ntfs_attr_vcn_to_lcn_nolock(ntfs_inode *ni, const VCN vcn,
      57  const VCN vcn, ntfs_attr_search_ctx *ctx);
|
D | compress.c |
     478  VCN vcn;   in ntfs_read_compressed_block() local
     596  for (vcn = start_vcn, start_vcn += cb_clusters; vcn < start_vcn;   in ntfs_read_compressed_block()
     597  vcn++) {   in ntfs_read_compressed_block()
     607  while (rl->length && rl[1].vcn <= vcn)   in ntfs_read_compressed_block()
     609  lcn = ntfs_rl_vcn_to_lcn(rl, vcn);   in ntfs_read_compressed_block()
     613  (unsigned long long)vcn,   in ntfs_read_compressed_block()
     630  if (!ntfs_map_runlist(ni, vcn))   in ntfs_read_compressed_block()
     730  if (vcn == start_vcn - cb_clusters) {   in ntfs_read_compressed_block()
     773  } else if (vcn == start_vcn) {   in ntfs_read_compressed_block()
|
D | lcnalloc.c |
     388  rl[rlpos].vcn = rl[rlpos - 1].vcn +   in ntfs_cluster_alloc()
     393  rl[rlpos].vcn = start_vcn;   in ntfs_cluster_alloc()
     726  rl[rlpos].vcn = rl[rlpos - 1].vcn + rl[rlpos - 1].length;   in ntfs_cluster_alloc()
     883  delta = start_vcn - rl->vcn;   in __ntfs_cluster_free()
     916  VCN vcn;   in __ntfs_cluster_free() local
     919  vcn = rl->vcn;   in __ntfs_cluster_free()
     920  rl = ntfs_attr_find_vcn_nolock(ni, vcn, ctx);   in __ntfs_cluster_free()
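
ntfs_cluster_alloc() builds the runlist it returns incrementally: apart from the first element, which is pinned at start_vcn, each new element's vcn is the previous element's vcn plus its length. A small sketch of that append step; array growth and error handling are left out and the names are invented.

```c
#include <stddef.h>
#include <stdint.h>

typedef int64_t VCN;
typedef int64_t LCN;

typedef struct { VCN vcn; LCN lcn; int64_t length; } rl_elem;

/*
 * Append a freshly allocated extent to a runlist under construction.
 * The first element starts at start_vcn; every later element starts
 * where the previous one ends.
 */
void rl_append_alloc(rl_elem *rl, size_t *rlpos, VCN start_vcn,
		     LCN lcn, int64_t len)
{
	size_t pos = *rlpos;

	rl[pos].vcn = pos ? rl[pos - 1].vcn + rl[pos - 1].length : start_vcn;
	rl[pos].lcn = lcn;
	rl[pos].length = len;
	*rlpos = pos + 1;
}
```
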
|
D | mft.c |
     525  VCN vcn;   in ntfs_sync_mft_mirror() local
     531  vcn = ((VCN)mft_no << vol->mft_record_size_bits) +   in ntfs_sync_mft_mirror()
     533  vcn_ofs = vcn & vol->cluster_size_mask;   in ntfs_sync_mft_mirror()
     534  vcn >>= vol->cluster_size_bits;   in ntfs_sync_mft_mirror()
     546  while (rl->length && rl[1].vcn <= vcn)   in ntfs_sync_mft_mirror()
     548  lcn = ntfs_rl_vcn_to_lcn(rl, vcn);   in ntfs_sync_mft_mirror()
     718  VCN vcn;   in write_mft_record_nolock() local
     724  vcn = ((VCN)ni->mft_no << vol->mft_record_size_bits) +   in write_mft_record_nolock()
     726  vcn_ofs = vcn & vol->cluster_size_mask;   in write_mft_record_nolock()
     727  vcn >>= vol->cluster_size_bits;   in write_mft_record_nolock()
          [all …]
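
Both functions above locate an MFT record by shifting the record number into a byte offset inside $MFT, splitting that into a VCN plus an offset within the cluster, and then resolving the VCN through the $MFT runlist. A worked version of the arithmetic with assumed sizes (1 KiB MFT records, 4 KiB clusters); the runlist resolution itself is not repeated here.

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned mft_record_size_bits = 10;   /* 1 KiB records (assumed)  */
	const unsigned cluster_size_bits    = 12;   /* 4 KiB clusters (assumed) */
	const uint64_t cluster_size_mask    = (1u << cluster_size_bits) - 1;

	uint64_t mft_no = 27;                       /* arbitrary record number  */

	uint64_t byte_off = mft_no << mft_record_size_bits; /* offset in $MFT   */
	uint64_t vcn_ofs  = byte_off & cluster_size_mask;   /* inside cluster   */
	uint64_t vcn      = byte_off >> cluster_size_bits;  /* $MFT data VCN    */

	printf("mft record %llu -> vcn %llu, offset %llu bytes into the cluster\n",
	       (unsigned long long)mft_no, (unsigned long long)vcn,
	       (unsigned long long)vcn_ofs);
	return 0;
}
```
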
|
D | runlist.h |
      29  VCN vcn; /* vcn = Starting virtual cluster number. */   member
      65  extern LCN ntfs_rl_vcn_to_lcn(const runlist_element *rl, const VCN vcn);
      70  const VCN vcn);
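
runlist_element is the (vcn, lcn, length) triple the fs/ntfs driver works with throughout, and ntfs_rl_vcn_to_lcn() resolves a VCN against an array of them. A minimal model of such a lookup: find the element covering the VCN and add the offset into the run to its lcn. The kernel helper additionally distinguishes holes and not-yet-mapped regions through negative LCN codes, which this sketch collapses into a single -1.

```c
#include <stddef.h>
#include <stdint.h>

typedef int64_t VCN;
typedef int64_t LCN;

typedef struct {                /* simplified runlist_element      */
	VCN vcn;                /* starting virtual cluster number */
	LCN lcn;                /* starting logical cluster number */
	int64_t length;         /* run length in clusters          */
} rl_elem;

/* Resolve vcn against a length-terminated runlist; -1 if not mapped. */
LCN rl_vcn_to_lcn(const rl_elem *rl, size_t count, VCN vcn)
{
	for (size_t i = 0; i < count && rl[i].length; i++) {
		if (vcn >= rl[i].vcn && vcn < rl[i].vcn + rl[i].length)
			return rl[i].lcn + (vcn - rl[i].vcn);
	}
	return -1;
}
```
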
|
D | file.c |
     570  VCN vcn, highest_vcn = 0, cpos, cend, bh_cpos, bh_cend;   in ntfs_prepare_pages_for_non_resident_write() local
     625  vcn = lcn = -1;   in ntfs_prepare_pages_for_non_resident_write()
     703  cdelta = bh_cpos - vcn;   in ntfs_prepare_pages_for_non_resident_write()
     833  while (rl->length && rl[1].vcn <= bh_cpos)   in ntfs_prepare_pages_for_non_resident_write()
     842  vcn = bh_cpos;   in ntfs_prepare_pages_for_non_resident_write()
     843  vcn_len = rl[1].vcn - vcn;   in ntfs_prepare_pages_for_non_resident_write()
     854  if (likely(vcn + vcn_len >= cend)) {   in ntfs_prepare_pages_for_non_resident_write()
    1039  vcn = sle64_to_cpu(a->data.non_resident.lowest_vcn);   in ntfs_prepare_pages_for_non_resident_write()
    1040  rl2 = ntfs_rl_find_vcn_nolock(rl, vcn);   in ntfs_prepare_pages_for_non_resident_write()
    1057  mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, vcn,   in ntfs_prepare_pages_for_non_resident_write()
          [all …]
|
D | debug.c |
     143  (long long)(rl + i)->vcn, lcn_str[index],   in ntfs_debug_dump_runlist()
     149  (long long)(rl + i)->vcn,   in ntfs_debug_dump_runlist()
|
D | super.c |
    1166  rl2[0].vcn = 0;   in check_mft_mirror()
    1170  rl2[1].vcn = rl2[0].length;   in check_mft_mirror()
    1183  if (rl2[i].vcn != rl[i].vcn || rl2[i].lcn != rl[i].lcn ||   in check_mft_mirror()
|