/fs/ocfs2/

D | blockcheck.c |
    232  static void ocfs2_blockcheck_debug_remove(struct ocfs2_blockcheck_stats *stats)   in ocfs2_blockcheck_debug_remove() argument
    234  if (stats) {   in ocfs2_blockcheck_debug_remove()
    235  debugfs_remove_recursive(stats->b_debug_dir);   in ocfs2_blockcheck_debug_remove()
    236  stats->b_debug_dir = NULL;   in ocfs2_blockcheck_debug_remove()
    240  static void ocfs2_blockcheck_debug_install(struct ocfs2_blockcheck_stats *stats,   in ocfs2_blockcheck_debug_install() argument
    246  stats->b_debug_dir = dir;   in ocfs2_blockcheck_debug_install()
    249  &stats->b_check_count, &blockcheck_fops);   in ocfs2_blockcheck_debug_install()
    252  &stats->b_failure_count, &blockcheck_fops);   in ocfs2_blockcheck_debug_install()
    255  &stats->b_recover_count, &blockcheck_fops);   in ocfs2_blockcheck_debug_install()
    259  static inline void ocfs2_blockcheck_debug_install(struct ocfs2_blockcheck_stats *stats,   in ocfs2_blockcheck_debug_install() argument
    [all …]
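The blockcheck matches above show a common debugfs pattern: the install helper creates one file per counter under a private directory, and the remove helper tears the whole directory down with debugfs_remove_recursive(). The fragment below is a minimal, hypothetical module sketch of that pattern only; the directory and file names are invented, plain u64 counters stand in for the locked ocfs2_blockcheck_stats fields, and the real ocfs2 code installs its own blockcheck_fops rather than using debugfs_create_u64().

#include <linux/module.h>
#include <linux/debugfs.h>

/* Hypothetical counters standing in for b_check_count / b_failure_count. */
static u64 check_count;
static u64 failure_count;
static struct dentry *demo_dir;

static int __init blockcheck_demo_init(void)
{
	/* one private directory, one read-only file per counter */
	demo_dir = debugfs_create_dir("blockcheck_demo", NULL);
	debugfs_create_u64("blocks_checked", 0444, demo_dir, &check_count);
	debugfs_create_u64("checksum_failures", 0444, demo_dir, &failure_count);
	return 0;
}

static void __exit blockcheck_demo_exit(void)
{
	/* mirrors ocfs2_blockcheck_debug_remove(): drop the whole tree at once */
	debugfs_remove_recursive(demo_dir);
}

module_init(blockcheck_demo_init);
module_exit(blockcheck_demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("illustrative debugfs counter sketch, not ocfs2 code");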
D | blockcheck.h |
    46  struct ocfs2_blockcheck_stats *stats);
    51  struct ocfs2_blockcheck_stats *stats);
    54  void ocfs2_blockcheck_stats_debugfs_install(struct ocfs2_blockcheck_stats *stats,
    56  void ocfs2_blockcheck_stats_debugfs_remove(struct ocfs2_blockcheck_stats *stats);

D | ioctl.c |
    412  static void o2ffg_update_stats(struct ocfs2_info_freefrag_stats *stats,   in o2ffg_update_stats() argument
    415  if (chunksize > stats->ffs_max)   in o2ffg_update_stats()
    416  stats->ffs_max = chunksize;   in o2ffg_update_stats()
    418  if (chunksize < stats->ffs_min)   in o2ffg_update_stats()
    419  stats->ffs_min = chunksize;   in o2ffg_update_stats()
    421  stats->ffs_avg += chunksize;   in o2ffg_update_stats()
    422  stats->ffs_free_chunks_real++;   in o2ffg_update_stats()
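o2ffg_update_stats() folds each free chunk into running min/max/sum counters; the average is only produced later, when the accumulated sum is divided by the chunk count. Below is a small userspace sketch of that accumulation. The struct and the sample chunk sizes are made up, and ffs_avg is deliberately kept as a running sum, which appears to be how the ocfs2 ioctl code uses the field before reporting.

#include <stdio.h>

/* Simplified stand-in for struct ocfs2_info_freefrag_stats. */
struct freefrag_stats {
	unsigned int ffs_min;			/* smallest free chunk seen */
	unsigned int ffs_max;			/* largest free chunk seen */
	unsigned long long ffs_avg;		/* running sum until report time */
	unsigned int ffs_free_chunks_real;	/* chunks folded in so far */
};

static void update_stats(struct freefrag_stats *stats, unsigned int chunksize)
{
	if (chunksize > stats->ffs_max)
		stats->ffs_max = chunksize;
	if (chunksize < stats->ffs_min)
		stats->ffs_min = chunksize;
	stats->ffs_avg += chunksize;
	stats->ffs_free_chunks_real++;
}

int main(void)
{
	struct freefrag_stats st = { .ffs_min = ~0u };	/* seed the minimum high */
	unsigned int chunks[] = { 8, 512, 64, 4 };	/* invented sample sizes */

	for (unsigned int i = 0; i < sizeof(chunks) / sizeof(chunks[0]); i++)
		update_stats(&st, chunks[i]);

	printf("min=%u max=%u avg=%llu chunks=%u\n",
	       st.ffs_min, st.ffs_max,
	       st.ffs_avg / st.ffs_free_chunks_real, st.ffs_free_chunks_real);
	return 0;
}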
D | super.c |
    111  struct ocfs2_blockcheck_stats *stats);
    115  struct ocfs2_blockcheck_stats *stats);
    725  struct ocfs2_blockcheck_stats *stats)   in ocfs2_sb_probe() argument
    791  memset(stats, 0, sizeof(struct ocfs2_blockcheck_stats));   in ocfs2_sb_probe()
    792  spin_lock_init(&stats->b_lock);   in ocfs2_sb_probe()
    793  tmpstat = ocfs2_verify_volume(di, *bh, blksize, stats);   in ocfs2_sb_probe()
    979  struct ocfs2_blockcheck_stats stats;   in ocfs2_fill_super() local
    989  status = ocfs2_sb_probe(sb, &bh, &sector_size, &stats);   in ocfs2_fill_super()
    995  status = ocfs2_initialize_super(sb, bh, sector_size, &stats);   in ocfs2_fill_super()
    2002  struct ocfs2_blockcheck_stats *stats)   in ocfs2_initialize_super() argument
    [all …]
/fs/cachefiles/

D | cache.c |
    20  struct kstatfs stats;   in cachefiles_add_cache() local
    82  ret = vfs_statfs(&path, &stats);   in cachefiles_add_cache()
    87  if (stats.f_bsize <= 0)   in cachefiles_add_cache()
    91  if (stats.f_bsize > PAGE_SIZE)   in cachefiles_add_cache()
    94  cache->bsize = stats.f_bsize;   in cachefiles_add_cache()
    95  cache->bshift = ilog2(stats.f_bsize);   in cachefiles_add_cache()
    101  (unsigned long long) stats.f_blocks,   in cachefiles_add_cache()
    102  (unsigned long long) stats.f_bavail);   in cachefiles_add_cache()
    105  do_div(stats.f_files, 100);   in cachefiles_add_cache()
    106  cache->fstop = stats.f_files * cache->fstop_percent;   in cachefiles_add_cache()
    [all …]
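cachefiles_add_cache() sizes the cache from the backing filesystem's statfs figures: it validates and records the block size, then converts the configured percentage limits into absolute file and block counts by dividing the totals by 100 and scaling by the percentage. The fragment below is a rough userspace analogue built on statvfs(); the path and the percentage values are placeholders, not the cachefiles defaults.

#include <stdio.h>
#include <sys/statvfs.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/var/cache";
	unsigned int fstop_percent = 1;		/* hypothetical "stop caching" limits */
	unsigned int bstop_percent = 3;
	struct statvfs st;

	if (statvfs(path, &st) != 0) {
		perror("statvfs");
		return 1;
	}

	/* one percent of the totals, then scale by the configured percentage,
	 * in the same order as the do_div() calls in cachefiles_add_cache() */
	unsigned long long fstop = (unsigned long long)st.f_files / 100 * fstop_percent;
	unsigned long long bstop = (unsigned long long)st.f_blocks / 100 * bstop_percent;

	printf("files:  total=%llu stop-threshold=%llu\n",
	       (unsigned long long)st.f_files, fstop);
	printf("blocks: total=%llu stop-threshold=%llu bsize=%lu\n",
	       (unsigned long long)st.f_blocks, bstop, st.f_bsize);
	return 0;
}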
/fs/xfs/

D | xfs_stats.c |
    10  static int counter_val(struct xfsstats __percpu *stats, int idx)   in counter_val() argument
    15  val += *(((__u32 *)per_cpu_ptr(stats, cpu) + idx));   in counter_val()
    19  int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)   in xfs_stats_format() argument
    66  counter_val(stats, j));   in xfs_stats_format()
    71  xs_xstrat_bytes += per_cpu_ptr(stats, i)->s.xs_xstrat_bytes;   in xfs_stats_format()
    72  xs_write_bytes += per_cpu_ptr(stats, i)->s.xs_write_bytes;   in xfs_stats_format()
    73  xs_read_bytes += per_cpu_ptr(stats, i)->s.xs_read_bytes;   in xfs_stats_format()
    74  defer_relog += per_cpu_ptr(stats, i)->s.defer_relog;   in xfs_stats_format()
    91  void xfs_stats_clearall(struct xfsstats __percpu *stats)   in xfs_stats_clearall() argument
    100  vn_active = per_cpu_ptr(stats, c)->s.vn_active;   in xfs_stats_clearall()
    [all …]
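XFS keeps its statistics in per-CPU structures, so a single reported value is the sum of one counter slot across every CPU (counter_val()), and the 64-bit byte counters are summed the same way when the buffer is formatted. The sketch below models that with a plain two-dimensional array instead of per_cpu_ptr(); the CPU count and counter indices are arbitrary.

#include <stdio.h>

#define NR_CPUS		4	/* pretend machine size */
#define NR_COUNTERS	8

/* Per-CPU counter block, a simplified stand-in for struct xfsstats:
 * each "CPU" is just a row of a static array. */
static unsigned int stats[NR_CPUS][NR_COUNTERS];

/* Same idea as counter_val() in xfs_stats.c: a counter's value is the
 * sum of that slot across all CPUs. */
static unsigned long long counter_val(int idx)
{
	unsigned long long val = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		val += stats[cpu][idx];
	return val;
}

int main(void)
{
	/* pretend each CPU bumped counter 2 a different number of times */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		stats[cpu][2] = cpu + 1;

	printf("counter 2 = %llu\n", counter_val(2));	/* 1 + 2 + 3 + 4 = 10 */
	return 0;
}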
D | xfs_sysfs.c |
    292  struct xstats *stats = to_xstats(kobject);   in stats_show() local
    294  return xfs_stats_format(stats->xs_stats, buf);   in stats_show()
    296  XFS_SYSFS_ATTR_RO(stats);
    306  struct xstats *stats = to_xstats(kobject);   in stats_clear_store() local
    315  xfs_stats_clearall(stats->xs_stats);   in stats_clear_store()
    321  ATTR_LIST(stats),

D | xfs_stats.h |
    159  int xfs_stats_format(struct xfsstats __percpu *stats, char *buf);
    160  void xfs_stats_clearall(struct xfsstats __percpu *stats);
/fs/jbd2/

D | commit.c |
    384  struct transaction_stats_s stats;   in jbd2_journal_commit_transaction() local
    477  stats.run.rs_wait = commit_transaction->t_max_wait;   in jbd2_journal_commit_transaction()
    478  stats.run.rs_request_delay = 0;   in jbd2_journal_commit_transaction()
    479  stats.run.rs_locked = jiffies;   in jbd2_journal_commit_transaction()
    481  stats.run.rs_request_delay =   in jbd2_journal_commit_transaction()
    483  stats.run.rs_locked);   in jbd2_journal_commit_transaction()
    484  stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,   in jbd2_journal_commit_transaction()
    485  stats.run.rs_locked);   in jbd2_journal_commit_transaction()
    562  stats.run.rs_flushing = jiffies;   in jbd2_journal_commit_transaction()
    563  stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,   in jbd2_journal_commit_transaction()
    [all …]

D | journal.c |
    1151  struct transaction_stats_s *stats;   member
    1175  s->stats->ts_tid, s->stats->ts_requested,   in jbd2_seq_info_show()
    1177  if (s->stats->ts_tid == 0)   in jbd2_seq_info_show()
    1180  jiffies_to_msecs(s->stats->run.rs_wait / s->stats->ts_tid));   in jbd2_seq_info_show()
    1182  (s->stats->ts_requested == 0) ? 0 :   in jbd2_seq_info_show()
    1183  jiffies_to_msecs(s->stats->run.rs_request_delay /   in jbd2_seq_info_show()
    1184  s->stats->ts_requested));   in jbd2_seq_info_show()
    1186  jiffies_to_msecs(s->stats->run.rs_running / s->stats->ts_tid));   in jbd2_seq_info_show()
    1188  jiffies_to_msecs(s->stats->run.rs_locked / s->stats->ts_tid));   in jbd2_seq_info_show()
    1190  jiffies_to_msecs(s->stats->run.rs_flushing / s->stats->ts_tid));   in jbd2_seq_info_show()
    [all …]
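The commit path stamps each phase of a transaction in jiffies (request delay, running, locked, flushing), and jbd2_seq_info_show() later reports them as per-transaction averages: divide the accumulated total by the transaction count, guarding against ts_requested being zero, and convert to milliseconds. A simplified userspace version of that arithmetic follows; the field names echo transaction_stats_s, but the sample numbers and the tick-to-millisecond scaling are invented.

#include <stdio.h>

/* Pared-down analogue of the jbd2 run statistics. */
struct run_stats {
	unsigned long ts_tid;		/* transactions committed */
	unsigned long ts_requested;	/* transactions explicitly requested */
	unsigned long rs_running;	/* total running time, in ticks */
	unsigned long rs_request_delay;	/* total request delay, in ticks */
};

#define TICKS_PER_MSEC 1	/* stand-in for the jiffies_to_msecs() scaling */

/* Average per transaction in milliseconds, with the same divide-by-zero
 * guard jbd2_seq_info_show() applies to ts_requested. */
static unsigned long avg_msecs(unsigned long total, unsigned long count)
{
	return count ? (total / count) / TICKS_PER_MSEC : 0;
}

int main(void)
{
	struct run_stats s = {
		.ts_tid = 40, .ts_requested = 10,
		.rs_running = 2000, .rs_request_delay = 50,
	};

	printf("average running time:  %lu ms\n", avg_msecs(s.rs_running, s.ts_tid));
	printf("average request delay: %lu ms\n",
	       avg_msecs(s.rs_request_delay, s.ts_requested));
	return 0;
}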
D | checkpoint.c |
    568  struct transaction_chp_stats_s *stats;   in __jbd2_journal_remove_checkpoint() local
    619  stats = &transaction->t_chp_stats;   in __jbd2_journal_remove_checkpoint()
    620  if (stats->cs_chp_time)   in __jbd2_journal_remove_checkpoint()
    621  stats->cs_chp_time = jbd2_time_diff(stats->cs_chp_time,   in __jbd2_journal_remove_checkpoint()
    624  transaction->t_tid, stats);   in __jbd2_journal_remove_checkpoint()
/fs/smb/client/

D | smb1ops.c |
    641  atomic_set(&tcon->stats.cifs_stats.num_writes, 0);   in cifs_clear_stats()
    642  atomic_set(&tcon->stats.cifs_stats.num_reads, 0);   in cifs_clear_stats()
    643  atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);   in cifs_clear_stats()
    644  atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);   in cifs_clear_stats()
    645  atomic_set(&tcon->stats.cifs_stats.num_opens, 0);   in cifs_clear_stats()
    646  atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);   in cifs_clear_stats()
    647  atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);   in cifs_clear_stats()
    648  atomic_set(&tcon->stats.cifs_stats.num_closes, 0);   in cifs_clear_stats()
    649  atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);   in cifs_clear_stats()
    650  atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);   in cifs_clear_stats()
    [all …]
/fs/ubifs/

D | sysfs.c |
    57  return sysfs_emit(buf, "%u\n", sbi->stats->magic_errors);   in ubifs_attr_show()
    59  return sysfs_emit(buf, "%u\n", sbi->stats->node_errors);   in ubifs_attr_show()
    61  return sysfs_emit(buf, "%u\n", sbi->stats->crc_errors);   in ubifs_attr_show()
    96  c->stats = kzalloc(sizeof(struct ubifs_stats_info), GFP_KERNEL);   in ubifs_sysfs_register()
    97  if (!c->stats) {   in ubifs_sysfs_register()
    124  kfree(c->stats);   in ubifs_sysfs_register()
    137  kfree(c->stats);   in ubifs_sysfs_unregister()

D | io.c |
    197  static void record_magic_error(struct ubifs_stats_info *stats)   in record_magic_error() argument
    199  if (stats)   in record_magic_error()
    200  stats->magic_errors++;   in record_magic_error()
    203  static void record_node_error(struct ubifs_stats_info *stats)   in record_node_error() argument
    205  if (stats)   in record_node_error()
    206  stats->node_errors++;   in record_node_error()
    209  static void record_crc_error(struct ubifs_stats_info *stats)   in record_crc_error() argument
    211  if (stats)   in record_crc_error()
    212  stats->crc_errors++;   in record_crc_error()
    259  record_magic_error(c->stats);   in ubifs_check_node()
    [all …]
/fs/ext4/

D | ialloc.c |
    377  int flex_size, struct orlov_stats *stats)   in get_orlov_stats() argument
    384  stats->free_inodes = atomic_read(&fg->free_inodes);   in get_orlov_stats()
    385  stats->free_clusters = atomic64_read(&fg->free_clusters);   in get_orlov_stats()
    386  stats->used_dirs = atomic_read(&fg->used_dirs);   in get_orlov_stats()
    392  stats->free_inodes = ext4_free_inodes_count(sb, desc);   in get_orlov_stats()
    393  stats->free_clusters = ext4_free_group_clusters(sb, desc);   in get_orlov_stats()
    394  stats->used_dirs = ext4_used_dirs_count(sb, desc);   in get_orlov_stats()
    396  stats->free_inodes = 0;   in get_orlov_stats()
    397  stats->free_clusters = 0;   in get_orlov_stats()
    398  stats->used_dirs = 0;   in get_orlov_stats()
    [all …]

D | fast_commit.c |
    1181  struct ext4_fc_stats *stats = &EXT4_SB(sb)->s_fc_stats;   in ext4_fc_update_stats() local
    1186  stats->fc_num_commits++;   in ext4_fc_update_stats()
    1187  stats->fc_numblks += nblks;   in ext4_fc_update_stats()
    1188  if (likely(stats->s_fc_avg_commit_time))   in ext4_fc_update_stats()
    1189  stats->s_fc_avg_commit_time =   in ext4_fc_update_stats()
    1191  stats->s_fc_avg_commit_time * 3) / 4;   in ext4_fc_update_stats()
    1193  stats->s_fc_avg_commit_time = commit_time;   in ext4_fc_update_stats()
    1197  stats->fc_failed_commits++;   in ext4_fc_update_stats()
    1198  stats->fc_ineligible_commits++;   in ext4_fc_update_stats()
    1200  stats->fc_skipped_commits++;   in ext4_fc_update_stats()
    [all …]
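ext4_fc_update_stats() keeps a smoothed fast-commit latency: the first sample seeds s_fc_avg_commit_time, and every later commit_time is blended in as one quarter of the new average, i.e. (commit_time + avg * 3) / 4. A minimal sketch of that update rule with made-up sample values:

#include <stdio.h>

/* Exponential moving average with the 3/4 decay used by ext4 fast commit:
 * each new sample contributes 25% of the new average. */
static unsigned long long update_avg(unsigned long long avg,
				     unsigned long long commit_time)
{
	if (avg)
		return (commit_time + avg * 3) / 4;
	return commit_time;	/* first sample seeds the average */
}

int main(void)
{
	unsigned long long samples[] = { 400, 800, 200, 600 };	/* invented latencies */
	unsigned long long avg = 0;

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		avg = update_avg(avg, samples[i]);
		printf("after sample %llu: avg=%llu\n", samples[i], avg);
	}
	return 0;
}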
/fs/nfs/

D | iostat.h |
    71  static inline void nfs_free_iostats(struct nfs_iostats __percpu *stats)   in nfs_free_iostats() argument
    73  if (stats != NULL)   in nfs_free_iostats()
    74  free_percpu(stats);   in nfs_free_iostats()
/fs/nilfs2/

D | btree.c |
    1059  struct nilfs_bmap_stats *stats)   in nilfs_btree_prepare_insert() argument
    1067  stats->bs_nblocks = 0;   in nilfs_btree_prepare_insert()
    1089  stats->bs_nblocks++;   in nilfs_btree_prepare_insert()
    1107  stats->bs_nblocks++;   in nilfs_btree_prepare_insert()
    1125  stats->bs_nblocks++;   in nilfs_btree_prepare_insert()
    1145  stats->bs_nblocks++;   in nilfs_btree_prepare_insert()
    1158  stats->bs_nblocks++;   in nilfs_btree_prepare_insert()
    1181  stats->bs_nblocks += 2;   in nilfs_btree_prepare_insert()
    1201  stats->bs_nblocks = 0;   in nilfs_btree_prepare_insert()
    1232  struct nilfs_bmap_stats stats;   in nilfs_btree_insert() local
    [all …]
/fs/netfs/

D | Kconfig |
    17  /proc/fs/fscache/stats
    20  execution as there are a quite a few stats gathered, and on a
    22  between CPUs. On the other hand, the stats are very useful for

D | Makefile |
    9  netfs-$(CONFIG_NETFS_STATS) += stats.o
/fs/fscache/

D | Kconfig |
    22  /proc/fs/fscache/stats
    25  execution as there are a quite a few stats gathered, and on a
    27  between CPUs. On the other hand, the stats are very useful for

D | Makefile |
    14  fscache-$(CONFIG_FSCACHE_STATS) += stats.o
/fs/gfs2/

D | trace_gfs2.h |
    285  __entry->srtt = gl->gl_stats.stats[GFS2_LKS_SRTT];
    286  __entry->srttvar = gl->gl_stats.stats[GFS2_LKS_SRTTVAR];
    287  __entry->srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
    288  __entry->srttvarb = gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
    289  __entry->sirt = gl->gl_stats.stats[GFS2_LKS_SIRT];
    290  __entry->sirtvar = gl->gl_stats.stats[GFS2_LKS_SIRTVAR];
    291  __entry->dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];
    292  __entry->qcount = gl->gl_stats.stats[GFS2_LKS_QCOUNT];
/fs/smb/server/

D | vfs_cache.c |
    363  atomic_dec(&work->conn->stats.open_files_count);   in __put_fd_final()
    598  atomic_inc(&work->conn->stats.open_files_count);   in ksmbd_open_fd()
    659  atomic_sub(num, &work->conn->stats.open_files_count);   in ksmbd_close_tree_conn_fds()
    668  atomic_sub(num, &work->conn->stats.open_files_count);   in ksmbd_close_session_fds()
/fs/nfsd/

D | export.c |
    334  static int export_stats_init(struct export_stats *stats)   in export_stats_init() argument
    336  stats->start_time = ktime_get_seconds();   in export_stats_init()
    337  return nfsd_percpu_counters_init(stats->counter, EXP_STATS_COUNTERS_NUM);   in export_stats_init()
    340  static void export_stats_reset(struct export_stats *stats)   in export_stats_reset() argument
    342  nfsd_percpu_counters_reset(stats->counter, EXP_STATS_COUNTERS_NUM);   in export_stats_reset()
    345  static void export_stats_destroy(struct export_stats *stats)   in export_stats_destroy() argument
    347  nfsd_percpu_counters_destroy(stats->counter, EXP_STATS_COUNTERS_NUM);   in export_stats_destroy()