
Searched refs:sd_vfs (Results 1 – 17 of 17) sorted by relevance

/fs/gfs2/
trace_gfs2.h
   108  __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
   144  __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
   178  __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
   213  __entry->dev = gh->gh_gl->gl_name.ln_sbd->sd_vfs->s_dev;
   243  __entry->dev = gh->gh_gl->gl_name.ln_sbd->sd_vfs->s_dev;
   282  __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
   337  __entry->dev = bd->bd_gl->gl_name.ln_sbd->sd_vfs->s_dev;
   367  __entry->dev = sdp->sd_vfs->s_dev;
   394  __entry->dev = sdp->sd_vfs->s_dev;
   418  __entry->dev = sdp->sd_vfs->s_dev;
   [all …]

sys.c
    63  MAJOR(sdp->sd_vfs->s_dev), MINOR(sdp->sd_vfs->s_dev));  in id_show()
    73  struct super_block *s = sdp->sd_vfs;  in uuid_show()
    83  struct super_block *sb = sdp->sd_vfs;  in freeze_show()
   102  error = thaw_super(sdp->sd_vfs);  in freeze_store()
   105  error = freeze_super(sdp->sd_vfs);  in freeze_store()
   160  gfs2_statfs_sync(sdp->sd_vfs, 0);  in statfs_sync_store()
   179  gfs2_quota_sync(sdp->sd_vfs, 0);  in quota_sync_store()
   649  struct super_block *sb = sdp->sd_vfs;  in gfs2_sys_fs_add()
   707  struct super_block *s = sdp->sd_vfs;  in gfs2_uevent()

trans.c
    74  sb_start_intwrite(sdp->sd_vfs);  in gfs2_trans_begin()
    85  sb_end_intwrite(sdp->sd_vfs);  in gfs2_trans_begin()
   103  sb_end_intwrite(sdp->sd_vfs);  in gfs2_trans_end()
   121  if (sdp->sd_vfs->s_flags & SB_SYNCHRONOUS)  in gfs2_trans_end()
   125  sb_end_intwrite(sdp->sd_vfs);  in gfs2_trans_end()

util.c
   152  if (!sb_rdonly(sdp->sd_vfs)) {  in signal_our_withdraw()
   187  thaw_super(sdp->sd_vfs);  in signal_our_withdraw()
   270  inode = gfs2_inode_lookup(sdp->sd_vfs, DT_UNKNOWN,  in signal_our_withdraw()

ops_fstype.c
    81  sdp->sd_vfs = sb;  in init_sbd()
   206  struct super_block *s = sdp->sd_vfs;  in gfs2_sb_in()
   247  struct super_block *sb = sdp->sd_vfs;  in gfs2_read_super()
   385  table = sdp->sd_vfs->s_id;  in init_names()
   478  struct super_block *sb = sdp->sd_vfs;  in init_sb()
  1071  struct super_block *sb = sdp->sd_vfs;  in gfs2_online_uevent()

recovery.c
   483  if (sb_rdonly(sdp->sd_vfs)) {  in gfs2_recover_func()
   485  ro = bdev_read_only(sdp->sd_vfs->s_bdev);  in gfs2_recover_func()

super.c
   577  gfs2_quota_sync(sdp->sd_vfs, 0);  in gfs2_make_fs_ro()
   578  gfs2_statfs_sync(sdp->sd_vfs, 0);  in gfs2_make_fs_ro()
   594  sdp->sd_vfs->s_flags |= SB_RDONLY;  in gfs2_make_fs_ro()
   696  struct super_block *sb = sdp->sd_vfs;  in gfs2_freeze_func()

quota.c
   463  if (sb_rdonly(sdp->sd_vfs))  in qd_fish()
  1255  sdp->sd_vfs->s_dev,  in gfs2_quota_check()
  1266  sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);  in gfs2_quota_check()
  1519  int error = fxn(sdp->sd_vfs, 0);  in quotad_check_timeo()
  1576  int error = gfs2_statfs_sync(sdp->sd_vfs, 0);  in gfs2_quotad()

glops.c
   592  error = freeze_super(sdp->sd_vfs);  in freeze_go_sync()
   664  if (!remote || sb_rdonly(sdp->sd_vfs))  in iopen_go_callback()

lops.c
   264  struct super_block *sb = sdp->sd_vfs;  in gfs2_log_alloc_bio()
   374  struct super_block *sb = sdp->sd_vfs;  in gfs2_log_write_page()

meta_io.c
   150  map_bh(bh, sdp->sd_vfs, blkno);  in gfs2_getbuf()

log.c
   434  unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);  in gfs2_log_reserve()
   768  struct super_block *sb = sdp->sd_vfs;  in gfs2_write_log_header()

incore.h
   709  struct super_block *sd_vfs;  member (see the sketch after the results)

inode.c
   242  struct super_block *sb = sdp->sd_vfs;  in gfs2_lookup_by_inum()
   660  inode = new_inode(sdp->sd_vfs);  in gfs2_create_inode()

bmap.c
  1375  u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;  in gfs2_journaled_truncate()
  2410  loff_t max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;  in gfs2_journaled_truncate_range()

rgrp.c
  1290  struct super_block *sb = sdp->sd_vfs;  in gfs2_rgrp_send_discards()
  1365  struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);  in gfs2_fitrim()

glock.c
  1017  struct super_block *s = sdp->sd_vfs;  in gfs2_glock_get()
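
For orientation, the incore.h hit above is the definition site: sd_vfs is the field of the GFS2 in-core superblock descriptor (struct gfs2_sbd in fs/gfs2/incore.h) that points back to the VFS super_block. It is assigned once at mount time in init_sbd() (ops_fstype.c:81) and only read everywhere else. The sketch below is not verbatim kernel code; the struct is reduced to this one field, and gfs2_sbd_dev() is a hypothetical helper that only illustrates the read pattern seen in the trace_gfs2.h and quota.c hits.

/* Minimal sketch, assuming a kernel build context; not the actual layout
 * of struct gfs2_sbd, which has many more fields. */
#include <linux/fs.h>

struct gfs2_sbd {
	struct super_block *sd_vfs;	/* back-pointer to the owning VFS super_block */
	/* ... other fields elided ... */
};

/* Set exactly once at mount time, mirroring init_sbd() (ops_fstype.c:81). */
static void sketch_init_sbd(struct gfs2_sbd *sdp, struct super_block *sb)
{
	sdp->sd_vfs = sb;
}

/* Hypothetical helper: the common read-side pattern is to reach VFS state
 * (device number, block size, flags, backing bdev) through sd_vfs. */
static inline dev_t gfs2_sbd_dev(const struct gfs2_sbd *sdp)
{
	return sdp->sd_vfs->s_dev;
}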