/fs/gfs2/

super.c
    132  struct gfs2_glock *j_gl = ip->i_gl;  in gfs2_make_fs_rw()
    190  error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,  in gfs2_statfs_init()
    229  gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh);  in gfs2_statfs_change()
    256  gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh);  in update_statfs()
    257  gfs2_trans_add_meta(m_ip->i_gl, m_bh);  in update_statfs()
    280  error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,  in gfs2_statfs_sync()
    344  error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);  in gfs2_lock_fs_check_clean()
    433  struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);  in gfs2_write_inode()
    439  gfs2_log_flush(GFS2_SB(inode), ip->i_gl,  in gfs2_write_inode()
    484  if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {  in gfs2_dirty_inode()
    [all …]

inode.c
    149  error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);  in gfs2_inode_lookup()
    152  flush_delayed_work(&ip->i_gl->gl_work);  in gfs2_inode_lookup()
    166  error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE,  in gfs2_inode_lookup()
    173  gfs2_inode_already_deleted(ip->i_gl, no_formal_ino))  in gfs2_inode_lookup()
    184  glock_set_object(ip->i_gl, ip);  in gfs2_inode_lookup()
    320  if (gfs2_glock_is_locked_by_me(dip->i_gl) == NULL) {  in gfs2_lookupi()
    321  error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);  in gfs2_lookupi()
    460  bh = gfs2_meta_new(ip->i_gl, ip->i_eattr);  in gfs2_init_xattr()
    461  gfs2_trans_add_meta(ip->i_gl, bh);  in gfs2_init_xattr()
    487  dibh = gfs2_meta_new(ip->i_gl, ip->i_no_addr);  in init_dinode()
    [all …]

xattr.c
    131  error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, 0, &bh);  in ea_foreach()
    155  error = gfs2_meta_read(ip->i_gl, bn, DIO_WAIT, 0, &eabh);  in ea_foreach()
    278  gfs2_trans_add_meta(ip->i_gl, bh);  in ea_dealloc_unstuffed()
    425  error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);  in gfs2_listxattr()
    471  error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0, 0,  in gfs2_iter_unstuffed()
    504  gfs2_trans_add_meta(ip->i_gl, bh[x]);  in gfs2_iter_unstuffed()
    613  if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {  in gfs2_xattr_get()
    614  ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);  in gfs2_xattr_get()
    646  *bhp = gfs2_meta_new(ip->i_gl, block);  in ea_alloc_blk()
    647  gfs2_trans_add_meta(ip->i_gl, *bhp);  in ea_alloc_blk()
    [all …]

file.c
    65   error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,  in gfs2_llseek()
    111  error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);  in gfs2_readdir()
    168  gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);  in gfs2_fileattr_get()
    229  error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);  in do_gfs2_set_flags()
    251  gfs2_log_flush(sdp, ip->i_gl,  in do_gfs2_set_flags()
    270  gfs2_trans_add_meta(ip->i_gl, bh);  in do_gfs2_set_flags()
    438  gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);  in gfs2_page_mkwrite()
    463  set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);  in gfs2_page_mkwrite()
    564  gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);  in gfs2_fault()
    604  error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,  in gfs2_mmap()
    [all …]

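The gfs2_page_mkwrite() and gfs2_fault() hits above use the two-step holder form, gfs2_holder_init() followed by gfs2_glock_nq(), rather than the combined gfs2_glock_nq_init(). A condensed sketch of that pattern follows; the fault handling itself is elided, and the err variable and out_uninit label are illustrative names, not taken from the source:

    struct gfs2_holder gh;
    int err;

    /* Prepare a holder for the inode glock, then queue the lock request. */
    gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
    err = gfs2_glock_nq(&gh);
    if (err)
            goto out_uninit;

    /* ... make the page writable while the glock is held exclusively ... */

    /* Record that the glock's contents must be written back before release. */
    set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);

    gfs2_glock_dq(&gh);
out_uninit:
    gfs2_holder_uninit(&gh);
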
util.c
    60   error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_NOEXP |  in check_journal_clean()
    125  struct gfs2_glock *i_gl;  in signal_our_withdraw()  local
    137  i_gl = ip->i_gl;  in signal_our_withdraw()
    191  wait_on_bit(&i_gl->gl_flags, GLF_DEMOTE,  in signal_our_withdraw()
    212  if (i_gl->gl_ops->go_free) {  in signal_our_withdraw()
    213  set_bit(GLF_FREEING, &i_gl->gl_flags);  in signal_our_withdraw()
    214  wait_on_bit(&i_gl->gl_flags, GLF_FREEING, TASK_UNINTERRUPTIBLE);  in signal_our_withdraw()

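Read together, the signal_our_withdraw() fragments above form the withdraw-time wait on the journal inode's glock. The following is a reconstruction of those listed lines with comments added, not the complete function; the wait mode on the GLF_DEMOTE line is assumed, since the listing truncates that argument:

    /* i_gl aliases the journal inode's glock (line 137 above). */
    struct gfs2_glock *i_gl = ip->i_gl;

    /* Wait for any pending demote of the glock to finish
     * (mode assumed TASK_UNINTERRUPTIBLE; truncated in the listing). */
    wait_on_bit(&i_gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);

    /* If the glock type has a go_free callback, mark the glock as being
     * freed and sleep until the glock state machine clears the bit. */
    if (i_gl->gl_ops->go_free) {
            set_bit(GLF_FREEING, &i_gl->gl_flags);
            wait_on_bit(&i_gl->gl_flags, GLF_FREEING, TASK_UNINTERRUPTIBLE);
    }
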
acl.c
    70   if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {  in gfs2_get_acl()
    71   int ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,  in gfs2_get_acl()
    128  if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {  in gfs2_set_acl()
    129  ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);  in gfs2_set_acl()

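gfs2_get_acl() and gfs2_set_acl(), like gfs2_xattr_get() and gfs2_dirty_inode() above, only acquire the inode glock when the calling task does not already hold it. A minimal sketch of that pattern, assuming ip is a struct gfs2_inode * and with the actual ACL work elided; need_unlock is an illustrative local, not a name from the source:

    struct gfs2_holder gh;
    bool need_unlock = false;
    int ret;

    /* Take a shared glock only if this task does not already hold one. */
    if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
            ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
            if (ret)
                    return ret;
            need_unlock = true;
    }

    /* ... read or update the ACL xattr under the glock ... */

    if (need_unlock)
            gfs2_glock_dq_uninit(&gh);
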
dir.c
    94   bh = gfs2_meta_new(ip->i_gl, block);  in gfs2_dir_get_new_buffer()
    95   gfs2_trans_add_meta(ip->i_gl, bh);  in gfs2_dir_get_new_buffer()
    108  error = gfs2_meta_read(ip->i_gl, block, DIO_WAIT, 0, &bh);  in gfs2_dir_get_existing_buffer()
    129  gfs2_trans_add_meta(ip->i_gl, dibh);  in gfs2_dir_write_stuffed()
    210  gfs2_trans_add_meta(ip->i_gl, bh);  in gfs2_dir_write_data()
    232  gfs2_trans_add_meta(ip->i_gl, dibh);  in gfs2_dir_write_data()
    301  bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);  in gfs2_dir_read_data()
    303  error = gfs2_meta_read(ip->i_gl, dblock, DIO_WAIT, 0, &bh);  in gfs2_dir_read_data()
    678  gfs2_trans_add_meta(dip->i_gl, bh);  in dirent_del()
    717  gfs2_trans_add_meta(ip->i_gl, bh);  in do_init_dirent()
    [all …]

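The dir.c hits show the usual metadata update sequence: read (or allocate) a block through the inode's glock, add the buffer to the running transaction with gfs2_trans_add_meta(), then modify it. A minimal sketch, assuming a transaction has already been opened with gfs2_trans_begin() and using block as a placeholder block number:

    struct buffer_head *bh;
    int error;

    /* Read an existing metadata block through the inode glock. */
    error = gfs2_meta_read(ip->i_gl, block, DIO_WAIT, 0, &bh);
    if (error)
            return error;

    /* Journal the buffer as metadata before touching its contents. */
    gfs2_trans_add_meta(ip->i_gl, bh);

    /* ... modify the directory data in bh->b_data ... */

    brelse(bh);

Buffers holding journaled file data go through gfs2_trans_add_data() instead, as in gfs2_unstuffer_page() and gfs2_page_add_databufs() in the bmap.c and aops.c entries below.
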
dentry.c
    63   had_lock = (gfs2_glock_is_locked_by_me(dip->i_gl) != NULL);  in gfs2_drevalidate()
    65   error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);  in gfs2_drevalidate()

bmap.c
    83    gfs2_trans_add_data(ip->i_gl, bh);  in gfs2_unstuffer_page()
    129   gfs2_trans_add_meta(ip->i_gl, dibh);  in __gfs2_unstuff_inode()
    679   gfs2_trans_add_meta(ip->i_gl, dibh);  in __gfs2_iomap_alloc()
    723   gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);  in __gfs2_iomap_alloc()
    749   gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);  in __gfs2_iomap_alloc()
    751   gfs2_indirect_init(mp, ip->i_gl, i,  in __gfs2_iomap_alloc()
    761   gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);  in __gfs2_iomap_alloc()
    1168  set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);  in gfs2_iomap_end()
    1369  gfs2_trans_add_meta(ip->i_gl, dibh);  in trunc_start()
    1541  gfs2_trans_add_meta(ip->i_gl, bh);  in sweep_bh_for_rgrps()
    [all …]

aops.c
    57   gfs2_trans_add_data(ip->i_gl, bh);  in gfs2_page_add_databufs()
    96   if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))  in gfs2_writepage()
    182  if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))  in gfs2_jdata_writepage()
    417  gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |  in gfs2_jdata_writepages()
    635  error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);  in gfs2_bmap()

lops.c
    796   struct gfs2_glock *gl = ip->i_gl;  in buf_lo_scan_elements()
    851   gfs2_inode_metasync(ip->i_gl);  in buf_lo_after_scan()
    857   gfs2_inode_metasync(ip->i_gl);  in buf_lo_after_scan()
    1020  struct gfs2_glock *gl = ip->i_gl;  in databuf_lo_scan_elements()
    1071  gfs2_inode_metasync(ip->i_gl);  in databuf_lo_after_scan()
    1078  gfs2_inode_metasync(ip->i_gl);  in databuf_lo_after_scan()

quota.c
    388   error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, 0, &bh);  in bh_get()
    679   gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);  in do_qc()
    762   gfs2_trans_add_data(ip->i_gl, bh);  in gfs2_write_buf_to_page()
    927   error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);  in do_sync()
    986   gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,  in do_sync()
    1043  error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);  in do_glock()
    1403  bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);  in gfs2_quota_init()
    1716  error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);  in gfs2_set_dqblk()

recovery.c
    36   struct gfs2_glock *gl = ip->i_gl;  in gfs2_replay_read_block()
    353  gfs2_inode_metasync(ip->i_gl);  in update_statfs_inode()
    447  error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,  in gfs2_recover_func()

trace_gfs2.h
    458  __entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
    494  __entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
    526  __entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;

meta_io.c
    449  bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);  in gfs2_journal_wipe()
    484  struct gfs2_glock *gl = ip->i_gl;  in gfs2_meta_buffer()

ops_fstype.c
    585  error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh);  in gfs2_jindex_hold()
    699  error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0,  in init_statfs()
    796  sdp->sd_jinode_gl = ip->i_gl;  in init_journal()
    797  error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,  in init_journal()
    969  error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0,  in init_per_node()

export.c
    112  error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &gh);  in gfs2_get_name()

incore.h
    390  struct gfs2_glock *i_gl;  member

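The single incore.h hit is the definition of the i_gl member in struct gfs2_inode. A reduced, illustrative sketch of that structure follows; field order is approximate and most members are omitted, so see fs/gfs2/incore.h for the real definition:

    struct gfs2_inode {
            struct inode i_inode;            /* embedded VFS inode */
            u64 i_no_addr;                   /* dinode block number */
            u64 i_no_formal_ino;             /* "formal" inode number */
            u64 i_eattr;                     /* extended attribute block */
            unsigned long i_flags;           /* GIF_... flags */
            struct gfs2_glock *i_gl;         /* inode glock (line 390 above) */
            struct gfs2_holder i_iopen_gh;   /* iopen glock holder */
            /* ... */
    };
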
glock.c
    693   if (gl == m_ip->i_gl)  in is_system_glock()
    951   inode_gl = ip->i_gl;  in gfs2_try_evict()
    2189  struct gfs2_glock *gl = ip->i_gl;  in gfs2_glock_finish_truncate()

glops.c
    612  struct gfs2_glock *j_gl = ip->i_gl;  in freeze_go_xmote_bh()

rgrp.c
    1037  struct gfs2_glock *gl = ip->i_gl;  in gfs2_rindex_update()
    2469  gfs2_trans_add_meta(ip->i_gl, dibh);  in gfs2_alloc_blocks()