Searched refs:gl (Results 1 – 24 of 24) sorted by relevance

/fs/gfs2/
glock.c
57 struct gfs2_glock *gl; /* current glock struct */ member
61 typedef void (*glock_examiner) (struct gfs2_glock * gl);
63 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
65 static void handle_callback(struct gfs2_glock *gl, unsigned int state,
122 static void wake_up_glock(struct gfs2_glock *gl) in wake_up_glock() argument
124 wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name); in wake_up_glock()
127 __wake_up(wq, TASK_NORMAL, 1, &gl->gl_name); in wake_up_glock()
132 struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu); in gfs2_glock_dealloc() local
134 kfree(gl->gl_lksb.sb_lvbptr); in gfs2_glock_dealloc()
135 if (gl->gl_ops->go_flags & GLOF_ASPACE) { in gfs2_glock_dealloc()
[all …]
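
The gfs2_glock_dealloc() hits above are an instance of the RCU-deferred free pattern: the object embeds an rcu_head, the callback recovers the enclosing object with container_of(), and the memory is released only after a grace period. A minimal, self-contained sketch of that pattern follows; the demo_* names are illustrative, not gfs2's.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_obj {
	int payload;
	struct rcu_head rcu;	/* handed to call_rcu() below */
};

static void demo_obj_dealloc(struct rcu_head *rcu)
{
	/* Recover the enclosing object from its embedded rcu_head. */
	struct demo_obj *obj = container_of(rcu, struct demo_obj, rcu);

	kfree(obj);
}

static void demo_obj_release(struct demo_obj *obj)
{
	/*
	 * Readers under rcu_read_lock() may still hold a pointer;
	 * defer the kfree() until they are guaranteed to be done.
	 */
	call_rcu(&obj->rcu, demo_obj_dealloc);
}
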
glock.h
135 void (*lm_put_lock) (struct gfs2_glock *gl);
136 int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state,
138 void (*lm_cancel) (struct gfs2_glock *gl);
148 static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl) in gfs2_glock_is_locked_by_me() argument
154 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_is_locked_by_me()
156 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in gfs2_glock_is_locked_by_me()
166 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_is_locked_by_me()
171 static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl) in gfs2_glock_is_held_excl() argument
173 return gl->gl_state == LM_ST_EXCLUSIVE; in gfs2_glock_is_held_excl()
176 static inline int gfs2_glock_is_held_dfrd(struct gfs2_glock *gl) in gfs2_glock_is_held_dfrd() argument
[all …]
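
The inline helpers above pair with the holder API used elsewhere in these results (gfs2_glock_nq_init() in inode.c, for example). A minimal sketch of that idiom, assuming only the calls visible in this listing:

/* Take a shared holder, check the helper sees it, drop it again. */
static int demo_read_under_glock(struct gfs2_glock *gl)
{
	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
	if (error)
		return error;

	/* The shared lock is held here, so this must find our holder. */
	WARN_ON(gfs2_glock_is_locked_by_me(gl) == NULL);

	gfs2_glock_dq_uninit(&gh);
	return 0;
}
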
glops.c
34 static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh) in gfs2_ail_error() argument
36 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_ail_error()
44 gl->gl_name.ln_type, gl->gl_name.ln_number, in gfs2_ail_error()
45 gfs2_glock2aspace(gl)); in gfs2_ail_error()
59 static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync, in __gfs2_ail_flush() argument
62 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in __gfs2_ail_flush()
63 struct list_head *head = &gl->gl_ail_list; in __gfs2_ail_flush()
77 gfs2_ail_error(gl, bh); in __gfs2_ail_flush()
82 GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count)); in __gfs2_ail_flush()
88 static int gfs2_ail_empty_gl(struct gfs2_glock *gl) in gfs2_ail_empty_gl() argument
[all …]
lock_dlm.c
75 static inline void gfs2_update_reply_times(struct gfs2_glock *gl) in gfs2_update_reply_times() argument
78 const unsigned gltype = gl->gl_name.ln_type; in gfs2_update_reply_times()
79 unsigned index = test_bit(GLF_BLOCKING, &gl->gl_flags) ? in gfs2_update_reply_times()
84 rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp)); in gfs2_update_reply_times()
85 lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats); in gfs2_update_reply_times()
86 gfs2_update_stats(&gl->gl_stats, index, rtt); /* Local */ in gfs2_update_reply_times()
90 trace_gfs2_glock_lock_time(gl, rtt); in gfs2_update_reply_times()
102 static inline void gfs2_update_request_times(struct gfs2_glock *gl) in gfs2_update_request_times() argument
105 const unsigned gltype = gl->gl_name.ln_type; in gfs2_update_request_times()
110 dstamp = gl->gl_dstamp; in gfs2_update_request_times()
[all …]
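
gfs2_update_stats(), called above with the measured DLM round-trip time, keeps shift-based running estimates in the style of TCP's SRTT/RTTVAR. A hedged userspace sketch of that smoothing, with illustrative field names rather than the kernel's stats array:

#include <stdint.h>
#include <stdlib.h>

struct demo_lkstats {
	int64_t srtt;		/* smoothed round-trip time */
	int64_t srttvar;	/* smoothed RTT variability */
};

static void demo_update_stats(struct demo_lkstats *s, int64_t sample)
{
	int64_t delta = sample - s->srtt;

	/* Move the mean by delta/8 and the variability by |delta|/4. */
	s->srtt += delta >> 3;
	s->srttvar += (llabs(delta) - s->srttvar) >> 2;
}
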
trace_gfs2.h
92 TP_PROTO(const struct gfs2_glock *gl, unsigned int new_state),
94 TP_ARGS(gl, new_state),
108 __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
109 __entry->glnum = gl->gl_name.ln_number;
110 __entry->gltype = gl->gl_name.ln_type;
111 __entry->cur_state = glock_trace_state(gl->gl_state);
113 __entry->tgt_state = glock_trace_state(gl->gl_target);
114 __entry->dmt_state = glock_trace_state(gl->gl_demote_state);
115 __entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
131 TP_PROTO(const struct gfs2_glock *gl),
[all …]
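
The fragment above is the body of a TRACE_EVENT() definition. For orientation, an abbreviated sketch of the macro's anatomy with a hypothetical event name (the TRACE_SYSTEM boilerplate a real trace header needs is omitted): TP_PROTO declares the C prototype, TP_ARGS names the arguments, TP_STRUCT__entry lays out the ring-buffer record, TP_fast_assign fills it, and TP_printk formats it.

TRACE_EVENT(demo_glock_event,
	TP_PROTO(const struct gfs2_glock *gl, unsigned int new_state),
	TP_ARGS(gl, new_state),
	TP_STRUCT__entry(
		__field(u64, glnum)
		__field(u32, gltype)
		__field(u8, new_state)
	),
	TP_fast_assign(
		__entry->glnum = gl->gl_name.ln_number;
		__entry->gltype = gl->gl_name.ln_type;
		__entry->new_state = new_state;
	),
	TP_printk("glock %u:%llu -> state %u", __entry->gltype,
		  (unsigned long long)__entry->glnum, __entry->new_state)
);
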
meta_io.c
114 struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create) in gfs2_getbuf() argument
116 struct address_space *mapping = gfs2_glock2aspace(gl); in gfs2_getbuf()
117 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_getbuf()
186 struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno) in gfs2_meta_new() argument
189 bh = gfs2_getbuf(gl, blkno, CREATE); in gfs2_meta_new()
253 int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags, in gfs2_meta_read() argument
256 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_meta_read()
265 *bhp = bh = gfs2_getbuf(gl, blkno, CREATE); in gfs2_meta_read()
278 bh = gfs2_getbuf(gl, blkno + 1, CREATE); in gfs2_meta_read()
483 struct gfs2_glock *gl = ip->i_gl; in gfs2_meta_buffer() local
[all …]
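
A sketch of the caller side of gfs2_meta_read() above. The listing truncates its trailing parameters; this assumes the mainline signature, which ends with a readahead count and a buffer_head out-pointer:

static int demo_read_meta_block(struct gfs2_glock *gl, u64 blkno)
{
	struct buffer_head *bh;
	int error;

	/* DIO_WAIT: sleep until the read completes; 0 = no readahead. */
	error = gfs2_meta_read(gl, blkno, DIO_WAIT, 0, &bh);
	if (error)
		return error;

	/* ... inspect bh->b_data ... */

	brelse(bh);	/* drop the reference the read path took */
	return 0;
}
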
incore.h
38 typedef void (*gfs2_glop_bh_t) (struct gfs2_glock *gl, unsigned int ret);
218 int (*go_sync) (struct gfs2_glock *gl);
219 int (*go_xmote_bh)(struct gfs2_glock *gl);
220 void (*go_inval) (struct gfs2_glock *gl, int flags);
221 int (*go_demote_ok) (const struct gfs2_glock *gl);
222 int (*go_instantiate) (struct gfs2_glock *gl);
224 void (*go_dump)(struct seq_file *seq, struct gfs2_glock *gl,
226 void (*go_callback)(struct gfs2_glock *gl, bool remote);
227 void (*go_free)(struct gfs2_glock *gl);
852 static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which) in gfs2_glstats_inc() argument
[all …]
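
The function pointers above form the per-type glock operations vtable. A sketch of how such a table is typically populated, with an illustrative demote hook and unused callbacks left NULL (the .go_type value is an assumption, not taken from this listing):

static int demo_go_demote_ok(const struct gfs2_glock *gl)
{
	return 1;	/* this demo type can always be demoted */
}

static const struct gfs2_glock_operations demo_glops = {
	.go_demote_ok	= demo_go_demote_ok,
	.go_type	= LM_TYPE_META,
};
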
main.c
52 struct gfs2_glock *gl = foo; in gfs2_init_glock_once() local
54 spin_lock_init(&gl->gl_lockref.lock); in gfs2_init_glock_once()
55 INIT_LIST_HEAD(&gl->gl_holders); in gfs2_init_glock_once()
56 INIT_LIST_HEAD(&gl->gl_lru); in gfs2_init_glock_once()
57 INIT_LIST_HEAD(&gl->gl_ail_list); in gfs2_init_glock_once()
58 atomic_set(&gl->gl_ail_count, 0); in gfs2_init_glock_once()
59 atomic_set(&gl->gl_revokes, 0); in gfs2_init_glock_once()
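
gfs2_init_glock_once() above is a slab constructor: it runs once per slab object when the backing page is populated, not on every allocation, so it may only set up state that survives free/alloc cycles (locks, list heads, counters). A sketch of how such a constructor is registered, with an assumed cache name and flags:

static struct kmem_cache *demo_glock_cachep;

static int __init demo_cache_setup(void)
{
	demo_glock_cachep = kmem_cache_create("demo_glock",
					      sizeof(struct gfs2_glock),
					      0, SLAB_RECLAIM_ACCOUNT,
					      gfs2_init_glock_once);
	return demo_glock_cachep ? 0 : -ENOMEM;
}
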
trans.c
163 static struct gfs2_bufdata *gfs2_alloc_bufdata(struct gfs2_glock *gl, in gfs2_alloc_bufdata() argument
170 bd->bd_gl = gl; in gfs2_alloc_bufdata()
192 void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh) in gfs2_trans_add_data() argument
195 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_trans_add_data()
209 bd = gfs2_alloc_bufdata(gl, bh); in gfs2_trans_add_data()
215 gfs2_assert(sdp, bd->bd_gl == gl); in gfs2_trans_add_data()
229 void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh) in gfs2_trans_add_meta() argument
232 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_trans_add_meta()
250 bd = gfs2_alloc_bufdata(gl, bh); in gfs2_trans_add_meta()
257 gfs2_assert(sdp, bd->bd_gl == gl); in gfs2_trans_add_meta()
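
gfs2_trans_add_data() and gfs2_trans_add_meta() above may only run inside an open transaction. A sketch of the usual bracket, assuming the standard gfs2_trans_begin()/gfs2_trans_end() pairing and the RES_DINODE block reservation (the bh is assumed held and locked by the caller):

static int demo_dirty_dinode(struct gfs2_sbd *sdp, struct gfs2_inode *ip,
			     struct buffer_head *bh)
{
	int error;

	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;

	gfs2_trans_add_meta(ip->i_gl, bh);	/* journal this block */
	/* ... modify bh->b_data ... */

	gfs2_trans_end(sdp);
	return 0;
}
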
meta_io.h
53 extern struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno);
54 extern int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
57 extern struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno,
75 struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen);
rgrp.c
720 struct gfs2_glock *gl; in gfs2_clear_rgrpd() local
724 gl = rgd->rd_gl; in gfs2_clear_rgrpd()
728 if (gl) { in gfs2_clear_rgrpd()
729 if (gl->gl_state != LM_ST_UNLOCKED) { in gfs2_clear_rgrpd()
730 gfs2_glock_cb(gl, LM_ST_UNLOCKED); in gfs2_clear_rgrpd()
731 flush_delayed_work(&gl->gl_work); in gfs2_clear_rgrpd()
734 glock_clear_object(gl, rgd); in gfs2_clear_rgrpd()
735 gfs2_glock_put(gl); in gfs2_clear_rgrpd()
1037 struct gfs2_glock *gl = ip->i_gl; in gfs2_rindex_update() local
1044 if (!gfs2_glock_is_locked_by_me(gl)) { in gfs2_rindex_update()
[all …]
lops.c
73 struct gfs2_glock *gl = bd->bd_gl; in maybe_release_space() local
74 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in maybe_release_space()
75 struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl); in maybe_release_space()
76 unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number; in maybe_release_space()
122 struct gfs2_glock *gl = bd->bd_gl; in gfs2_unpin() local
123 list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list); in gfs2_unpin()
124 atomic_inc(&gl->gl_ail_count); in gfs2_unpin()
793 struct gfs2_glock *gl = ip->i_gl; in buf_lo_scan_elements() local
816 bh_ip = gfs2_meta_new(gl, blkno); in buf_lo_scan_elements()
903 struct gfs2_glock *gl; in gfs2_drain_revokes() local
[all …]
super.c
938 struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl; in gfs2_drop_inode() local
939 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) in gfs2_drop_inode()
951 struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl; in gfs2_drop_inode() local
953 gfs2_glock_hold(gl); in gfs2_drop_inode()
954 if (!gfs2_queue_delete_work(gl, 0)) in gfs2_drop_inode()
955 gfs2_glock_queue_put(gl); in gfs2_drop_inode()
1086 struct gfs2_glock *gl = ip->i_gl; in gfs2_final_release_pages() local
1091 if (atomic_read(&gl->gl_revokes) == 0) { in gfs2_final_release_pages()
1092 clear_bit(GLF_LFLUSH, &gl->gl_flags); in gfs2_final_release_pages()
1093 clear_bit(GLF_DIRTY, &gl->gl_flags); in gfs2_final_release_pages()
[all …]
log.c
106 struct gfs2_glock *gl = NULL; in gfs2_ail1_start_one() local
135 if (gl == bd->bd_gl) in gfs2_ail1_start_one()
137 gl = bd->bd_gl; in gfs2_ail1_start_one()
776 struct gfs2_glock *gl = bd->bd_gl; in gfs2_add_revoke() local
779 if (atomic_inc_return(&gl->gl_revokes) == 1) in gfs2_add_revoke()
780 gfs2_glock_hold(gl); in gfs2_add_revoke()
785 set_bit(GLF_LFLUSH, &gl->gl_flags); in gfs2_add_revoke()
789 void gfs2_glock_remove_revoke(struct gfs2_glock *gl) in gfs2_glock_remove_revoke() argument
791 if (atomic_dec_return(&gl->gl_revokes) == 0) { in gfs2_glock_remove_revoke()
792 clear_bit(GLF_LFLUSH, &gl->gl_flags); in gfs2_glock_remove_revoke()
[all …]
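
gfs2_add_revoke() and gfs2_glock_remove_revoke() above implement a counter-backed pin: a glock reference is taken only on the 0 -> 1 edge of gl_revokes and dropped on the 1 -> 0 edge, so any number of outstanding revokes holds exactly one reference. A sketch of that shape (the put on the downward edge is truncated out of the listing above; it is assumed to mirror the hold):

static void demo_add_revoke(struct gfs2_glock *gl)
{
	if (atomic_inc_return(&gl->gl_revokes) == 1)
		gfs2_glock_hold(gl);	/* first revoke pins the glock */
	set_bit(GLF_LFLUSH, &gl->gl_flags);
}

static void demo_remove_revoke(struct gfs2_glock *gl)
{
	if (atomic_dec_return(&gl->gl_revokes) == 0) {
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		gfs2_glock_queue_put(gl);	/* assumed counterpart */
	}
}
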
glops.h
25 extern int gfs2_inode_metasync(struct gfs2_glock *gl);
26 extern void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync);
trans.h
44 extern void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh);
45 extern void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh);
log.h
86 extern void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
94 extern void gfs2_glock_remove_revoke(struct gfs2_glock *gl);
inode.c
867 struct gfs2_glock *gl; in __gfs2_lookup() local
878 gl = GFS2_I(inode)->i_gl; in __gfs2_lookup()
879 error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &gh); in __gfs2_lookup()
1851 struct gfs2_glock *gl; in gfs2_permission() local
1856 gl = rcu_dereference_check(ip->i_gl, !may_not_block); in gfs2_permission()
1857 if (unlikely(!gl)) { in gfs2_permission()
1862 if (gfs2_glock_is_locked_by_me(gl) == NULL) { in gfs2_permission()
1865 error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); in gfs2_permission()
2139 struct gfs2_glock *gl = ip->i_gl; in gfs2_update_time() local
2143 gh = gfs2_glock_is_locked_by_me(gl); in gfs2_update_time()
[all …]
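
gfs2_permission() above shows the "lock only if not already held" idiom: a shared holder is enqueued only when gfs2_glock_is_locked_by_me() finds none, and only what was taken is dropped. A minimal sketch of that shape:

static int demo_shared_access(struct gfs2_glock *gl)
{
	struct gfs2_holder gh;
	bool locked = false;
	int error;

	if (gfs2_glock_is_locked_by_me(gl) == NULL) {
		error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
		if (error)
			return error;
		locked = true;
	}

	/* ... access state protected by the shared lock ... */

	if (locked)
		gfs2_glock_dq_uninit(&gh);
	return 0;
}
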
file.c
1459 struct gfs2_glock *gl = fl_gh->gh_gl; in __flock_holder_uninit() local
1466 gfs2_glock_hold(gl); in __flock_holder_uninit()
1470 gfs2_glock_put(gl); in __flock_holder_uninit()
1478 struct gfs2_glock *gl; in do_flock() local
1503 &gfs2_flock_glops, CREATE, &gl); in do_flock()
1507 gfs2_holder_init(gl, state, flags, fl_gh); in do_flock()
1509 gfs2_glock_put(gl); in do_flock()
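
do_flock() above performs a small reference dance: gfs2_glock_get() returns the glock with a creation reference, gfs2_holder_init() takes its own reference, and the creation reference is then dropped at once. A sketch of that sequence, assuming the argument order visible in this listing:

static int demo_get_flock_glock(struct gfs2_sbd *sdp, u64 glnum,
				struct gfs2_holder *fl_gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, glnum, &gfs2_flock_glops, CREATE, &gl);
	if (error)
		return error;

	gfs2_holder_init(gl, LM_ST_SHARED, 0, fl_gh);
	gfs2_glock_put(gl);	/* the holder now owns its own reference */
	return 0;
}
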
sys.c
294 struct gfs2_glock *gl; in demote_rq_store() local
329 rv = gfs2_glock_get(sdp, glnum, glops, 0, &gl); in demote_rq_store()
332 gfs2_glock_cb(gl, glmode); in demote_rq_store()
333 gfs2_glock_put(gl); in demote_rq_store()
rgrp.h
34 extern int gfs2_rgrp_go_instantiate(struct gfs2_glock *gl);
recovery.c
36 struct gfs2_glock *gl = ip->i_gl; in gfs2_replay_read_block() local
50 *bh = gfs2_meta_ra(gl, dblock, extlen); in gfs2_replay_read_block()
bmap.c
296 static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end) in gfs2_metapath_ra() argument
306 rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE); in gfs2_metapath_ra()
606 struct gfs2_glock *gl, unsigned int i, in gfs2_indirect_init() argument
614 mp->mp_bh[i] = gfs2_meta_new(gl, bn); in gfs2_indirect_init()
615 gfs2_trans_add_meta(gl, mp->mp_bh[i]); in gfs2_indirect_init()
dir.c
1483 struct gfs2_glock *gl = ip->i_gl; in gfs2_dir_readahead() local
1503 bh = gfs2_getbuf(gl, blocknr, 1); in gfs2_dir_readahead()