Lines Matching refs:gl

54 	struct gfs2_glock *gl;		/* current glock struct        */  member
58 typedef void (*glock_examiner) (struct gfs2_glock * gl);
60 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
116 static void wake_up_glock(struct gfs2_glock *gl) in wake_up_glock() argument
118 wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name); in wake_up_glock()
121 __wake_up(wq, TASK_NORMAL, 1, &gl->gl_name); in wake_up_glock()
126 struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu); in gfs2_glock_dealloc() local
128 if (gl->gl_ops->go_flags & GLOF_ASPACE) { in gfs2_glock_dealloc()
129 kmem_cache_free(gfs2_glock_aspace_cachep, gl); in gfs2_glock_dealloc()
131 kfree(gl->gl_lksb.sb_lvbptr); in gfs2_glock_dealloc()
132 kmem_cache_free(gfs2_glock_cachep, gl); in gfs2_glock_dealloc()
136 void gfs2_glock_free(struct gfs2_glock *gl) in gfs2_glock_free() argument
138 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_glock_free()
140 BUG_ON(atomic_read(&gl->gl_revokes)); in gfs2_glock_free()
141 rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms); in gfs2_glock_free()
143 wake_up_glock(gl); in gfs2_glock_free()
144 call_rcu(&gl->gl_rcu, gfs2_glock_dealloc); in gfs2_glock_free()
155 void gfs2_glock_hold(struct gfs2_glock *gl) in gfs2_glock_hold() argument
157 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); in gfs2_glock_hold()
158 lockref_get(&gl->gl_lockref); in gfs2_glock_hold()
168 static int demote_ok(const struct gfs2_glock *gl) in demote_ok() argument
170 const struct gfs2_glock_operations *glops = gl->gl_ops; in demote_ok()
172 if (gl->gl_state == LM_ST_UNLOCKED) in demote_ok()
174 if (!list_empty(&gl->gl_holders)) in demote_ok()
177 return glops->go_demote_ok(gl); in demote_ok()
182 void gfs2_glock_add_to_lru(struct gfs2_glock *gl) in gfs2_glock_add_to_lru() argument
184 if (!(gl->gl_ops->go_flags & GLOF_LRU)) in gfs2_glock_add_to_lru()
189 list_del(&gl->gl_lru); in gfs2_glock_add_to_lru()
190 list_add_tail(&gl->gl_lru, &lru_list); in gfs2_glock_add_to_lru()
192 if (!test_bit(GLF_LRU, &gl->gl_flags)) { in gfs2_glock_add_to_lru()
193 set_bit(GLF_LRU, &gl->gl_flags); in gfs2_glock_add_to_lru()
200 static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl) in gfs2_glock_remove_from_lru() argument
202 if (!(gl->gl_ops->go_flags & GLOF_LRU)) in gfs2_glock_remove_from_lru()
206 if (test_bit(GLF_LRU, &gl->gl_flags)) { in gfs2_glock_remove_from_lru()
207 list_del_init(&gl->gl_lru); in gfs2_glock_remove_from_lru()
209 clear_bit(GLF_LRU, &gl->gl_flags); in gfs2_glock_remove_from_lru()
218 static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) { in __gfs2_glock_queue_work() argument
219 if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) { in __gfs2_glock_queue_work()
226 GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2); in __gfs2_glock_queue_work()
227 gl->gl_lockref.count--; in __gfs2_glock_queue_work()
231 static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) { in gfs2_glock_queue_work() argument
232 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_queue_work()
233 __gfs2_glock_queue_work(gl, delay); in gfs2_glock_queue_work()
234 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_queue_work()
237 static void __gfs2_glock_put(struct gfs2_glock *gl) in __gfs2_glock_put() argument
239 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in __gfs2_glock_put()
240 struct address_space *mapping = gfs2_glock2aspace(gl); in __gfs2_glock_put()
242 lockref_mark_dead(&gl->gl_lockref); in __gfs2_glock_put()
244 gfs2_glock_remove_from_lru(gl); in __gfs2_glock_put()
245 spin_unlock(&gl->gl_lockref.lock); in __gfs2_glock_put()
246 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); in __gfs2_glock_put()
247 GLOCK_BUG_ON(gl, mapping && mapping->nrpages); in __gfs2_glock_put()
248 trace_gfs2_glock_put(gl); in __gfs2_glock_put()
249 sdp->sd_lockstruct.ls_ops->lm_put_lock(gl); in __gfs2_glock_put()
255 void gfs2_glock_queue_put(struct gfs2_glock *gl) in gfs2_glock_queue_put() argument
257 gfs2_glock_queue_work(gl, 0); in gfs2_glock_queue_put()
266 void gfs2_glock_put(struct gfs2_glock *gl) in gfs2_glock_put() argument
268 if (lockref_put_or_lock(&gl->gl_lockref)) in gfs2_glock_put()
271 __gfs2_glock_put(gl); in gfs2_glock_put()
282 static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh) in may_grant() argument
284 …const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_l… in may_grant()
288 if (gl->gl_state == gh->gh_state) in may_grant()
292 if (gl->gl_state == LM_ST_EXCLUSIVE) { in may_grant()
298 if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY)) in may_grant()
320 static void do_error(struct gfs2_glock *gl, const int ret) in do_error() argument
324 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { in do_error()
347 static int do_promote(struct gfs2_glock *gl) in do_promote() argument
348 __releases(&gl->gl_lockref.lock) in do_promote()
349 __acquires(&gl->gl_lockref.lock) in do_promote()
351 const struct gfs2_glock_operations *glops = gl->gl_ops; in do_promote()
356 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { in do_promote()
359 if (may_grant(gl, gh)) { in do_promote()
360 if (gh->gh_list.prev == &gl->gl_holders && in do_promote()
362 spin_unlock(&gl->gl_lockref.lock); in do_promote()
365 spin_lock(&gl->gl_lockref.lock); in do_promote()
385 if (gh->gh_list.prev == &gl->gl_holders) in do_promote()
387 do_error(gl, 0); in do_promote()
398 static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl) in find_first_waiter() argument
402 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in find_first_waiter()
416 static void state_change(struct gfs2_glock *gl, unsigned int new_state) in state_change() argument
420 held1 = (gl->gl_state != LM_ST_UNLOCKED); in state_change()
424 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); in state_change()
426 gl->gl_lockref.count++; in state_change()
428 gl->gl_lockref.count--; in state_change()
430 if (held1 && held2 && list_empty(&gl->gl_holders)) in state_change()
431 clear_bit(GLF_QUEUED, &gl->gl_flags); in state_change()
433 if (new_state != gl->gl_target) in state_change()
435 gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR, in state_change()
437 gl->gl_state = new_state; in state_change()
438 gl->gl_tchange = jiffies; in state_change()
441 static void gfs2_demote_wake(struct gfs2_glock *gl) in gfs2_demote_wake() argument
443 gl->gl_demote_state = LM_ST_EXCLUSIVE; in gfs2_demote_wake()
444 clear_bit(GLF_DEMOTE, &gl->gl_flags); in gfs2_demote_wake()
446 wake_up_bit(&gl->gl_flags, GLF_DEMOTE); in gfs2_demote_wake()
456 static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) in finish_xmote() argument
458 const struct gfs2_glock_operations *glops = gl->gl_ops; in finish_xmote()
463 spin_lock(&gl->gl_lockref.lock); in finish_xmote()
464 trace_gfs2_glock_state_change(gl, state); in finish_xmote()
465 state_change(gl, state); in finish_xmote()
466 gh = find_first_waiter(gl); in finish_xmote()
469 if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) && in finish_xmote()
470 state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED) in finish_xmote()
471 gl->gl_target = LM_ST_UNLOCKED; in finish_xmote()
474 if (unlikely(state != gl->gl_target)) { in finish_xmote()
475 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) { in finish_xmote()
479 list_move_tail(&gh->gh_list, &gl->gl_holders); in finish_xmote()
480 gh = find_first_waiter(gl); in finish_xmote()
481 gl->gl_target = gh->gh_state; in finish_xmote()
487 gl->gl_target = gl->gl_state; in finish_xmote()
488 do_error(gl, ret); in finish_xmote()
496 do_xmote(gl, gh, gl->gl_target); in finish_xmote()
501 do_xmote(gl, gh, LM_ST_UNLOCKED); in finish_xmote()
504 fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n", in finish_xmote()
505 gl->gl_target, state); in finish_xmote()
506 GLOCK_BUG_ON(gl, 1); in finish_xmote()
508 spin_unlock(&gl->gl_lockref.lock); in finish_xmote()
513 if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) in finish_xmote()
514 gfs2_demote_wake(gl); in finish_xmote()
517 spin_unlock(&gl->gl_lockref.lock); in finish_xmote()
518 rv = glops->go_xmote_bh(gl, gh); in finish_xmote()
519 spin_lock(&gl->gl_lockref.lock); in finish_xmote()
521 do_error(gl, rv); in finish_xmote()
525 rv = do_promote(gl); in finish_xmote()
530 clear_bit(GLF_LOCK, &gl->gl_flags); in finish_xmote()
532 spin_unlock(&gl->gl_lockref.lock); in finish_xmote()
543 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target) in do_xmote() argument
544 __releases(&gl->gl_lockref.lock) in do_xmote()
545 __acquires(&gl->gl_lockref.lock) in do_xmote()
547 const struct gfs2_glock_operations *glops = gl->gl_ops; in do_xmote()
548 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in do_xmote()
557 GLOCK_BUG_ON(gl, gl->gl_state == target); in do_xmote()
558 GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target); in do_xmote()
561 set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); in do_xmote()
562 do_error(gl, 0); /* Fail queued try locks */ in do_xmote()
564 gl->gl_req = target; in do_xmote()
565 set_bit(GLF_BLOCKING, &gl->gl_flags); in do_xmote()
566 if ((gl->gl_req == LM_ST_UNLOCKED) || in do_xmote()
567 (gl->gl_state == LM_ST_EXCLUSIVE) || in do_xmote()
569 clear_bit(GLF_BLOCKING, &gl->gl_flags); in do_xmote()
570 spin_unlock(&gl->gl_lockref.lock); in do_xmote()
572 glops->go_sync(gl); in do_xmote()
573 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) in do_xmote()
574 glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA); in do_xmote()
575 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); in do_xmote()
577 gfs2_glock_hold(gl); in do_xmote()
580 ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags); in do_xmote()
581 if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED && in do_xmote()
584 finish_xmote(gl, target); in do_xmote()
585 gfs2_glock_queue_work(gl, 0); in do_xmote()
589 GLOCK_BUG_ON(gl, !test_bit(SDF_WITHDRAWN, in do_xmote()
593 finish_xmote(gl, target); in do_xmote()
594 gfs2_glock_queue_work(gl, 0); in do_xmote()
597 spin_lock(&gl->gl_lockref.lock); in do_xmote()
605 static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl) in find_first_holder() argument
609 if (!list_empty(&gl->gl_holders)) { in find_first_holder()
610 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list); in find_first_holder()
624 static void run_queue(struct gfs2_glock *gl, const int nonblock) in run_queue() argument
625 __releases(&gl->gl_lockref.lock) in run_queue()
626 __acquires(&gl->gl_lockref.lock) in run_queue()
631 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) in run_queue()
634 GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)); in run_queue()
636 if (test_bit(GLF_DEMOTE, &gl->gl_flags) && in run_queue()
637 gl->gl_demote_state != gl->gl_state) { in run_queue()
638 if (find_first_holder(gl)) in run_queue()
642 set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); in run_queue()
643 GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); in run_queue()
644 gl->gl_target = gl->gl_demote_state; in run_queue()
646 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) in run_queue()
647 gfs2_demote_wake(gl); in run_queue()
648 ret = do_promote(gl); in run_queue()
653 gh = find_first_waiter(gl); in run_queue()
654 gl->gl_target = gh->gh_state; in run_queue()
656 do_error(gl, 0); /* Fail queued try locks */ in run_queue()
658 do_xmote(gl, gh, gl->gl_target); in run_queue()
663 clear_bit(GLF_LOCK, &gl->gl_flags); in run_queue()
665 gl->gl_lockref.count++; in run_queue()
666 __gfs2_glock_queue_work(gl, 0); in run_queue()
670 clear_bit(GLF_LOCK, &gl->gl_flags); in run_queue()
677 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete); in delete_work_func() local
678 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in delete_work_func()
680 u64 no_addr = gl->gl_name.ln_number; in delete_work_func()
685 if (test_bit(GLF_INODE_CREATING, &gl->gl_flags)) in delete_work_func()
694 gfs2_glock_put(gl); in delete_work_func()
700 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); in glock_work_func() local
703 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) { in glock_work_func()
704 finish_xmote(gl, gl->gl_reply); in glock_work_func()
707 spin_lock(&gl->gl_lockref.lock); in glock_work_func()
708 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && in glock_work_func()
709 gl->gl_state != LM_ST_UNLOCKED && in glock_work_func()
710 gl->gl_demote_state != LM_ST_EXCLUSIVE) { in glock_work_func()
713 holdtime = gl->gl_tchange + gl->gl_hold_time; in glock_work_func()
718 clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); in glock_work_func()
719 set_bit(GLF_DEMOTE, &gl->gl_flags); in glock_work_func()
722 run_queue(gl, 0); in glock_work_func()
726 if (gl->gl_name.ln_type != LM_TYPE_INODE) in glock_work_func()
728 __gfs2_glock_queue_work(gl, delay); in glock_work_func()
736 gl->gl_lockref.count -= drop_refs; in glock_work_func()
737 if (!gl->gl_lockref.count) { in glock_work_func()
738 __gfs2_glock_put(gl); in glock_work_func()
741 spin_unlock(&gl->gl_lockref.lock); in glock_work_func()
749 struct gfs2_glock *gl; in find_insert_glock() local
759 gl = rhashtable_lookup_get_insert_fast(&gl_hash_table, in find_insert_glock()
761 if (IS_ERR(gl)) in find_insert_glock()
764 gl = rhashtable_lookup_fast(&gl_hash_table, in find_insert_glock()
767 if (gl && !lockref_get_not_dead(&gl->gl_lockref)) { in find_insert_glock()
775 return gl; in find_insert_glock()
799 struct gfs2_glock *gl, *tmp; in gfs2_glock_get() local
804 gl = find_insert_glock(&name, NULL); in gfs2_glock_get()
805 if (gl) { in gfs2_glock_get()
806 *glp = gl; in gfs2_glock_get()
816 gl = kmem_cache_alloc(cachep, GFP_NOFS); in gfs2_glock_get()
817 if (!gl) in gfs2_glock_get()
820 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb)); in gfs2_glock_get()
823 gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS); in gfs2_glock_get()
824 if (!gl->gl_lksb.sb_lvbptr) { in gfs2_glock_get()
825 kmem_cache_free(cachep, gl); in gfs2_glock_get()
831 gl->gl_node.next = NULL; in gfs2_glock_get()
832 gl->gl_flags = 0; in gfs2_glock_get()
833 gl->gl_name = name; in gfs2_glock_get()
834 gl->gl_lockref.count = 1; in gfs2_glock_get()
835 gl->gl_state = LM_ST_UNLOCKED; in gfs2_glock_get()
836 gl->gl_target = LM_ST_UNLOCKED; in gfs2_glock_get()
837 gl->gl_demote_state = LM_ST_EXCLUSIVE; in gfs2_glock_get()
838 gl->gl_ops = glops; in gfs2_glock_get()
839 gl->gl_dstamp = 0; in gfs2_glock_get()
842 gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type]; in gfs2_glock_get()
844 gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0; in gfs2_glock_get()
845 gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0; in gfs2_glock_get()
846 gl->gl_tchange = jiffies; in gfs2_glock_get()
847 gl->gl_object = NULL; in gfs2_glock_get()
848 gl->gl_hold_time = GL_GLOCK_DFT_HOLD; in gfs2_glock_get()
849 INIT_DELAYED_WORK(&gl->gl_work, glock_work_func); in gfs2_glock_get()
850 INIT_WORK(&gl->gl_delete, delete_work_func); in gfs2_glock_get()
852 mapping = gfs2_glock2aspace(gl); in gfs2_glock_get()
862 tmp = find_insert_glock(&name, gl); in gfs2_glock_get()
864 *glp = gl; in gfs2_glock_get()
874 kfree(gl->gl_lksb.sb_lvbptr); in gfs2_glock_get()
875 kmem_cache_free(cachep, gl); in gfs2_glock_get()
892 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags, in gfs2_holder_init() argument
896 gh->gh_gl = gl; in gfs2_holder_init()
903 gfs2_glock_hold(gl); in gfs2_holder_init()
940 static void gfs2_glock_update_hold_time(struct gfs2_glock *gl, in gfs2_glock_update_hold_time() argument
946 gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR, in gfs2_glock_update_hold_time()
1062 static void handle_callback(struct gfs2_glock *gl, unsigned int state, in handle_callback() argument
1067 set_bit(bit, &gl->gl_flags); in handle_callback()
1068 if (gl->gl_demote_state == LM_ST_EXCLUSIVE) { in handle_callback()
1069 gl->gl_demote_state = state; in handle_callback()
1070 gl->gl_demote_time = jiffies; in handle_callback()
1071 } else if (gl->gl_demote_state != LM_ST_UNLOCKED && in handle_callback()
1072 gl->gl_demote_state != state) { in handle_callback()
1073 gl->gl_demote_state = LM_ST_UNLOCKED; in handle_callback()
1075 if (gl->gl_ops->go_callback) in handle_callback()
1076 gl->gl_ops->go_callback(gl, remote); in handle_callback()
1077 trace_gfs2_demote_rq(gl, remote); in handle_callback()
1110 __releases(&gl->gl_lockref.lock) in add_to_queue()
1111 __acquires(&gl->gl_lockref.lock) in add_to_queue()
1113 struct gfs2_glock *gl = gh->gh_gl; in add_to_queue() local
1114 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in add_to_queue()
1119 GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL); in add_to_queue()
1121 GLOCK_BUG_ON(gl, true); in add_to_queue()
1124 if (test_bit(GLF_LOCK, &gl->gl_flags)) in add_to_queue()
1125 try_futile = !may_grant(gl, gh); in add_to_queue()
1126 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) in add_to_queue()
1130 list_for_each_entry(gh2, &gl->gl_holders, gh_list) { in add_to_queue()
1146 set_bit(GLF_QUEUED, &gl->gl_flags); in add_to_queue()
1148 gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT); in add_to_queue()
1149 gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT); in add_to_queue()
1151 list_add_tail(&gh->gh_list, &gl->gl_holders); in add_to_queue()
1158 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list); in add_to_queue()
1160 spin_unlock(&gl->gl_lockref.lock); in add_to_queue()
1162 sdp->sd_lockstruct.ls_ops->lm_cancel(gl); in add_to_queue()
1163 spin_lock(&gl->gl_lockref.lock); in add_to_queue()
1176 gfs2_dump_glock(NULL, gl, true); in add_to_queue()
1191 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_nq() local
1192 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_glock_nq()
1198 if (test_bit(GLF_LRU, &gl->gl_flags)) in gfs2_glock_nq()
1199 gfs2_glock_remove_from_lru(gl); in gfs2_glock_nq()
1201 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_nq()
1204 test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) { in gfs2_glock_nq()
1205 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); in gfs2_glock_nq()
1206 gl->gl_lockref.count++; in gfs2_glock_nq()
1207 __gfs2_glock_queue_work(gl, 0); in gfs2_glock_nq()
1209 run_queue(gl, 1); in gfs2_glock_nq()
1210 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_nq()
1238 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_dq() local
1239 const struct gfs2_glock_operations *glops = gl->gl_ops; in gfs2_glock_dq()
1243 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1245 handle_callback(gl, LM_ST_UNLOCKED, 0, false); in gfs2_glock_dq()
1249 if (find_first_holder(gl) == NULL) { in gfs2_glock_dq()
1251 GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags)); in gfs2_glock_dq()
1252 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1254 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1255 clear_bit(GLF_LOCK, &gl->gl_flags); in gfs2_glock_dq()
1257 if (list_empty(&gl->gl_holders) && in gfs2_glock_dq()
1258 !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && in gfs2_glock_dq()
1259 !test_bit(GLF_DEMOTE, &gl->gl_flags)) in gfs2_glock_dq()
1262 if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl)) in gfs2_glock_dq()
1263 gfs2_glock_add_to_lru(gl); in gfs2_glock_dq()
1267 gl->gl_lockref.count++; in gfs2_glock_dq()
1268 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && in gfs2_glock_dq()
1269 !test_bit(GLF_DEMOTE, &gl->gl_flags) && in gfs2_glock_dq()
1270 gl->gl_name.ln_type == LM_TYPE_INODE) in gfs2_glock_dq()
1271 delay = gl->gl_hold_time; in gfs2_glock_dq()
1272 __gfs2_glock_queue_work(gl, delay); in gfs2_glock_dq()
1274 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1279 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_dq_wait() local
1282 wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE); in gfs2_glock_dq_wait()
1313 struct gfs2_glock *gl; in gfs2_glock_nq_num() local
1316 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl); in gfs2_glock_nq_num()
1318 error = gfs2_glock_nq_init(gl, state, flags, gh); in gfs2_glock_nq_num()
1319 gfs2_glock_put(gl); in gfs2_glock_nq_num()
1433 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state) in gfs2_glock_cb() argument
1439 gfs2_glock_hold(gl); in gfs2_glock_cb()
1440 holdtime = gl->gl_tchange + gl->gl_hold_time; in gfs2_glock_cb()
1441 if (test_bit(GLF_QUEUED, &gl->gl_flags) && in gfs2_glock_cb()
1442 gl->gl_name.ln_type == LM_TYPE_INODE) { in gfs2_glock_cb()
1445 if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) in gfs2_glock_cb()
1446 delay = gl->gl_hold_time; in gfs2_glock_cb()
1449 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_cb()
1450 handle_callback(gl, state, delay, true); in gfs2_glock_cb()
1451 __gfs2_glock_queue_work(gl, delay); in gfs2_glock_cb()
1452 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_cb()
1466 static int gfs2_should_freeze(const struct gfs2_glock *gl) in gfs2_should_freeze() argument
1470 if (gl->gl_reply & ~LM_OUT_ST_MASK) in gfs2_should_freeze()
1472 if (gl->gl_target == LM_ST_UNLOCKED) in gfs2_should_freeze()
1475 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in gfs2_should_freeze()
1494 void gfs2_glock_complete(struct gfs2_glock *gl, int ret) in gfs2_glock_complete() argument
1496 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; in gfs2_glock_complete()
1498 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_complete()
1499 gl->gl_reply = ret; in gfs2_glock_complete()
1502 if (gfs2_should_freeze(gl)) { in gfs2_glock_complete()
1503 set_bit(GLF_FROZEN, &gl->gl_flags); in gfs2_glock_complete()
1504 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_complete()
1509 gl->gl_lockref.count++; in gfs2_glock_complete()
1510 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); in gfs2_glock_complete()
1511 __gfs2_glock_queue_work(gl, 0); in gfs2_glock_complete()
1512 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_complete()
1548 struct gfs2_glock *gl; in gfs2_dispose_glock_lru() local
1553 gl = list_entry(list->next, struct gfs2_glock, gl_lru); in gfs2_dispose_glock_lru()
1554 list_del_init(&gl->gl_lru); in gfs2_dispose_glock_lru()
1555 clear_bit(GLF_LRU, &gl->gl_flags); in gfs2_dispose_glock_lru()
1556 if (!spin_trylock(&gl->gl_lockref.lock)) { in gfs2_dispose_glock_lru()
1558 list_add(&gl->gl_lru, &lru_list); in gfs2_dispose_glock_lru()
1559 set_bit(GLF_LRU, &gl->gl_flags); in gfs2_dispose_glock_lru()
1563 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { in gfs2_dispose_glock_lru()
1564 spin_unlock(&gl->gl_lockref.lock); in gfs2_dispose_glock_lru()
1567 gl->gl_lockref.count++; in gfs2_dispose_glock_lru()
1568 if (demote_ok(gl)) in gfs2_dispose_glock_lru()
1569 handle_callback(gl, LM_ST_UNLOCKED, 0, false); in gfs2_dispose_glock_lru()
1570 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags)); in gfs2_dispose_glock_lru()
1571 __gfs2_glock_queue_work(gl, 0); in gfs2_dispose_glock_lru()
1572 spin_unlock(&gl->gl_lockref.lock); in gfs2_dispose_glock_lru()
1588 struct gfs2_glock *gl; in gfs2_scan_glock_lru() local
1595 gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru); in gfs2_scan_glock_lru()
1598 if (!test_bit(GLF_LOCK, &gl->gl_flags)) { in gfs2_scan_glock_lru()
1599 list_move(&gl->gl_lru, &dispose); in gfs2_scan_glock_lru()
1605 list_move(&gl->gl_lru, &skipped); in gfs2_scan_glock_lru()
1648 struct gfs2_glock *gl; in glock_hash_walk() local
1656 while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) in glock_hash_walk()
1657 if (gl->gl_name.ln_sbd == sdp && in glock_hash_walk()
1658 lockref_get_not_dead(&gl->gl_lockref)) in glock_hash_walk()
1659 examiner(gl); in glock_hash_walk()
1662 } while (cond_resched(), gl == ERR_PTR(-EAGAIN)); in glock_hash_walk()
1673 static void thaw_glock(struct gfs2_glock *gl) in thaw_glock() argument
1675 if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) { in thaw_glock()
1676 gfs2_glock_put(gl); in thaw_glock()
1679 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); in thaw_glock()
1680 gfs2_glock_queue_work(gl, 0); in thaw_glock()
1689 static void clear_glock(struct gfs2_glock *gl) in clear_glock() argument
1691 gfs2_glock_remove_from_lru(gl); in clear_glock()
1693 spin_lock(&gl->gl_lockref.lock); in clear_glock()
1694 if (gl->gl_state != LM_ST_UNLOCKED) in clear_glock()
1695 handle_callback(gl, LM_ST_UNLOCKED, 0, false); in clear_glock()
1696 __gfs2_glock_queue_work(gl, 0); in clear_glock()
1697 spin_unlock(&gl->gl_lockref.lock); in clear_glock()
1711 static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid) in dump_glock() argument
1713 spin_lock(&gl->gl_lockref.lock); in dump_glock()
1714 gfs2_dump_glock(seq, gl, fsid); in dump_glock()
1715 spin_unlock(&gl->gl_lockref.lock); in dump_glock()
1718 static void dump_glock_func(struct gfs2_glock *gl) in dump_glock_func() argument
1720 dump_glock(NULL, gl, true); in dump_glock_func()
1745 struct gfs2_glock *gl = ip->i_gl; in gfs2_glock_finish_truncate() local
1749 gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0); in gfs2_glock_finish_truncate()
1751 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_finish_truncate()
1752 clear_bit(GLF_LOCK, &gl->gl_flags); in gfs2_glock_finish_truncate()
1753 run_queue(gl, 1); in gfs2_glock_finish_truncate()
1754 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_finish_truncate()
1828 static const char *gflags2str(char *buf, const struct gfs2_glock *gl) in gflags2str() argument
1830 const unsigned long *gflags = &gl->gl_flags; in gflags2str()
1857 if (gl->gl_object) in gflags2str()
1883 void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid) in gfs2_dump_glock() argument
1885 const struct gfs2_glock_operations *glops = gl->gl_ops; in gfs2_dump_glock()
1889 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_dump_glock()
1895 dtime = jiffies - gl->gl_demote_time; in gfs2_dump_glock()
1897 if (!test_bit(GLF_DEMOTE, &gl->gl_flags)) in gfs2_dump_glock()
1900 "v:%d r:%d m:%ld\n", fs_id_buf, state2str(gl->gl_state), in gfs2_dump_glock()
1901 gl->gl_name.ln_type, in gfs2_dump_glock()
1902 (unsigned long long)gl->gl_name.ln_number, in gfs2_dump_glock()
1903 gflags2str(gflags_buf, gl), in gfs2_dump_glock()
1904 state2str(gl->gl_target), in gfs2_dump_glock()
1905 state2str(gl->gl_demote_state), dtime, in gfs2_dump_glock()
1906 atomic_read(&gl->gl_ail_count), in gfs2_dump_glock()
1907 atomic_read(&gl->gl_revokes), in gfs2_dump_glock()
1908 (int)gl->gl_lockref.count, gl->gl_hold_time); in gfs2_dump_glock()
1910 list_for_each_entry(gh, &gl->gl_holders, gh_list) in gfs2_dump_glock()
1913 if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump) in gfs2_dump_glock()
1914 glops->go_dump(seq, gl, fs_id_buf); in gfs2_dump_glock()
1919 struct gfs2_glock *gl = iter_ptr; in gfs2_glstats_seq_show() local
1922 gl->gl_name.ln_type, in gfs2_glstats_seq_show()
1923 (unsigned long long)gl->gl_name.ln_number, in gfs2_glstats_seq_show()
1924 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT], in gfs2_glstats_seq_show()
1925 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR], in gfs2_glstats_seq_show()
1926 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB], in gfs2_glstats_seq_show()
1927 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB], in gfs2_glstats_seq_show()
1928 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT], in gfs2_glstats_seq_show()
1929 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR], in gfs2_glstats_seq_show()
1930 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT], in gfs2_glstats_seq_show()
1931 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]); in gfs2_glstats_seq_show()
2036 struct gfs2_glock *gl = gi->gl; in gfs2_glock_iter_next() local
2038 if (gl) { in gfs2_glock_iter_next()
2041 if (!lockref_put_not_zero(&gl->gl_lockref)) in gfs2_glock_iter_next()
2042 gfs2_glock_queue_put(gl); in gfs2_glock_iter_next()
2045 gl = rhashtable_walk_next(&gi->hti); in gfs2_glock_iter_next()
2046 if (IS_ERR_OR_NULL(gl)) { in gfs2_glock_iter_next()
2047 if (gl == ERR_PTR(-EAGAIN)) { in gfs2_glock_iter_next()
2051 gl = NULL; in gfs2_glock_iter_next()
2054 if (gl->gl_name.ln_sbd != gi->sdp) in gfs2_glock_iter_next()
2057 if (!lockref_get_not_dead(&gl->gl_lockref)) in gfs2_glock_iter_next()
2061 if (__lockref_is_dead(&gl->gl_lockref)) in gfs2_glock_iter_next()
2066 gi->gl = gl; in gfs2_glock_iter_next()
2091 return gi->gl; in gfs2_glock_seq_start()
2102 return gi->gl; in gfs2_glock_seq_next()
2181 gi->gl = NULL; in __gfs2_glocks_open()
2197 if (gi->gl) in gfs2_glocks_release()
2198 gfs2_glock_put(gi->gl); in gfs2_glocks_release()
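
The matches above already show the caller-side pattern end to end: gfs2_glock_get() looks up or creates a glock (the gfs2_glock_nq_num() match at source line 1316 passes CREATE), gfs2_glock_nq_init() queues a holder on it (line 1318), gfs2_glock_put() drops the lookup reference (line 1319), and gfs2_glock_dq() later releases the holder (the gfs2_glock_dq() matches starting at line 1238). The fragment below is only a sketch of that sequence for orientation, not code taken from glock.c: the wrapper name example_with_glock, the LM_ST_SHARED state, and gfs2_holder_uninit() are assumptions based on the usual companion declarations in glock.h rather than lines in the matches, and error handling is reduced to the minimum.

	/*
	 * Sketch only: the acquire/use/release sequence implied by the
	 * matches above (gfs2_glock_get, gfs2_glock_nq_init, gfs2_glock_dq,
	 * gfs2_glock_put).  gfs2_holder_uninit() is assumed to be the usual
	 * counterpart of gfs2_holder_init() from glock.h.
	 */
	static int example_with_glock(struct gfs2_sbd *sdp, u64 number,
				      const struct gfs2_glock_operations *glops)
	{
		struct gfs2_glock *gl;
		struct gfs2_holder gh;
		int error;

		/* Look up (or create) the glock; takes a reference on success. */
		error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
		if (error)
			return error;

		/* Initialise a holder and queue it on the glock. */
		error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);

		/* A granted holder pins the glock, so drop the lookup reference. */
		gfs2_glock_put(gl);
		if (error)
			return error;

		/* ... work protected by the glock happens here ... */

		/* Dequeue the holder; the glock may then be added to the LRU. */
		gfs2_glock_dq(&gh);
		gfs2_holder_uninit(&gh);
		return 0;
	}

This mirrors what the gfs2_glock_nq_num() matches do internally, minus the state and flags arguments that the real helper passes through from its caller.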