/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	struct gfs2_sbd *sdp;		/* incore superblock           */
	struct rhashtable_iter hti;	/* rhashtable iterator         */
	struct gfs2_glock *gl;		/* current glock struct        */
	loff_t last_pos;		/* last position               */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       BIT(GFS2_GL_HASH_SHIFT)

static struct rhashtable_params ht_parms = {
	.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
	.key_len = sizeof(struct lm_lockname),
	.key_offset = offsetof(struct gfs2_glock, gl_name),
	.head_offset = offsetof(struct gfs2_glock, gl_node),
};

static struct rhashtable gl_hash_table;
static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	} else {
		kfree(gl->gl_lksb.sb_lvbptr);
		kmem_cache_free(gfs2_glock_cachep, gl);
	}
}

void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

static void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
	lockref_get(&gl->gl_lockref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}


void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);

	if (!list_empty(&gl->gl_lru))
		list_del_init(&gl->gl_lru);
	else
		atomic_inc(&lru_count);

	list_add_tail(&gl->gl_lru, &lru_list);
	set_bit(GLF_LRU, &gl->gl_flags);
	spin_unlock(&lru_lock);
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	if (!list_empty(&gl->gl_lru)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
	}
	spin_unlock(&lru_lock);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	if (lockref_put_or_lock(&gl->gl_lockref))
		return;

	lockref_mark_dead(&gl->gl_lockref);

	gfs2_glock_remove_from_lru(gl);
	spin_unlock(&gl->gl_lockref.lock);
	rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
	trace_gfs2_glock_put(gl);
	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}
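
/*
 * To summarise the rules encoded in may_grant() (informal restatement, for
 * illustration only): an EX request, or any request queued behind a waiting
 * EX request, is granted only when it is at the head of the queue; a
 * request matching the current glock state is granted immediately; GL_EXACT
 * refuses any other state; under an EX glock, SH and DF requests may
 * piggy-back on a head request for the same state; and LM_FLAG_ANY accepts
 * any state other than unlocked.
 */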

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_atomic();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_error - Something unexpected has happened during a lock request
 *
 */

static void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_lockref.lock);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_lockref.lock);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
		if (held2)
			gl->gl_lockref.count++;
		else
			gl->gl_lockref.count--;
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	if (new_state != gl->gl_target)
		/* shorten our minimum hold time */
		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
				       GL_GLOCK_MIN_HOLD);
	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_atomic();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_lockref.lock);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			pr_err("wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_lockref.lock);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_lockref.lock);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_lockref.lock);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
	int ret;

	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	set_bit(GLF_BLOCKING, &gl->gl_flags);
	if ((gl->gl_req == LM_ST_UNLOCKED) ||
	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
		clear_bit(GLF_BLOCKING, &gl->gl_flags);
	spin_unlock(&gl->gl_lockref.lock);
	if (glops->go_sync)
		glops->go_sync(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (sdp->sd_lockstruct.ls_ops->lm_lock)	{
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
		    target == LM_ST_UNLOCKED &&
		    test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
			finish_xmote(gl, target);
			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
				gfs2_glock_put(gl);
		}
		else if (ret) {
			pr_err("lm_lock ret %d\n", ret);
			GLOCK_BUG_ON(gl, 1);
		}
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
	}

	spin_lock(&gl->gl_lockref.lock);
}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_atomic();
	gl->gl_lockref.count++;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gl->gl_lockref.count--;
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_atomic();
	return;
}

static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	/* If someone's using this glock to create a new dinode, the block must
	   have been freed by another node, then re-used, in which case our
	   iopen callback is too late after the fact. Ignore it. */
	if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
		goto out;

	inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (inode && !IS_ERR(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
out:
	gfs2_glock_put(gl);
}

static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	int drop_ref = 0;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_ref = 1;
	}
	spin_lock(&gl->gl_lockref.lock);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;

		holdtime = gl->gl_tchange + gl->gl_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;

		if (!delay) {
			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
			set_bit(GLF_DEMOTE, &gl->gl_flags);
		}
	}
	run_queue(gl, 0);
	spin_unlock(&gl->gl_lockref.lock);
	if (!delay)
		gfs2_glock_put(gl);
	else {
		if (gl->gl_name.ln_type != LM_TYPE_INODE)
			delay = 0;
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
			gfs2_glock_put(gl);
	}
	if (drop_ref)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number,
				    .ln_type = glops->go_type,
				    .ln_sbd = sdp };
	struct gfs2_glock *gl, *tmp = NULL;
	struct address_space *mapping;
	struct kmem_cache *cachep;
	int ret, tries = 0;

	rcu_read_lock();
	gl = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
	if (gl && !lockref_get_not_dead(&gl->gl_lockref))
		gl = NULL;
	rcu_read_unlock();

	*glp = gl;
	if (gl)
		return 0;
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		cachep = gfs2_glock_aspace_cachep;
	else
		cachep = gfs2_glock_cachep;
	gl = kmem_cache_alloc(cachep, GFP_NOFS);
	if (!gl)
		return -ENOMEM;

	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));

	if (glops->go_flags & GLOF_LVB) {
		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
		if (!gl->gl_lksb.sb_lvbptr) {
			kmem_cache_free(cachep, gl);
			return -ENOMEM;
		}
	}

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_node.next = NULL;
	gl->gl_flags = 0;
	gl->gl_name = name;
	gl->gl_lockref.count = 1;
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_ops = glops;
	gl->gl_dstamp = ktime_set(0, 0);
	preempt_disable();
	/* We use the global stats to estimate the initial per-glock stats */
	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
	preempt_enable();
	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->private_data = NULL;
		mapping->writeback_index = 0;
	}

again:
	ret = rhashtable_lookup_insert_fast(&gl_hash_table, &gl->gl_node,
					    ht_parms);
	if (ret == 0) {
		*glp = gl;
		return 0;
	}

	if (ret == -EEXIST) {
		ret = 0;
		rcu_read_lock();
		tmp = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
		if (tmp == NULL || !lockref_get_not_dead(&tmp->gl_lockref)) {
			if (++tries < 100) {
				rcu_read_unlock();
				cond_resched();
				goto again;
			}
			tmp = NULL;
			ret = -ENOMEM;
		}
		rcu_read_unlock();
	} else {
		WARN_ON_ONCE(ret);
	}
	kfree(gl->gl_lksb.sb_lvbptr);
	kmem_cache_free(cachep, gl);
	atomic_dec(&sdp->sd_glock_disposal);
	*glp = tmp;

	return ret;
}
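
/*
 * A minimal usage sketch (hypothetical caller, for illustration only; the
 * variable no_addr and the choice of gfs2_inode_glops are assumptions):
 * look up or create the glock for an on-disk block, then drop the
 * reference when done. This mirrors the pattern used by
 * gfs2_glock_nq_num() below.
 *
 *	struct gfs2_glock *gl;
 *	int error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &gl);
 *	if (error)
 *		return error;
 *	...
 *	gfs2_glock_put(gl);
 */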

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = _RET_IP_;
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = _RET_IP_;
	put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gfs2_holder_mark_uninitialized(gh);
	gh->gh_ip = 0;
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	unsigned long time1 = jiffies;

	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
	if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
		/* Lengthen the minimum hold time. */
		gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
					      GL_GLOCK_HOLD_INCR,
					      GL_GLOCK_MAX_HOLD);
	return gh->gh_error;
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @delay: zero to demote immediately; otherwise pending demote
 * @remote: true if this came from a different cluster node
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay, bool remote)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
			gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl, remote);
	trace_gfs2_demote_rq(gl, remote);
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		seq_vprintf(seq, fmt, args);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;

		pr_err("%pV", &vaf);
	}

	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_futile = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_futile = !may_grant(gl, gh);
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_futile &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	trace_gfs2_glock_queue(gh, 1);
	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_lockref.lock);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_lockref.lock);
	}
	return;

trap_recursive:
	pr_err("original: %pSR\n", (void *)gh2->gh_ip);
	pr_err("pid: %d\n", pid_nr(gh2->gh_owner_pid));
	pr_err("lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	pr_err("new: %pSR\n", (void *)gh->gh_ip);
	pr_err("pid: %d\n", pid_nr(gh->gh_owner_pid));
	pr_err("lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	gfs2_dump_glock(NULL, gl);
	BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	int error = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	if (test_bit(GLF_LRU, &gl->gl_flags))
		gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_lockref.lock);
	add_to_queue(gh);
	if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
		     test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
		gl->gl_lockref.count++;
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gl->gl_lockref.count--;
	}
	run_queue(gl, 1);
	spin_unlock(&gl->gl_lockref.lock);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}
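
/*
 * A minimal synchronous usage sketch (hypothetical caller, for
 * illustration only), using the holder API defined in this file:
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (error) {
 *		gfs2_holder_uninit(&gh);
 *		return error;
 *	}
 *	... access data protected by the glock ...
 *	gfs2_glock_dq_uninit(&gh);
 */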

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}
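
/*
 * Hypothetical GL_ASYNC usage sketch (for illustration only; the
 * do_other_work() call stands in for whatever the caller does while the
 * request is in flight): queue the request without blocking, poll for
 * completion, then reap the result. Per the gfs2_glock_nq() comment above,
 * GL_ASYNC requests never fail at enqueue time.
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, GL_ASYNC, &gh);
 *	gfs2_glock_nq(&gh);
 *	while (!gfs2_glock_poll(&gh))
 *		do_other_work();
 *	error = gfs2_glock_wait(&gh);
 */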

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_lockref.lock);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0, false);

	list_del_init(&gh->gh_list);
	clear_bit(HIF_HOLDER, &gh->gh_iflags);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_lockref.lock);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_lockref.lock);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}
		if (list_empty(&gl->gl_holders) &&
		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
			fast_path = 1;
	}
	if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl) &&
	    (glops->go_flags & GLOF_LRU))
		gfs2_glock_add_to_lru(gl);

	trace_gfs2_glock_queue(gh, 0);
	spin_unlock(&gl->gl_lockref.lock);
	if (likely(fast_path))
		return;

	gfs2_glock_hold(gl);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_name.ln_type == LM_TYPE_INODE)
		delay = gl->gl_hold_time;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, u16 flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}
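
/*
 * Hypothetical multi-lock usage sketch (for illustration only; gl_a and
 * gl_b are assumed glocks): initialize one holder per glock, then let
 * gfs2_glock_nq_m() acquire them all in deadlock-free (sorted) order.
 *
 *	struct gfs2_holder ghs[2];
 *
 *	gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);
 *	...
 *	gfs2_glock_dq_m(2, ghs);
 */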

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq(&ghs[num_gh]);
}

void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gfs2_glock_hold(gl);
	holdtime = gl->gl_tchange + gl->gl_hold_time;
	if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
	    gl->gl_name.ln_type == LM_TYPE_INODE) {
		if (time_before(now, holdtime))
			delay = holdtime - now;
		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
			delay = gl->gl_hold_time;
	}

	spin_lock(&gl->gl_lockref.lock);
	handle_callback(gl, state, delay, true);
	spin_unlock(&gl->gl_lockref.lock);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation or
 * (c) if there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */

static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
	const struct gfs2_holder *gh;

	if (gl->gl_reply & ~LM_OUT_ST_MASK)
		return 0;
	if (gl->gl_target == LM_ST_UNLOCKED)
		return 0;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (LM_FLAG_NOEXP & gh->gh_flags)
			return 0;
	}

	return 1;
}

/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 * The gl_reply field is under the gl_lockref.lock lock so that it is ok
 * to use a bitfield shared with other glock state fields.
 */

void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;

	spin_lock(&gl->gl_lockref.lock);
	gl->gl_reply = ret;

	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
		if (gfs2_should_freeze(gl)) {
			set_bit(GLF_FROZEN, &gl->gl_flags);
			spin_unlock(&gl->gl_lockref.lock);
			return;
		}
	}

	gl->gl_lockref.count++;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	spin_unlock(&gl->gl_lockref.lock);

	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_glock *gla, *glb;

	gla = list_entry(a, struct gfs2_glock, gl_lru);
	glb = list_entry(b, struct gfs2_glock, gl_lru);

	if (gla->gl_name.ln_number > glb->gl_name.ln_number)
		return 1;
	if (gla->gl_name.ln_number < glb->gl_name.ln_number)
		return -1;

	return 0;
}

/**
 * gfs2_dispose_glock_lru - Demote a list of glocks
 * @list: The list to dispose of
 *
 * Disposing of glocks may involve disk accesses, so that here we sort
 * the glocks by number (i.e. disk location of the inodes) so that if
 * there are any such accesses, they'll be sent in order (mostly).
 *
 * Must be called under the lru_lock, but may drop and retake this
 * lock. While the lru_lock is dropped, entries may vanish from the
 * list, but no new entries will appear on the list (since it is
 * private)
 */

static void gfs2_dispose_glock_lru(struct list_head *list)
__releases(&lru_lock)
__acquires(&lru_lock)
{
	struct gfs2_glock *gl;

	list_sort(NULL, list, glock_cmp);

	while(!list_empty(list)) {
		gl = list_entry(list->next, struct gfs2_glock, gl_lru);
		list_del_init(&gl->gl_lru);
		if (!spin_trylock(&gl->gl_lockref.lock)) {
add_back_to_lru:
			list_add(&gl->gl_lru, &lru_list);
			atomic_inc(&lru_count);
			continue;
		}
		if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
			spin_unlock(&gl->gl_lockref.lock);
			goto add_back_to_lru;
		}
		clear_bit(GLF_LRU, &gl->gl_flags);
		gl->gl_lockref.count++;
		if (demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
		WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gl->gl_lockref.count--;
		spin_unlock(&gl->gl_lockref.lock);
		cond_resched_lock(&lru_lock);
	}
}

/**
 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
 * @nr: The number of entries to scan
 *
 * This function selects the entries on the LRU which are able to
 * be demoted, and then kicks off the process by calling
 * gfs2_dispose_glock_lru() above.
 */

static long gfs2_scan_glock_lru(int nr)
{
	struct gfs2_glock *gl;
	LIST_HEAD(skipped);
	LIST_HEAD(dispose);
	long freed = 0;

	spin_lock(&lru_lock);
	while ((nr-- >= 0) && !list_empty(&lru_list)) {
		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);

		/* Test for being demotable */
		if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
			list_move(&gl->gl_lru, &dispose);
			atomic_dec(&lru_count);
			freed++;
			continue;
		}

		list_move(&gl->gl_lru, &skipped);
	}
	list_splice(&skipped, &lru_list);
	if (!list_empty(&dispose))
		gfs2_dispose_glock_lru(&dispose);
	spin_unlock(&lru_lock);

	return freed;
}

static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;
	return gfs2_scan_glock_lru(sc->nr_to_scan);
}

static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	return vfs_pressure_ratio(atomic_read(&lru_count));
}

static struct shrinker glock_shrinker = {
	.seeks = DEFAULT_SEEKS,
	.count_objects = gfs2_glock_shrink_count,
	.scan_objects = gfs2_glock_shrink_scan,
};

/**
 * glock_hash_walk - Call a function for each glock belonging to a superblock
 * @examiner: the function
 * @sdp: the filesystem
 *
 * Note that the function can be called multiple times on the same
 * object.  So the user must ensure that the function can cope with
 * that.
 */

static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;
	struct rhashtable_iter iter;

	rhashtable_walk_enter(&gl_hash_table, &iter);

	do {
		gl = ERR_PTR(rhashtable_walk_start(&iter));
		if (gl)
			continue;

		while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
			if ((gl->gl_name.ln_sbd == sdp) &&
			    lockref_get_not_dead(&gl->gl_lockref))
				examiner(gl);

		rhashtable_walk_stop(&iter);
	} while (cond_resched(), gl == ERR_PTR(-EAGAIN));

	rhashtable_walk_exit(&iter);
}

/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 */

static void thaw_glock(struct gfs2_glock *gl)
{
	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		goto out;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) {
out:
		gfs2_glock_put(gl);
	}
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_lockref.lock);
	if (gl->gl_state != LM_ST_UNLOCKED)
		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
	spin_unlock(&gl->gl_lockref.lock);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */

void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
	glock_hash_walk(thaw_glock, sdp);
}

static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_lockref.lock);
	gfs2_dump_glock(seq, gl);
	spin_unlock(&gl->gl_lockref.lock);
}

static void dump_glock_func(struct gfs2_glock *gl)
{
	dump_glock(NULL, gl);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
	flush_workqueue(glock_workqueue);
	glock_hash_walk(clear_glock, sdp);
	flush_workqueue(glock_workqueue);
	wait_event_timeout(sdp->sd_glock_wait,
			   atomic_read(&sdp->sd_glock_disposal) == 0,
			   HZ * 600);
	glock_hash_walk(dump_glock_func, sdp);
}

void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
	struct gfs2_glock *gl = ip->i_gl;
	int ret;

	ret = gfs2_truncatei_resume(ip);
	gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);

	spin_lock(&gl->gl_lockref.lock);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_lockref.lock);
}

static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}

static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
{
	char *p = buf;
	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
	if (flags & LM_FLAG_PRIORITY)
		*p++ = 'p';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (test_bit(HIF_FIRST, &iflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}

/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 *
 */

static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
	struct task_struct *gh_owner = NULL;
	char flags_buf[32];

	rcu_read_lock();
	if (gh->gh_owner_pid)
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
		       state2str(gh->gh_state),
		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
		       gh->gh_error,
		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
		       gh_owner ? gh_owner->comm : "(ended)",
		       (void *)gh->gh_ip);
	rcu_read_unlock();
}

static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
{
	const unsigned long *gflags = &gl->gl_flags;
	char *p = buf;

	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_REPLY_PENDING, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'I';
	if (test_bit(GLF_FROZEN, gflags))
		*p++ = 'F';
	if (test_bit(GLF_QUEUED, gflags))
		*p++ = 'q';
	if (test_bit(GLF_LRU, gflags))
		*p++ = 'L';
	if (gl->gl_object)
		*p++ = 'o';
	if (test_bit(GLF_BLOCKING, gflags))
		*p++ = 'b';
	*p = 0;
	return buf;
}

/**
 * gfs2_dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are in
 * [] so that it's possible to see if they are composed of spaces for
 * example. The fields are n = number (id of the object), f = flags,
 * t = type, s = state, r = refcount, e = error, p = pid.
 *
 */
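
/*
 * For illustration, a glock line plus a holder line produced by the format
 * strings below might look like this (all values hypothetical):
 *
 *   G:  s:SH n:2/1c10f f:lq t:SH d:EX/0 a:0 v:0 r:3 m:10
 *    H: s:SH f:H e:0 p:1234 [cat] gfs2_glock_nq+0x.../0x...
 */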

void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned long long dtime;
	const struct gfs2_holder *gh;
	char gflags_buf[32];

	dtime = jiffies - gl->gl_demote_time;
	dtime *= 1000000/HZ; /* demote time in uSec */
	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
		dtime = 0;
	gfs2_print_dbg(seq, "G:  s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n",
		  state2str(gl->gl_state),
		  gl->gl_name.ln_type,
		  (unsigned long long)gl->gl_name.ln_number,
		  gflags2str(gflags_buf, gl),
		  state2str(gl->gl_target),
		  state2str(gl->gl_demote_state), dtime,
		  atomic_read(&gl->gl_ail_count),
		  atomic_read(&gl->gl_revokes),
		  (int)gl->gl_lockref.count, gl->gl_hold_time);

	list_for_each_entry(gh, &gl->gl_holders, gh_list)
		dump_holder(seq, gh);

	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
		glops->go_dump(seq, gl);
}

static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock *gl = iter_ptr;

	seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
		   gl->gl_name.ln_type,
		   (unsigned long long)gl->gl_name.ln_number,
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
	return 0;
}

static const char *gfs2_gltype[] = {
	"type",
	"reserved",
	"nondisk",
	"inode",
	"rgrp",
	"meta",
	"iopen",
	"flock",
	"plock",
	"quota",
	"journal",
};

static const char *gfs2_stype[] = {
	[GFS2_LKS_SRTT]		= "srtt",
	[GFS2_LKS_SRTTVAR]	= "srttvar",
	[GFS2_LKS_SRTTB]	= "srttb",
	[GFS2_LKS_SRTTVARB]	= "srttvarb",
	[GFS2_LKS_SIRT]		= "sirt",
	[GFS2_LKS_SIRTVAR]	= "sirtvar",
	[GFS2_LKS_DCOUNT]	= "dlm",
	[GFS2_LKS_QCOUNT]	= "queue",
};

#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))

gfs2_sbstats_seq_show(struct seq_file * seq,void * iter_ptr)1752 static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
1753 {
1754 	struct gfs2_sbd *sdp = seq->private;
1755 	loff_t pos = *(loff_t *)iter_ptr;
1756 	unsigned index = pos >> 3;
1757 	unsigned subindex = pos & 0x07;
1758 	int i;
1759 
1760 	if (index == 0 && subindex != 0)
1761 		return 0;
1762 
1763 	seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
1764 		   (index == 0) ? "cpu": gfs2_stype[subindex]);
1765 
1766 	for_each_possible_cpu(i) {
1767                 const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);
1768 
1769 		if (index == 0)
1770 			seq_printf(seq, " %15u", i);
1771 		else
1772 			seq_printf(seq, " %15llu", (unsigned long long)lkstats->
1773 				   lkstats[index - 1].stats[subindex]);
1774 	}
1775 	seq_putc(seq, '\n');
1776 	return 0;
1777 }
1778 
int __init gfs2_glock_init(void)
{
	int ret;

	ret = rhashtable_init(&gl_hash_table, &ht_parms);
	if (ret < 0)
		return ret;

	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
	if (!glock_workqueue) {
		rhashtable_destroy(&gl_hash_table);
		return -ENOMEM;
	}
	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
						WQ_MEM_RECLAIM | WQ_FREEZABLE,
						0);
	if (!gfs2_delete_workqueue) {
		destroy_workqueue(glock_workqueue);
		rhashtable_destroy(&gl_hash_table);
		return -ENOMEM;
	}

	ret = register_shrinker(&glock_shrinker);
	if (ret) {
		destroy_workqueue(gfs2_delete_workqueue);
		destroy_workqueue(glock_workqueue);
		rhashtable_destroy(&gl_hash_table);
		return ret;
	}

	return 0;
}

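/*
 * Note: gfs2_glock_init() unwinds in reverse order on failure.  On exit,
 * the shrinker is unregistered first, since shrinker callbacks can queue
 * glock work, before the hash table and workqueues are torn down.
 */
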
void gfs2_glock_exit(void)
{
	unregister_shrinker(&glock_shrinker);
	rhashtable_destroy(&gl_hash_table);
	destroy_workqueue(glock_workqueue);
	destroy_workqueue(gfs2_delete_workqueue);
}

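/*
 * rhashtable_walk_next() returns ERR_PTR(-EAGAIN) when the table was
 * resized underneath the walker, in which case the walk is simply
 * resumed.  Glocks belonging to other superblocks and glocks whose
 * lockref is already dead are skipped.
 */
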
static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
	while ((gi->gl = rhashtable_walk_next(&gi->hti))) {
		if (IS_ERR(gi->gl)) {
			if (PTR_ERR(gi->gl) == -EAGAIN)
				continue;
			gi->gl = NULL;
			return;
		}
		/* Skip entries for other sb and dead entries */
		if (gi->sdp == gi->gl->gl_name.ln_sbd &&
		    !__lockref_is_dead(&gi->gl->gl_lockref))
			return;
	}
}

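/*
 * Each ->start callback below re-enters the rhashtable walk from the
 * beginning and steps forward *pos entries, so reading a large glocks
 * file in many small chunks is roughly quadratic in the number of
 * glocks.  The oversized seq_file buffer set up in gfs2_glocks_open()
 * (GFS2_SEQ_GOODSIZE, below) keeps the number of such restarts small.
 */
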
static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;
	loff_t n = *pos;

	rhashtable_walk_enter(&gl_hash_table, &gi->hti);
	if (rhashtable_walk_start(&gi->hti) != 0)
		return NULL;

	do {
		gfs2_glock_iter_next(gi);
	} while (gi->gl && n--);

	gi->last_pos = *pos;

	return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;
	gi->last_pos = *pos;
	gfs2_glock_iter_next(gi);

	return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock_iter *gi = seq->private;

	gi->gl = NULL;
	rhashtable_walk_stop(&gi->hti);
	rhashtable_walk_exit(&gi->hti);
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
	dump_glock(seq, iter_ptr);
	return 0;
}

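/*
 * sbstats iterates over a fixed range of positions rather than over
 * glocks.  The preempt_disable()/preempt_enable() pair in start/stop
 * brackets each step of the walk; the per-cpu counters themselves are
 * updated locklessly, so the printed values are a best-effort snapshot
 * either way.
 */
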
static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
{
	preempt_disable();
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	return pos;
}

static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
				   loff_t *pos)
{
	(*pos)++;
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	return pos;
}

static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	preempt_enable();
}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};

static const struct seq_operations gfs2_glstats_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glstats_seq_show,
};

static const struct seq_operations gfs2_sbstats_seq_ops = {
	.start = gfs2_sbstats_seq_start,
	.next  = gfs2_sbstats_seq_next,
	.stop  = gfs2_sbstats_seq_stop,
	.show  = gfs2_sbstats_seq_show,
};

#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)

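/*
 * Pre-sizing the seq_file buffer (to at most 64k) lets a large glock
 * dump proceed without repeatedly overflowing the default single-page
 * buffer and restarting the iteration.  __GFP_NOWARN: falling back to
 * the default buffer when the allocation fails is perfectly acceptable.
 */
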
static int gfs2_glocks_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;

		gi->sdp = inode->i_private;
		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
		if (seq->buf)
			seq->size = GFS2_SEQ_GOODSIZE;
		gi->gl = NULL;
	}
	return ret;
}

static int gfs2_glocks_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct gfs2_glock_iter *gi = seq->private;

	gi->gl = NULL;
	return seq_release_private(inode, file);
}

static int gfs2_glstats_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glstats_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;

		gi->sdp = inode->i_private;
		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
		if (seq->buf)
			seq->size = GFS2_SEQ_GOODSIZE;
		gi->gl = NULL;
	}
	return ret;
}

static int gfs2_sbstats_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &gfs2_sbstats_seq_ops);
	if (ret == 0) {
		struct seq_file *seq = file->private_data;

		seq->private = inode->i_private;  /* sdp */
	}
	return ret;
}

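/*
 * sbstats needs no private iterator state, hence plain seq_open() above
 * and plain seq_release() in gfs2_sbstats_fops below.
 */
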
static const struct file_operations gfs2_glocks_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_glocks_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = gfs2_glocks_release,
};

static const struct file_operations gfs2_glstats_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_glstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = gfs2_glocks_release,
};

static const struct file_operations gfs2_sbstats_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_sbstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	struct dentry *dent;

	dent = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
	if (IS_ERR_OR_NULL(dent))
		goto fail;
	sdp->debugfs_dir = dent;

	dent = debugfs_create_file("glocks",
				   S_IFREG | S_IRUGO,
				   sdp->debugfs_dir, sdp,
				   &gfs2_glocks_fops);
	if (IS_ERR_OR_NULL(dent))
		goto fail;
	sdp->debugfs_dentry_glocks = dent;

	dent = debugfs_create_file("glstats",
				   S_IFREG | S_IRUGO,
				   sdp->debugfs_dir, sdp,
				   &gfs2_glstats_fops);
	if (IS_ERR_OR_NULL(dent))
		goto fail;
	sdp->debugfs_dentry_glstats = dent;

	dent = debugfs_create_file("sbstats",
				   S_IFREG | S_IRUGO,
				   sdp->debugfs_dir, sdp,
				   &gfs2_sbstats_fops);
	if (IS_ERR_OR_NULL(dent))
		goto fail;
	sdp->debugfs_dentry_sbstats = dent;

	return 0;
fail:
	gfs2_delete_debugfs_file(sdp);
	return dent ? PTR_ERR(dent) : -ENOMEM;
}

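/*
 * Illustrative usage (assuming debugfs is mounted at the usual place):
 *
 *   # cat /sys/kernel/debug/gfs2/<fsname>/glocks
 *   # cat /sys/kernel/debug/gfs2/<fsname>/sbstats
 *
 * where <fsname> is sdp->sd_table_name, i.e. the lock table name.
 */
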
void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	if (sdp->debugfs_dir) {
		if (sdp->debugfs_dentry_glocks) {
			debugfs_remove(sdp->debugfs_dentry_glocks);
			sdp->debugfs_dentry_glocks = NULL;
		}
		if (sdp->debugfs_dentry_glstats) {
			debugfs_remove(sdp->debugfs_dentry_glstats);
			sdp->debugfs_dentry_glstats = NULL;
		}
		if (sdp->debugfs_dentry_sbstats) {
			debugfs_remove(sdp->debugfs_dentry_sbstats);
			sdp->debugfs_dentry_sbstats = NULL;
		}
		debugfs_remove(sdp->debugfs_dir);
		sdp->debugfs_dir = NULL;
	}
}

int gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
	if (IS_ERR(gfs2_root))
		return PTR_ERR(gfs2_root);
	return gfs2_root ? 0 : -ENOMEM;
}

void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}