// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmthread.c
 *
 * standalone DLM module
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>


#include "../cluster/heartbeat.h"
#include "../cluster/nodemanager.h"
#include "../cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_THREAD)
#include "../cluster/masklog.h"

static int dlm_thread(void *data);
static void dlm_flush_asts(struct dlm_ctxt *dlm);

/*
 * Wait until none of the given flags are set in res->state.  Must be
 * called with res->spinlock held; the spinlock is dropped while
 * sleeping and reacquired before returning.
 */
void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags)
{
	DECLARE_WAITQUEUE(wait, current);

	assert_spin_locked(&res->spinlock);

	add_wait_queue(&res->wq, &wait);
repeat:
	set_current_state(TASK_UNINTERRUPTIBLE);
	if (res->state & flags) {
		spin_unlock(&res->spinlock);
		schedule();
		spin_lock(&res->spinlock);
		goto repeat;
	}
	remove_wait_queue(&res->wq, &wait);
	__set_current_state(TASK_RUNNING);
}

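/* Returns 1 if the granted, converting or blocked queue holds any
 * locks, 0 otherwise. */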
int __dlm_lockres_has_locks(struct dlm_lock_resource *res)
{
	if (list_empty(&res->granted) &&
	    list_empty(&res->converting) &&
	    list_empty(&res->blocked))
		return 0;
	return 1;
}

/* "unused": the lockres has no locks, is not on the dirty list,
 * has no inflight locks (in the gap between mastery and acquiring
 * the first lock), and has no bits in its refmap.
 * truly ready to be freed. */
int __dlm_lockres_unused(struct dlm_lock_resource *res)
{
	int bit;

	assert_spin_locked(&res->spinlock);

	if (__dlm_lockres_has_locks(res))
		return 0;

	/* Locks are in the process of being created */
	if (res->inflight_locks)
		return 0;

	if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)
		return 0;

	if (res->state & (DLM_LOCK_RES_RECOVERING|
			DLM_LOCK_RES_RECOVERY_WAITING))
		return 0;

	/* Another node has this resource with this node as the master */
	bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
	if (bit < O2NM_MAX_NODES)
		return 0;

	return 1;
}


/* Call whenever you may have added or deleted something from one of
 * the lockres queues.  This will figure out whether the lockres belongs
 * on the unused list and does the appropriate thing. */
void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res)
{
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (__dlm_lockres_unused(res)) {
		if (list_empty(&res->purge)) {
			mlog(0, "%s: Adding res %.*s to purge list\n",
			     dlm->name, res->lockname.len, res->lockname.name);

			res->last_used = jiffies;
			dlm_lockres_get(res);
			list_add_tail(&res->purge, &dlm->purge_list);
			dlm->purge_count++;
		}
	} else if (!list_empty(&res->purge)) {
		mlog(0, "%s: Removing res %.*s from purge list\n",
		     dlm->name, res->lockname.len, res->lockname.name);

		list_del_init(&res->purge);
		dlm_lockres_put(res);
		dlm->purge_count--;
	}
}

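/* Locking wrapper around __dlm_lockres_calc_usage() for callers that
 * hold neither the dlm nor the lockres spinlock. */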
void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			    struct dlm_lock_resource *res)
{
	spin_lock(&dlm->spinlock);
	spin_lock(&res->spinlock);

	__dlm_lockres_calc_usage(dlm, res);

	spin_unlock(&res->spinlock);
	spin_unlock(&dlm->spinlock);
}

/*
 * Do the real purge work:
 *	unhash the lockres, and
 *	clear the DLM_LOCK_RES_DROPPING_REF flag.
 * The caller must hold both the dlm and the lockres spinlocks.
 */
void __dlm_do_purge_lockres(struct dlm_ctxt *dlm,
			    struct dlm_lock_resource *res)
{
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (!list_empty(&res->purge)) {
		mlog(0, "%s: Removing res %.*s from purgelist\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		list_del_init(&res->purge);
		dlm_lockres_put(res);
		dlm->purge_count--;
	}

	if (!__dlm_lockres_unused(res)) {
		mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		__dlm_print_one_lock_resource(res);
		BUG();
	}

	__dlm_unhash_lockres(dlm, res);

	spin_lock(&dlm->track_lock);
	if (!list_empty(&res->tracking))
		list_del_init(&res->tracking);
	else {
		mlog(ML_ERROR, "%s: Resource %.*s not on the Tracking list\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		__dlm_print_one_lock_resource(res);
	}
	spin_unlock(&dlm->track_lock);

	/*
	 * lockres is not in the hash now.  drop the flag and wake up
	 * any processes waiting in dlm_get_lock_resource.
	 */
	res->state &= ~DLM_LOCK_RES_DROPPING_REF;
}

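/* Purge a single unused lockres.  If this node is not the master, the
 * master is first asked to drop this node's refmap bit; both spinlocks
 * are dropped and retaken around that network round trip. */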
static void dlm_purge_lockres(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res)
{
	int master;
	int ret = 0;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	master = (res->owner == dlm->node_num);

	mlog(0, "%s: Purging res %.*s, master %d\n", dlm->name,
	     res->lockname.len, res->lockname.name, master);

	if (!master) {
		if (res->state & DLM_LOCK_RES_DROPPING_REF) {
			mlog(ML_NOTICE, "%s: res %.*s already in DLM_LOCK_RES_DROPPING_REF state\n",
			     dlm->name, res->lockname.len, res->lockname.name);
			spin_unlock(&res->spinlock);
			return;
		}

		res->state |= DLM_LOCK_RES_DROPPING_REF;
		/* drop spinlock...  retake below */
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);

		spin_lock(&res->spinlock);
		/* This ensures that clear refmap is sent after the set */
		__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
		spin_unlock(&res->spinlock);

		/* clear our bit from the master's refmap; the only
		 * failure we can tolerate is the master going down */
		ret = dlm_drop_lockres_ref(dlm, res);
		if (ret < 0) {
			if (!dlm_is_host_down(ret))
				BUG();
		}
		spin_lock(&dlm->spinlock);
		spin_lock(&res->spinlock);
	}

	if (!list_empty(&res->purge)) {
		mlog(0, "%s: Removing res %.*s from purgelist, master %d\n",
		     dlm->name, res->lockname.len, res->lockname.name, master);
		list_del_init(&res->purge);
		dlm_lockres_put(res);
		dlm->purge_count--;
	}

	if (!master && ret == DLM_DEREF_RESPONSE_INPROG) {
		mlog(0, "%s: deref %.*s in progress\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		spin_unlock(&res->spinlock);
		return;
	}

	if (!__dlm_lockres_unused(res)) {
		mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		__dlm_print_one_lock_resource(res);
		BUG();
	}

	__dlm_unhash_lockres(dlm, res);

	spin_lock(&dlm->track_lock);
	if (!list_empty(&res->tracking))
		list_del_init(&res->tracking);
	else {
		mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
		     res->lockname.len, res->lockname.name);
		__dlm_print_one_lock_resource(res);
	}
	spin_unlock(&dlm->track_lock);

	/* lockres is not in the hash now.  drop the flag and wake up
	 * any processes waiting in dlm_get_lock_resource. */
	if (!master) {
		res->state &= ~DLM_LOCK_RES_DROPPING_REF;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
	} else
		spin_unlock(&res->spinlock);
}

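/* Walk the purge list and purge any lockres that has sat unused for at
 * least DLM_PURGE_INTERVAL_MS.  If purge_now is set (as it is during
 * domain shutdown), the age check is skipped. */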
static void dlm_run_purge_list(struct dlm_ctxt *dlm,
			       int purge_now)
{
	unsigned int run_max, unused;
	unsigned long purge_jiffies;
	struct dlm_lock_resource *lockres;

	spin_lock(&dlm->spinlock);
	run_max = dlm->purge_count;

	while (run_max && !list_empty(&dlm->purge_list)) {
		run_max--;

		lockres = list_entry(dlm->purge_list.next,
				     struct dlm_lock_resource, purge);

		spin_lock(&lockres->spinlock);

		purge_jiffies = lockres->last_used +
			msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);

		/* Make sure that we want to be processing this guy at
		 * this time. */
		if (!purge_now && time_after(purge_jiffies, jiffies)) {
			/* Since resources are added to the purge list
			 * in tail order, we can stop at the first
			 * unpurgeable resource -- anything added after
			 * it will have a greater last_used value */
			spin_unlock(&lockres->spinlock);
			break;
		}

		/* Status of the lockres *might* change so double
		 * check.  If the lockres is unused, holding the dlm
		 * spinlock will prevent people from getting any more
		 * refs on it. */
		unused = __dlm_lockres_unused(lockres);
		if (!unused ||
		    (lockres->state & DLM_LOCK_RES_MIGRATING) ||
		    (lockres->inflight_assert_workers != 0)) {
			mlog(0, "%s: res %.*s is in use or being remastered, "
			     "used %d, state %d, assert master workers %u\n",
			     dlm->name, lockres->lockname.len,
			     lockres->lockname.name,
			     !unused, lockres->state,
			     lockres->inflight_assert_workers);
			list_move_tail(&lockres->purge, &dlm->purge_list);
			spin_unlock(&lockres->spinlock);
			continue;
		}

		dlm_lockres_get(lockres);

		dlm_purge_lockres(dlm, lockres);

		dlm_lockres_put(lockres);

		/* Avoid adding any scheduling latencies */
		cond_resched_lock(&dlm->spinlock);
	}

	spin_unlock(&dlm->spinlock);
}

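/* Re-sort the queues of a locally mastered lockres: grant whatever has
 * become grantable, starting with the converting queue, and queue the
 * ASTs and BASTs that those changes require. */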
static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res)
{
	struct dlm_lock *lock, *target;
	int can_grant = 1;

	/*
	 * Because this function is called with the lockres
	 * spinlock, and because we know that it is not migrating/
	 * recovering/in-progress, it is fine to reserve asts and
	 * basts right before queueing them, throughout this function.
	 */
	assert_spin_locked(&dlm->ast_lock);
	assert_spin_locked(&res->spinlock);
	BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING|
			      DLM_LOCK_RES_RECOVERING|
			      DLM_LOCK_RES_IN_PROGRESS)));

converting:
	if (list_empty(&res->converting))
		goto blocked;
	mlog(0, "%s: res %.*s has locks on the convert queue\n", dlm->name,
	     res->lockname.len, res->lockname.name);

	target = list_entry(res->converting.next, struct dlm_lock, list);
	if (target->ml.convert_type == LKM_IVMODE) {
		mlog(ML_ERROR, "%s: res %.*s converting lock to invalid mode\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		BUG();
	}
	list_for_each_entry(lock, &res->granted, list) {
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type,
					 target->ml.convert_type)) {
			can_grant = 0;
			/* queue the BAST if not already */
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				__dlm_queue_bast(dlm, lock);
			}
			/* update the highest_blocked if needed */
			if (lock->ml.highest_blocked < target->ml.convert_type)
				lock->ml.highest_blocked =
					target->ml.convert_type;
		}
	}

	list_for_each_entry(lock, &res->converting, list) {
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type,
					 target->ml.convert_type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				__dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.convert_type)
				lock->ml.highest_blocked =
					target->ml.convert_type;
		}
	}

	/* we can convert the lock */
	if (can_grant) {
		spin_lock(&target->spinlock);
		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);

		mlog(0, "%s: res %.*s, AST for Converting lock %u:%llu, type "
		     "%d => %d, node %u\n", dlm->name, res->lockname.len,
		     res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
		     target->ml.type,
		     target->ml.convert_type, target->ml.node);

		target->ml.type = target->ml.convert_type;
		target->ml.convert_type = LKM_IVMODE;
		list_move_tail(&target->list, &res->granted);

		BUG_ON(!target->lksb);
		target->lksb->status = DLM_NORMAL;

		spin_unlock(&target->spinlock);

		__dlm_lockres_reserve_ast(res);
		__dlm_queue_ast(dlm, target);
		/* go back and check for more */
		goto converting;
	}

blocked:
	if (list_empty(&res->blocked))
		goto leave;
	target = list_entry(res->blocked.next, struct dlm_lock, list);

	list_for_each_entry(lock, &res->granted, list) {
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				__dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.type)
				lock->ml.highest_blocked = target->ml.type;
		}
	}

	list_for_each_entry(lock, &res->converting, list) {
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				__dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.type)
				lock->ml.highest_blocked = target->ml.type;
		}
	}

	/* we can grant the blocked lock (only
	 * possible if converting list empty) */
	if (can_grant) {
		spin_lock(&target->spinlock);
		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);

		mlog(0, "%s: res %.*s, AST for Blocked lock %u:%llu, type %d, "
		     "node %u\n", dlm->name, res->lockname.len,
		     res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
		     target->ml.type, target->ml.node);

		/* target->ml.type is already correct */
		list_move_tail(&target->list, &res->granted);

		BUG_ON(!target->lksb);
		target->lksb->status = DLM_NORMAL;

		spin_unlock(&target->spinlock);

		__dlm_lockres_reserve_ast(res);
		__dlm_queue_ast(dlm, target);
		/* go back and check for more */
		goto converting;
	}

leave:
	return;
}

/* must hold NO spinlocks when calling this with res != NULL */
void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	if (res) {
		spin_lock(&dlm->spinlock);
		spin_lock(&res->spinlock);
		__dlm_dirty_lockres(dlm, res);
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
	}
	wake_up(&dlm->dlm_thread_wq);
}

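/* Put a locally mastered lockres on dlm->dirty_list so that dlm_thread
 * will shuffle its queues.  Caller must hold both spinlocks. */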
void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	/* don't shuffle secondary queues */
	if (res->owner == dlm->node_num) {
		if (res->state & (DLM_LOCK_RES_MIGRATING |
				  DLM_LOCK_RES_BLOCK_DIRTY))
			return;

		if (list_empty(&res->dirty)) {
			/* ref for dirty_list */
			dlm_lockres_get(res);
			list_add_tail(&res->dirty, &dlm->dirty_list);
			res->state |= DLM_LOCK_RES_DIRTY;
		}
	}

	mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len,
	     res->lockname.name);
}


/* Launch the dlm thread for this domain */
int dlm_launch_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "Starting dlm_thread...\n");

	dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm-%s",
			dlm->name);
	if (IS_ERR(dlm->dlm_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_thread_task));
		dlm->dlm_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}

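/* Stop dlm_thread, if it was ever launched. */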
void dlm_complete_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_thread_task) {
		mlog(ML_KTHREAD, "Waiting for dlm thread to exit\n");
		kthread_stop(dlm->dlm_thread_task);
		dlm->dlm_thread_task = NULL;
	}
}

static int dlm_dirty_list_empty(struct dlm_ctxt *dlm)
{
	int empty;

	spin_lock(&dlm->spinlock);
	empty = list_empty(&dlm->dirty_list);
	spin_unlock(&dlm->spinlock);

	return empty;
}

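/* Deliver all pending ASTs and BASTs, local or remote.  dlm->ast_lock
 * is dropped around each delivery, so new entries may be queued while
 * the lists are drained. */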
static void dlm_flush_asts(struct dlm_ctxt *dlm)
{
	int ret;
	struct dlm_lock *lock;
	struct dlm_lock_resource *res;
	u8 hi;

	spin_lock(&dlm->ast_lock);
	while (!list_empty(&dlm->pending_asts)) {
		lock = list_entry(dlm->pending_asts.next,
				  struct dlm_lock, ast_list);
		/* get an extra ref on lock */
		dlm_lock_get(lock);
		res = lock->lockres;
		mlog(0, "%s: res %.*s, Flush AST for lock %u:%llu, type %d, "
		     "node %u\n", dlm->name, res->lockname.len,
		     res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
		     lock->ml.type, lock->ml.node);

		BUG_ON(!lock->ast_pending);

		/* remove from list (including ref) */
		list_del_init(&lock->ast_list);
		dlm_lock_put(lock);
		spin_unlock(&dlm->ast_lock);

		if (lock->ml.node != dlm->node_num) {
			ret = dlm_do_remote_ast(dlm, res, lock);
			if (ret < 0)
				mlog_errno(ret);
		} else
			dlm_do_local_ast(dlm, res, lock);

		spin_lock(&dlm->ast_lock);

		/* possible that another ast was queued while
		 * we were delivering the last one */
		if (!list_empty(&lock->ast_list)) {
			mlog(0, "%s: res %.*s, AST queued while flushing last "
			     "one\n", dlm->name, res->lockname.len,
			     res->lockname.name);
		} else
			lock->ast_pending = 0;

		/* drop the extra ref.
		 * this may drop it completely. */
		dlm_lock_put(lock);
		dlm_lockres_release_ast(dlm, res);
	}

	while (!list_empty(&dlm->pending_basts)) {
		lock = list_entry(dlm->pending_basts.next,
				  struct dlm_lock, bast_list);
		/* get an extra ref on lock */
		dlm_lock_get(lock);
		res = lock->lockres;

		BUG_ON(!lock->bast_pending);

		/* get the highest blocked lock, and reset */
		spin_lock(&lock->spinlock);
		BUG_ON(lock->ml.highest_blocked <= LKM_IVMODE);
		hi = lock->ml.highest_blocked;
		lock->ml.highest_blocked = LKM_IVMODE;
		spin_unlock(&lock->spinlock);

		/* remove from list (including ref) */
		list_del_init(&lock->bast_list);
		dlm_lock_put(lock);
		spin_unlock(&dlm->ast_lock);

		mlog(0, "%s: res %.*s, Flush BAST for lock %u:%llu, "
		     "blocked %d, node %u\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
		     hi, lock->ml.node);

		if (lock->ml.node != dlm->node_num) {
			ret = dlm_send_proxy_bast(dlm, res, lock, hi);
			if (ret < 0)
				mlog_errno(ret);
		} else
			dlm_do_local_bast(dlm, res, lock, hi);

		spin_lock(&dlm->ast_lock);

		/* possible that another bast was queued while
		 * we were delivering the last one */
		if (!list_empty(&lock->bast_list)) {
			mlog(0, "%s: res %.*s, BAST queued while flushing last "
			     "one\n", dlm->name, res->lockname.len,
			     res->lockname.name);
		} else
			lock->bast_pending = 0;

		/* drop the extra ref.
		 * this may drop it completely. */
		dlm_lock_put(lock);
		dlm_lockres_release_ast(dlm, res);
	}
	wake_up(&dlm->ast_wq);
	spin_unlock(&dlm->ast_lock);
}


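/* how long dlm_thread sleeps between passes when there is no work, and
 * how many dirty lockreses it shuffles before yielding the cpu */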
#define DLM_THREAD_TIMEOUT_MS (4 * 1000)
#define DLM_THREAD_MAX_DIRTY 100

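/* The main loop: run the purge list, shuffle each dirty lockres and
 * queue its ASTs/BASTs, flush them, then sleep until there is more
 * work or the timeout expires. */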
static int dlm_thread(void *data)
{
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm = data;
	unsigned long timeout = msecs_to_jiffies(DLM_THREAD_TIMEOUT_MS);

	mlog(0, "dlm thread running for %s...\n", dlm->name);

	while (!kthread_should_stop()) {
		int n = DLM_THREAD_MAX_DIRTY;

		/* dlm_shutting_down is very point-in-time, but that
		 * doesn't matter as we'll just loop back around if we
		 * get false on the leading edge of a state
		 * transition. */
		dlm_run_purge_list(dlm, dlm_shutting_down(dlm));

		/* We really don't want to hold dlm->spinlock while
		 * calling dlm_shuffle_lists on each lockres that
		 * needs to have its queues adjusted and AST/BASTs
		 * run.  So let's pull each entry off the dirty_list
		 * and drop dlm->spinlock ASAP.  Once off the list,
		 * res->spinlock needs to be taken again to protect
		 * the queues while calling dlm_shuffle_lists. */
		spin_lock(&dlm->spinlock);
		while (!list_empty(&dlm->dirty_list)) {
			int delay = 0;
			res = list_entry(dlm->dirty_list.next,
					 struct dlm_lock_resource, dirty);

			/* peel a lockres off, remove it from the list
			 * and drop the dlm lock; the dirty flag is
			 * cleared after the shuffle below */
			BUG_ON(!res);
			dlm_lockres_get(res);

			spin_lock(&res->spinlock);
			/* We clear the DLM_LOCK_RES_DIRTY state once we shuffle lists below */
			list_del_init(&res->dirty);
			spin_unlock(&res->spinlock);
			spin_unlock(&dlm->spinlock);
			/* Drop dirty_list ref */
			dlm_lockres_put(res);

			/* lockres can be re-dirtied/re-added to the
			 * dirty_list in this gap, but that is ok */

			spin_lock(&dlm->ast_lock);
			spin_lock(&res->spinlock);
			if (res->owner != dlm->node_num) {
				__dlm_print_one_lock_resource(res);
				mlog(ML_ERROR, "%s: inprog %d, mig %d, reco %d,"
				     " dirty %d\n", dlm->name,
				     !!(res->state & DLM_LOCK_RES_IN_PROGRESS),
				     !!(res->state & DLM_LOCK_RES_MIGRATING),
				     !!(res->state & DLM_LOCK_RES_RECOVERING),
				     !!(res->state & DLM_LOCK_RES_DIRTY));
			}
			BUG_ON(res->owner != dlm->node_num);

			/* it is now ok to move lockreses in these states
			 * to the dirty list, assuming that they will only be
			 * dirty for a short while. */
			BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
			if (res->state & (DLM_LOCK_RES_IN_PROGRESS |
					  DLM_LOCK_RES_RECOVERING |
					  DLM_LOCK_RES_RECOVERY_WAITING)) {
				/* move it to the tail and keep going */
				res->state &= ~DLM_LOCK_RES_DIRTY;
				spin_unlock(&res->spinlock);
				spin_unlock(&dlm->ast_lock);
				mlog(0, "%s: res %.*s, inprogress, delay list "
				     "shuffle, state %d\n", dlm->name,
				     res->lockname.len, res->lockname.name,
				     res->state);
				delay = 1;
				goto in_progress;
			}

			/* at this point the lockres is not migrating/
			 * recovering/in-progress.  we have the lockres
			 * spinlock and do NOT have the dlm lock.
			 * safe to reserve/queue asts and run the lists. */

			/* called while holding lockres lock */
			dlm_shuffle_lists(dlm, res);
			res->state &= ~DLM_LOCK_RES_DIRTY;
			spin_unlock(&res->spinlock);
			spin_unlock(&dlm->ast_lock);

			dlm_lockres_calc_usage(dlm, res);

in_progress:

			spin_lock(&dlm->spinlock);
			/* if the lock was in-progress, stick
			 * it on the back of the list */
			if (delay) {
				spin_lock(&res->spinlock);
				__dlm_dirty_lockres(dlm, res);
				spin_unlock(&res->spinlock);
			}
			dlm_lockres_put(res);

			/* unlikely, but we may need to give time to
			 * other tasks */
			if (!--n) {
				mlog(0, "%s: Throttling dlm thread\n",
				     dlm->name);
				break;
			}
		}

		spin_unlock(&dlm->spinlock);
		dlm_flush_asts(dlm);

		/* yield and continue right away if there is more work to do */
		if (!n) {
			cond_resched();
			continue;
		}

		wait_event_interruptible_timeout(dlm->dlm_thread_wq,
						 !dlm_dirty_list_empty(dlm) ||
						 kthread_should_stop(),
						 timeout);
	}

	mlog(0, "quitting DLM thread\n");
	return 0;
}