/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmrecovery.c
 *
 * recovery stuff
 *
 * Copyright (C) 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
#include "cluster/masklog.h"
static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);

static int dlm_recovery_thread(void *data);
static int dlm_do_recovery(struct dlm_ctxt *dlm);

static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_request_all_locks(struct dlm_ctxt *dlm,
                                 u8 request_from, u8 dead_node);
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
                                        const char *lockname, int namelen,
                                        int total_locks, u64 cookie,
                                        u8 flags, u8 master);
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
                                    struct dlm_migratable_lockres *mres,
                                    u8 send_to,
                                    struct dlm_lock_resource *res,
                                    int total_locks);
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
                                     struct dlm_lock_resource *res,
                                     struct dlm_migratable_lockres *mres);
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
                                 u8 dead_node, u8 send_to);
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
                                        struct list_head *list, u8 dead_node);
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
                                              u8 dead_node, u8 new_master);
static void dlm_reco_ast(void *astdata);
static void dlm_reco_bast(void *astdata, int blocked_type);
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
static void dlm_request_all_locks_worker(struct dlm_work_item *item,
                                         void *data);
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
                                      struct dlm_lock_resource *res,
                                      u8 *real_master);

static u64 dlm_get_next_mig_cookie(void);

static DEFINE_SPINLOCK(dlm_reco_state_lock);
static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
static u64 dlm_mig_cookie = 1;
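/* Note: cookies start at 1 and wrap back to 1, so 0 is never handed
 * out. That appears to leave 0 free to mean "single-message transfer,
 * no multi-packet migration in progress" (see the mig_cookie handling
 * in dlm_send_one_lockres() below). */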

static u64 dlm_get_next_mig_cookie(void)
{
        u64 c;
        spin_lock(&dlm_mig_cookie_lock);
        c = dlm_mig_cookie;
        if (dlm_mig_cookie == (~0ULL))
                dlm_mig_cookie = 1;
        else
                dlm_mig_cookie++;
        spin_unlock(&dlm_mig_cookie_lock);
        return c;
}

static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
                                          u8 dead_node)
{
        assert_spin_locked(&dlm->spinlock);
        if (dlm->reco.dead_node != dead_node)
                mlog(0, "%s: changing dead_node from %u to %u\n",
                     dlm->name, dlm->reco.dead_node, dead_node);
        dlm->reco.dead_node = dead_node;
}

static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
                                       u8 master)
{
        assert_spin_locked(&dlm->spinlock);
        mlog(0, "%s: changing new_master from %u to %u\n",
             dlm->name, dlm->reco.new_master, master);
        dlm->reco.new_master = master;
}

static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
{
        assert_spin_locked(&dlm->spinlock);
        clear_bit(dlm->reco.dead_node, dlm->recovery_map);
        dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
        dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
}

static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
{
        spin_lock(&dlm->spinlock);
        __dlm_reset_recovery(dlm);
        spin_unlock(&dlm->spinlock);
}

/* Worker function used during recovery. */
void dlm_dispatch_work(struct work_struct *work)
{
        struct dlm_ctxt *dlm =
                container_of(work, struct dlm_ctxt, dispatched_work);
        LIST_HEAD(tmp_list);
        struct dlm_work_item *item, *next;
        dlm_workfunc_t *workfunc;
        int tot=0;

        spin_lock(&dlm->work_lock);
        list_splice_init(&dlm->work_list, &tmp_list);
        spin_unlock(&dlm->work_lock);

        list_for_each_entry(item, &tmp_list, list) {
                tot++;
        }
        mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);

        list_for_each_entry_safe(item, next, &tmp_list, list) {
                workfunc = item->func;
                list_del_init(&item->list);

                /* already have ref on dlm to avoid having
                 * it disappear. just double-check. */
                BUG_ON(item->dlm != dlm);

                /* this is allowed to sleep and
                 * call network stuff */
                workfunc(item, item->data);

                dlm_put(dlm);
                kfree(item);
        }
}

/*
 * RECOVERY THREAD
 */

void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
{
        /* wake the recovery thread
         * this will wake the reco thread in one of three places
         * 1) sleeping with no recovery happening
         * 2) sleeping with recovery mastered elsewhere
         * 3) recovery mastered here, waiting on reco data */

        wake_up(&dlm->dlm_reco_thread_wq);
}

/* Launch the recovery thread */
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
{
        mlog(0, "starting dlm recovery thread...\n");

        dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
                                                "dlm_reco-%s", dlm->name);
        if (IS_ERR(dlm->dlm_reco_thread_task)) {
                mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
                dlm->dlm_reco_thread_task = NULL;
                return -EINVAL;
        }

        return 0;
}

void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
{
        if (dlm->dlm_reco_thread_task) {
                mlog(0, "waiting for dlm recovery thread to exit\n");
                kthread_stop(dlm->dlm_reco_thread_task);
                dlm->dlm_reco_thread_task = NULL;
        }
}



/*
 * this is lame, but here's how recovery works...
 * 1) all recovery threads cluster wide will work on recovering
 *    ONE node at a time
 * 2) negotiate who will take over all the locks for the dead node.
 *    that's right... ALL the locks.
 * 3) once a new master is chosen, everyone scans all locks
 *    and moves aside those mastered by the dead guy
 * 4) each of these locks should be locked until recovery is done
 * 5) the new master collects up all of the secondary lock queue info
 *    one lock at a time, forcing each node to communicate back
 *    before continuing
 * 6) each secondary lock queue responds with the full known lock info
 * 7) once the new master has run all its locks, it sends an ALLDONE!
 *    message to everyone
 * 8) upon receiving this message, the secondary queue node unlocks
 *    and responds to the ALLDONE
 * 9) once the new master gets responses from everyone, he unlocks
 *    everything and recovery for this dead node is done
 *10) go back to 2) while there are still dead nodes
 *
 */
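/*
 * A rough sketch (not normative; dlm_remaster_locks() below is the
 * authoritative logic) of the per-node state the recovery master walks
 * each live node through:
 *
 *   INIT -> REQUESTING     dlm_request_all_locks() sent to the node
 *        -> REQUESTED      request acked; waiting on lock data
 *        -> RECEIVING      migratable lockres messages arriving
 *        -> DONE           DATA DONE message received from the node
 *        -> FINALIZE_SENT  finalize message sent once all nodes are DONE
 *
 * A node may drop to DEAD at any point, in which case its lock info is
 * simply skipped for this recovery pass.
 */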

static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
{
        struct dlm_reco_node_data *ndata;
        struct dlm_lock_resource *res;

        mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
             dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
             dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
             dlm->reco.dead_node, dlm->reco.new_master);

        list_for_each_entry(ndata, &dlm->reco.node_data, list) {
                char *st = "unknown";
                switch (ndata->state) {
                case DLM_RECO_NODE_DATA_INIT:
                        st = "init";
                        break;
                case DLM_RECO_NODE_DATA_REQUESTING:
                        st = "requesting";
                        break;
                case DLM_RECO_NODE_DATA_DEAD:
                        st = "dead";
                        break;
                case DLM_RECO_NODE_DATA_RECEIVING:
                        st = "receiving";
                        break;
                case DLM_RECO_NODE_DATA_REQUESTED:
                        st = "requested";
                        break;
                case DLM_RECO_NODE_DATA_DONE:
                        st = "done";
                        break;
                case DLM_RECO_NODE_DATA_FINALIZE_SENT:
                        st = "finalize-sent";
                        break;
                default:
                        st = "bad";
                        break;
                }
                mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
                     dlm->name, ndata->node_num, st);
        }
        list_for_each_entry(res, &dlm->reco.resources, recovering) {
                mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
                     dlm->name, res->lockname.len, res->lockname.name);
        }
}

#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)
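/* The recovery thread re-checks cluster state at least this often (see
 * the wait_event_interruptible_timeout() calls below), even if nothing
 * wakes it explicitly. */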

static int dlm_recovery_thread(void *data)
{
        int status;
        struct dlm_ctxt *dlm = data;
        unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);

        mlog(0, "dlm thread running for %s...\n", dlm->name);

        while (!kthread_should_stop()) {
                if (dlm_domain_fully_joined(dlm)) {
                        status = dlm_do_recovery(dlm);
                        if (status == -EAGAIN) {
                                /* do not sleep, recheck immediately. */
                                continue;
                        }
                        if (status < 0)
                                mlog_errno(status);
                }

                wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
                                                 kthread_should_stop(),
                                                 timeout);
        }

        mlog(0, "quitting DLM recovery thread\n");
        return 0;
}

/* returns true when the recovery master has contacted us */
static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
{
        int ready;
        spin_lock(&dlm->spinlock);
        ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
        spin_unlock(&dlm->spinlock);
        return ready;
}

/* returns true if node is no longer in the domain
 * could be dead or just not joined */
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
{
        int dead;
        spin_lock(&dlm->spinlock);
        dead = !test_bit(node, dlm->domain_map);
        spin_unlock(&dlm->spinlock);
        return dead;
}

/* returns true if the node has been fully recovered,
 * i.e. is no longer in the recovery map */
static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
{
        int recovered;
        spin_lock(&dlm->spinlock);
        recovered = !test_bit(node, dlm->recovery_map);
        spin_unlock(&dlm->spinlock);
        return recovered;
}


void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
        if (dlm_is_node_dead(dlm, node))
                return;

        printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in "
               "domain %s\n", node, dlm->name);

        if (timeout)
                wait_event_timeout(dlm->dlm_reco_thread_wq,
                                   dlm_is_node_dead(dlm, node),
                                   msecs_to_jiffies(timeout));
        else
                wait_event(dlm->dlm_reco_thread_wq,
                           dlm_is_node_dead(dlm, node));
}

void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
{
        if (dlm_is_node_recovered(dlm, node))
                return;

        printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in "
               "domain %s\n", node, dlm->name);

        if (timeout)
                wait_event_timeout(dlm->dlm_reco_thread_wq,
                                   dlm_is_node_recovered(dlm, node),
                                   msecs_to_jiffies(timeout));
        else
                wait_event(dlm->dlm_reco_thread_wq,
                           dlm_is_node_recovered(dlm, node));
}

/* callers of the top-level api calls (dlmlock/dlmunlock) should
 * block on the dlm->reco.event when recovery is in progress.
 * the dlm recovery thread will set this state when it begins
 * recovering a dead node (as the new master or not) and clear
 * the state and wake as soon as all affected lock resources have
 * been marked with the RECOVERY flag */
static int dlm_in_recovery(struct dlm_ctxt *dlm)
{
        int in_recovery;
        spin_lock(&dlm->spinlock);
        in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
        spin_unlock(&dlm->spinlock);
        return in_recovery;
}


void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
{
        if (dlm_in_recovery(dlm)) {
                mlog(0, "%s: reco thread %d in recovery: "
                     "state=%d, master=%u, dead=%u\n",
                     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
                     dlm->reco.state, dlm->reco.new_master,
                     dlm->reco.dead_node);
        }
        wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
}

static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
        spin_lock(&dlm->spinlock);
        BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
        printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n",
               dlm->name, dlm->reco.dead_node);
        dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
        spin_unlock(&dlm->spinlock);
}

static void dlm_end_recovery(struct dlm_ctxt *dlm)
{
        spin_lock(&dlm->spinlock);
        BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
        dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
        spin_unlock(&dlm->spinlock);
        printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name);
        wake_up(&dlm->reco.event);
}

static void dlm_print_recovery_master(struct dlm_ctxt *dlm)
{
        printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the "
               "dead node %u in domain %s\n", dlm->reco.new_master,
               (dlm->node_num == dlm->reco.new_master ? "me" : "he"),
               dlm->reco.dead_node, dlm->name);
}

static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
        int status = 0;
        int ret;

        spin_lock(&dlm->spinlock);

        /* check to see if the new master has died */
        if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
            test_bit(dlm->reco.new_master, dlm->recovery_map)) {
                mlog(0, "new master %u died while recovering %u!\n",
                     dlm->reco.new_master, dlm->reco.dead_node);
                /* unset the new_master, leave dead_node */
                dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
        }

        /* select a target to recover */
        if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
                int bit;

                bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
                if (bit >= O2NM_MAX_NODES || bit < 0)
                        dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
                else
                        dlm_set_reco_dead_node(dlm, bit);
        } else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
                /* BUG? */
                mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
                     dlm->reco.dead_node);
                dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
        }

        if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
                // mlog(0, "nothing to recover! sleeping now!\n");
                spin_unlock(&dlm->spinlock);
                /* return to main thread loop and sleep. */
                return 0;
        }
        mlog(0, "%s(%d): recovery thread found node %u in the recovery map!\n",
             dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
             dlm->reco.dead_node);
        spin_unlock(&dlm->spinlock);

        /* take write barrier */
        /* (stops the list reshuffling thread, proxy ast handling) */
        dlm_begin_recovery(dlm);

        if (dlm->reco.new_master == dlm->node_num)
                goto master_here;

        if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
                /* choose a new master, returns 0 if this node
                 * is the master, -EEXIST if it's another node.
                 * this does not return until a new master is chosen
                 * or recovery completes entirely. */
                ret = dlm_pick_recovery_master(dlm);
                if (!ret) {
                        /* already notified everyone. go. */
                        goto master_here;
                }
                mlog(0, "another node will master this recovery session.\n");
        }

        dlm_print_recovery_master(dlm);

        /* it is safe to start everything back up here
         * because all of the dead node's lock resources
         * have been marked as in-recovery */
        dlm_end_recovery(dlm);

        /* sleep out in main dlm_recovery_thread loop. */
        return 0;

master_here:
        dlm_print_recovery_master(dlm);

        status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
        if (status < 0) {
                /* we should never hit this anymore */
                mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, "
                     "retrying.\n", dlm->name, status, dlm->reco.dead_node);
                /* yield a bit to allow any final network messages
                 * to get handled on remaining nodes */
                msleep(100);
        } else {
                /* success! see if any other nodes need recovery */
                mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
                     dlm->name, dlm->reco.dead_node, dlm->node_num);
                spin_lock(&dlm->spinlock);
                __dlm_reset_recovery(dlm);
                dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
                spin_unlock(&dlm->spinlock);
        }
        dlm_end_recovery(dlm);

        /* continue and look for another dead node */
        return -EAGAIN;
}

static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
{
        int status = 0;
        struct dlm_reco_node_data *ndata;
        int all_nodes_done;
        int destroy = 0;
        int pass = 0;

        do {
                /* we have become recovery master. there is no escaping
                 * this, so just keep trying until we get it. */
                status = dlm_init_recovery_area(dlm, dead_node);
                if (status < 0) {
                        mlog(ML_ERROR, "%s: failed to alloc recovery area, "
                             "retrying\n", dlm->name);
                        msleep(1000);
                }
        } while (status != 0);

        /* safe to access the node data list without a lock, since this
         * process is the only one to change the list */
        list_for_each_entry(ndata, &dlm->reco.node_data, list) {
                BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
                ndata->state = DLM_RECO_NODE_DATA_REQUESTING;

                mlog(0, "%s: Requesting lock info from node %u\n", dlm->name,
                     ndata->node_num);

                if (ndata->node_num == dlm->node_num) {
                        ndata->state = DLM_RECO_NODE_DATA_DONE;
                        continue;
                }

                do {
                        status = dlm_request_all_locks(dlm, ndata->node_num,
                                                       dead_node);
                        if (status < 0) {
                                mlog_errno(status);
                                if (dlm_is_host_down(status)) {
                                        /* node died, ignore it for recovery */
                                        status = 0;
                                        ndata->state = DLM_RECO_NODE_DATA_DEAD;
                                        /* wait for the domain map to catch up
                                         * with the network state. */
                                        wait_event_timeout(dlm->dlm_reco_thread_wq,
                                                           dlm_is_node_dead(dlm,
                                                                ndata->node_num),
                                                           msecs_to_jiffies(1000));
                                        mlog(0, "waited 1 sec for %u, "
                                             "dead? %s\n", ndata->node_num,
                                             dlm_is_node_dead(dlm, ndata->node_num) ?
                                             "yes" : "no");
                                } else {
                                        /* -ENOMEM on the other node */
                                        mlog(0, "%s: node %u returned "
                                             "%d during recovery, retrying "
                                             "after a short wait\n",
                                             dlm->name, ndata->node_num,
                                             status);
                                        msleep(100);
                                }
                        }
                } while (status != 0);

                spin_lock(&dlm_reco_state_lock);
                switch (ndata->state) {
                case DLM_RECO_NODE_DATA_INIT:
                case DLM_RECO_NODE_DATA_FINALIZE_SENT:
                case DLM_RECO_NODE_DATA_REQUESTED:
                        BUG();
                        break;
                case DLM_RECO_NODE_DATA_DEAD:
                        mlog(0, "node %u died after requesting "
                             "recovery info for node %u\n",
                             ndata->node_num, dead_node);
                        /* fine. don't need this node's info.
                         * continue without it. */
                        break;
                case DLM_RECO_NODE_DATA_REQUESTING:
                        ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
                        mlog(0, "now receiving recovery data from "
                             "node %u for dead node %u\n",
                             ndata->node_num, dead_node);
                        break;
                case DLM_RECO_NODE_DATA_RECEIVING:
                        mlog(0, "already receiving recovery data from "
                             "node %u for dead node %u\n",
                             ndata->node_num, dead_node);
                        break;
                case DLM_RECO_NODE_DATA_DONE:
                        mlog(0, "already DONE receiving recovery data "
                             "from node %u for dead node %u\n",
                             ndata->node_num, dead_node);
                        break;
                }
                spin_unlock(&dlm_reco_state_lock);
        }

        mlog(0, "%s: Done requesting all lock info\n", dlm->name);

        /* nodes should be sending reco data now
         * just need to wait */

        while (1) {
                /* check all the nodes now to see if we are
                 * done, or if anyone died */
                all_nodes_done = 1;
                spin_lock(&dlm_reco_state_lock);
                list_for_each_entry(ndata, &dlm->reco.node_data, list) {
                        mlog(0, "checking recovery state of node %u\n",
                             ndata->node_num);
                        switch (ndata->state) {
                        case DLM_RECO_NODE_DATA_INIT:
                        case DLM_RECO_NODE_DATA_REQUESTING:
                                mlog(ML_ERROR, "bad ndata state for "
                                     "node %u: state=%d\n",
                                     ndata->node_num, ndata->state);
                                BUG();
                                break;
                        case DLM_RECO_NODE_DATA_DEAD:
                                mlog(0, "node %u died after "
                                     "requesting recovery info for "
                                     "node %u\n", ndata->node_num,
                                     dead_node);
                                break;
                        case DLM_RECO_NODE_DATA_RECEIVING:
                        case DLM_RECO_NODE_DATA_REQUESTED:
                                mlog(0, "%s: node %u still in state %s\n",
                                     dlm->name, ndata->node_num,
                                     ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
                                     "receiving" : "requested");
                                all_nodes_done = 0;
                                break;
                        case DLM_RECO_NODE_DATA_DONE:
                                mlog(0, "%s: node %u state is done\n",
                                     dlm->name, ndata->node_num);
                                break;
                        case DLM_RECO_NODE_DATA_FINALIZE_SENT:
                                mlog(0, "%s: node %u state is finalize\n",
                                     dlm->name, ndata->node_num);
                                break;
                        }
                }
                spin_unlock(&dlm_reco_state_lock);

                mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
                     all_nodes_done?"yes":"no");
                if (all_nodes_done) {
                        int ret;

                        /* Set this flag on the recovery master to keep
                         * a new recovery for another dead node from
                         * starting before this one is done; otherwise
                         * recovery can hang. */
                        spin_lock(&dlm->spinlock);
                        dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
                        spin_unlock(&dlm->spinlock);

                        /* all nodes are now in DLM_RECO_NODE_DATA_DONE state
                         * just send a finalize message to everyone and
                         * clean up */
                        mlog(0, "all nodes are done! send finalize\n");
                        ret = dlm_send_finalize_reco_message(dlm);
                        if (ret < 0)
                                mlog_errno(ret);

                        spin_lock(&dlm->spinlock);
                        dlm_finish_local_lockres_recovery(dlm, dead_node,
                                                          dlm->node_num);
                        spin_unlock(&dlm->spinlock);
                        mlog(0, "should be done with recovery!\n");

                        mlog(0, "finishing recovery of %s at %lu, "
                             "dead=%u, this=%u, new=%u\n", dlm->name,
                             jiffies, dlm->reco.dead_node,
                             dlm->node_num, dlm->reco.new_master);
                        destroy = 1;
                        status = 0;
                        /* rescan everything marked dirty along the way */
                        dlm_kick_thread(dlm, NULL);
                        break;
                }
                /* wait to be signalled, with periodic timeout
                 * to check for node death */
                wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
                                         kthread_should_stop(),
                                         msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));

        }

        if (destroy)
                dlm_destroy_recovery_area(dlm, dead_node);

        return status;
}

static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
        int num=0;
        struct dlm_reco_node_data *ndata;

        spin_lock(&dlm->spinlock);
        memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
        /* nodes can only be removed (by dying) after dropping
         * this lock, and death will be trapped later, so this should do */
        spin_unlock(&dlm->spinlock);

        while (1) {
                num = find_next_bit(dlm->reco.node_map, O2NM_MAX_NODES, num);
                if (num >= O2NM_MAX_NODES) {
                        break;
                }
                BUG_ON(num == dead_node);

                ndata = kzalloc(sizeof(*ndata), GFP_NOFS);
                if (!ndata) {
                        dlm_destroy_recovery_area(dlm, dead_node);
                        return -ENOMEM;
                }
                ndata->node_num = num;
                ndata->state = DLM_RECO_NODE_DATA_INIT;
                spin_lock(&dlm_reco_state_lock);
                list_add_tail(&ndata->list, &dlm->reco.node_data);
                spin_unlock(&dlm_reco_state_lock);
                num++;
        }

        return 0;
}

static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
        struct dlm_reco_node_data *ndata, *next;
        LIST_HEAD(tmplist);

        spin_lock(&dlm_reco_state_lock);
        list_splice_init(&dlm->reco.node_data, &tmplist);
        spin_unlock(&dlm_reco_state_lock);

        list_for_each_entry_safe(ndata, next, &tmplist, list) {
                list_del_init(&ndata->list);
                kfree(ndata);
        }
}

static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
                                 u8 dead_node)
{
        struct dlm_lock_request lr;
        int ret;
        int status;

        mlog(0, "\n");

        mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
             "to %u\n", dead_node, request_from);

        memset(&lr, 0, sizeof(lr));
        lr.node_idx = dlm->node_num;
        lr.dead_node = dead_node;

        // send message
        ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
                                 &lr, sizeof(lr), request_from, &status);

        /* negative status is handled by caller */
        if (ret < 0)
                mlog(ML_ERROR, "%s: Error %d sending LOCK_REQUEST to node %u "
                     "to recover dead node %u\n", dlm->name, ret,
                     request_from, dead_node);
        else
                ret = status;
        // return from here, then
        // sleep until all received or error
        return ret;
}

int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
                                  void **ret_data)
{
        struct dlm_ctxt *dlm = data;
        struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
        char *buf = NULL;
        struct dlm_work_item *item = NULL;

        if (!dlm_grab(dlm))
                return -EINVAL;

        if (lr->dead_node != dlm->reco.dead_node) {
                mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
                     "dead_node is %u\n", dlm->name, lr->node_idx,
                     lr->dead_node, dlm->reco.dead_node);
                dlm_print_reco_node_status(dlm);
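                /* Note: returning -ENOMEM (rather than a distinct error)
                 * deliberately lands the recovery master in its "retry
                 * after a short wait" path in dlm_remaster_locks(),
                 * instead of having this node treated as dead. */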
                /* this is a hack */
                dlm_put(dlm);
                return -ENOMEM;
        }
        BUG_ON(lr->dead_node != dlm->reco.dead_node);

        item = kzalloc(sizeof(*item), GFP_NOFS);
        if (!item) {
                dlm_put(dlm);
                return -ENOMEM;
        }

        /* this will get freed by dlm_request_all_locks_worker */
        buf = (char *) __get_free_page(GFP_NOFS);
        if (!buf) {
                kfree(item);
                dlm_put(dlm);
                return -ENOMEM;
        }

        /* queue up work for dlm_request_all_locks_worker */
        dlm_grab(dlm);  /* get an extra ref for the work item */
        dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
        item->u.ral.reco_master = lr->node_idx;
        item->u.ral.dead_node = lr->dead_node;
        spin_lock(&dlm->work_lock);
        list_add_tail(&item->list, &dlm->work_list);
        spin_unlock(&dlm->work_lock);
        queue_work(dlm->dlm_worker, &dlm->dispatched_work);

        dlm_put(dlm);
        return 0;
}

static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
{
        struct dlm_migratable_lockres *mres;
        struct dlm_lock_resource *res;
        struct dlm_ctxt *dlm;
        LIST_HEAD(resources);
        int ret;
        u8 dead_node, reco_master;
        int skip_all_done = 0;

        dlm = item->dlm;
        dead_node = item->u.ral.dead_node;
        reco_master = item->u.ral.reco_master;
        mres = (struct dlm_migratable_lockres *)data;

        mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
             dlm->name, dead_node, reco_master);

        if (dead_node != dlm->reco.dead_node ||
            reco_master != dlm->reco.new_master) {
                /* worker could have been created before the recovery master
                 * died. if so, do not continue, but do not error. */
                if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
                        mlog(ML_NOTICE, "%s: will not send recovery state, "
                             "recovery master %u died, thread=(dead=%u,mas=%u)"
                             " current=(dead=%u,mas=%u)\n", dlm->name,
                             reco_master, dead_node, reco_master,
                             dlm->reco.dead_node, dlm->reco.new_master);
                } else {
                        mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
                             "master=%u), request(dead=%u, master=%u)\n",
                             dlm->name, dlm->reco.dead_node,
                             dlm->reco.new_master, dead_node, reco_master);
                }
                goto leave;
        }

        /* lock resources should have already been moved to the
         * dlm->reco.resources list. now move items from that list
         * to a temp list if the dead owner matches. note that the
         * whole cluster recovers only one node at a time, so we
         * can safely move UNKNOWN lock resources for each recovery
         * session. */
        dlm_move_reco_locks_to_list(dlm, &resources, dead_node);

        /* now we can begin blasting lockreses without the dlm lock */

        /* any errors returned will be due to the new_master dying,
         * the dlm_reco_thread should detect this */
        list_for_each_entry(res, &resources, recovering) {
                ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
                                           DLM_MRES_RECOVERY);
                if (ret < 0) {
                        mlog(ML_ERROR, "%s: node %u went down while sending "
                             "recovery state for dead node %u, ret=%d\n", dlm->name,
                             reco_master, dead_node, ret);
                        skip_all_done = 1;
                        break;
                }
        }

        /* move the resources back to the list */
        spin_lock(&dlm->spinlock);
        list_splice_init(&resources, &dlm->reco.resources);
        spin_unlock(&dlm->spinlock);

        if (!skip_all_done) {
                ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
                if (ret < 0) {
                        mlog(ML_ERROR, "%s: node %u went down while sending "
                             "recovery all-done for dead node %u, ret=%d\n",
                             dlm->name, reco_master, dead_node, ret);
                }
        }
leave:
        free_page((unsigned long)data);
}


static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
{
        int ret, tmpret;
        struct dlm_reco_data_done done_msg;

        memset(&done_msg, 0, sizeof(done_msg));
        done_msg.node_idx = dlm->node_num;
        done_msg.dead_node = dead_node;
        mlog(0, "sending DATA DONE message to %u, "
             "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
             done_msg.dead_node);

        ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
                                 sizeof(done_msg), send_to, &tmpret);
        if (ret < 0) {
                mlog(ML_ERROR, "%s: Error %d sending RECO_DATA_DONE to node %u "
                     "to recover dead node %u\n", dlm->name, ret, send_to,
                     dead_node);
                if (!dlm_is_host_down(ret)) {
                        BUG();
                }
        } else
                ret = tmpret;
        return ret;
}


int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
                               void **ret_data)
{
        struct dlm_ctxt *dlm = data;
        struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
        struct dlm_reco_node_data *ndata = NULL;
        int ret = -EINVAL;

        if (!dlm_grab(dlm))
                return -EINVAL;

        mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
             "node_idx=%u, this node=%u\n", done->dead_node,
             dlm->reco.dead_node, done->node_idx, dlm->node_num);

        mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
                        "Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
                        "node_idx=%u, this node=%u\n", done->dead_node,
                        dlm->reco.dead_node, done->node_idx, dlm->node_num);

        spin_lock(&dlm_reco_state_lock);
        list_for_each_entry(ndata, &dlm->reco.node_data, list) {
                if (ndata->node_num != done->node_idx)
                        continue;

                switch (ndata->state) {
                /* should have moved beyond INIT but not to FINALIZE yet */
                case DLM_RECO_NODE_DATA_INIT:
                case DLM_RECO_NODE_DATA_DEAD:
                case DLM_RECO_NODE_DATA_FINALIZE_SENT:
                        mlog(ML_ERROR, "bad ndata state for node %u:"
                             " state=%d\n", ndata->node_num,
                             ndata->state);
                        BUG();
                        break;
                /* these states are possible at this point, anywhere along
                 * the line of recovery */
                case DLM_RECO_NODE_DATA_DONE:
                case DLM_RECO_NODE_DATA_RECEIVING:
                case DLM_RECO_NODE_DATA_REQUESTED:
                case DLM_RECO_NODE_DATA_REQUESTING:
                        mlog(0, "node %u is DONE sending "
                             "recovery data!\n",
                             ndata->node_num);

                        ndata->state = DLM_RECO_NODE_DATA_DONE;
                        ret = 0;
                        break;
                }
        }
        spin_unlock(&dlm_reco_state_lock);

        /* wake the recovery thread, some node is done */
        if (!ret)
                dlm_kick_recovery_thread(dlm);

        if (ret < 0)
                mlog(ML_ERROR, "failed to find recovery node data for node "
                     "%u\n", done->node_idx);
        dlm_put(dlm);

        mlog(0, "leaving reco data done handler, ret=%d\n", ret);
        return ret;
}

static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
                                        struct list_head *list,
                                        u8 dead_node)
{
        struct dlm_lock_resource *res, *next;
        struct dlm_lock *lock;

        spin_lock(&dlm->spinlock);
        list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
                /* always prune any $RECOVERY entries for dead nodes,
                 * otherwise hangs can occur during later recovery */
                if (dlm_is_recovery_lock(res->lockname.name,
                                         res->lockname.len)) {
                        spin_lock(&res->spinlock);
                        list_for_each_entry(lock, &res->granted, list) {
                                if (lock->ml.node == dead_node) {
                                        mlog(0, "AHA! there was "
                                             "a $RECOVERY lock for dead "
                                             "node %u (%s)!\n",
                                             dead_node, dlm->name);
                                        list_del_init(&lock->list);
                                        dlm_lock_put(lock);
                                        /* Can't schedule DLM_UNLOCK_FREE_LOCK
                                         * - do manually */
                                        dlm_lock_put(lock);
                                        break;
                                }
                        }
                        spin_unlock(&res->spinlock);
                        continue;
                }

                if (res->owner == dead_node) {
                        mlog(0, "found lockres owned by dead node while "
                             "doing recovery for node %u. sending it.\n",
                             dead_node);
                        list_move_tail(&res->recovering, list);
                } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
                        mlog(0, "found UNKNOWN owner while doing recovery "
                             "for node %u. sending it.\n", dead_node);
                        list_move_tail(&res->recovering, list);
                }
        }
        spin_unlock(&dlm->spinlock);
}

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
{
        int total_locks = 0;
        struct list_head *iter, *queue = &res->granted;
        int i;

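        /* Pointer arithmetic here relies on the granted, converting and
         * blocked lists being laid out consecutively in struct
         * dlm_lock_resource, so queue++ walks all three (the same layout
         * assumption dlm_list_num_to_pointer() below relies on). */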
        for (i=0; i<3; i++) {
                list_for_each(iter, queue)
                        total_locks++;
                queue++;
        }
        return total_locks;
}


static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
                                    struct dlm_migratable_lockres *mres,
                                    u8 send_to,
                                    struct dlm_lock_resource *res,
                                    int total_locks)
{
        u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
        int mres_total_locks = be32_to_cpu(mres->total_locks);
        int sz, ret = 0, status = 0;
        u8 orig_flags = mres->flags,
           orig_master = mres->master;

        BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
        if (!mres->num_locks)
                return 0;

        sz = sizeof(struct dlm_migratable_lockres) +
                (mres->num_locks * sizeof(struct dlm_migratable_lock));

        /* add an all-done flag if we reached the last lock */
        orig_flags = mres->flags;
        BUG_ON(total_locks > mres_total_locks);
        if (total_locks == mres_total_locks)
                mres->flags |= DLM_MRES_ALL_DONE;

        mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n",
             dlm->name, res->lockname.len, res->lockname.name,
             orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery",
             send_to);

        /* send it */
        ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
                                 sz, send_to, &status);
        if (ret < 0) {
                /* XXX: negative status is not handled.
                 * this will end up killing this node. */
                mlog(ML_ERROR, "%s: res %.*s, Error %d sending MIG_LOCKRES to "
                     "node %u (%s)\n", dlm->name, mres->lockname_len,
                     mres->lockname, ret, send_to,
                     (orig_flags & DLM_MRES_MIGRATION ?
                      "migration" : "recovery"));
        } else {
                /* might get an -ENOMEM back here */
                ret = status;
                if (ret < 0) {
                        mlog_errno(ret);

                        if (ret == -EFAULT) {
                                mlog(ML_ERROR, "node %u told me to kill "
                                     "myself!\n", send_to);
                                BUG();
                        }
                }
        }

        /* zero and reinit the message buffer */
        dlm_init_migratable_lockres(mres, res->lockname.name,
                                    res->lockname.len, mres_total_locks,
                                    mig_cookie, orig_flags, orig_master);
        return ret;
}

static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
                                        const char *lockname, int namelen,
                                        int total_locks, u64 cookie,
                                        u8 flags, u8 master)
{
        /* mres here is one full page */
        clear_page(mres);
        mres->lockname_len = namelen;
        memcpy(mres->lockname, lockname, namelen);
        mres->num_locks = 0;
        mres->total_locks = cpu_to_be32(total_locks);
        mres->mig_cookie = cpu_to_be64(cookie);
        mres->flags = flags;
        mres->master = master;
}

static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
                                          struct dlm_migratable_lockres *mres,
                                          int queue)
{
        if (!lock->lksb)
                return;

        /* Ignore lvb in all locks in the blocked list */
        if (queue == DLM_BLOCKED_LIST)
                return;

        /* Only consider lvbs in locks with granted EX or PR lock levels */
        if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
                return;

        if (dlm_lvb_is_empty(mres->lvb)) {
                memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
                return;
        }

        /* Ensure the lvb copied for migration matches in other valid locks */
        if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
                return;

        mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
             "node=%u\n",
             dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
             dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
             lock->lockres->lockname.len, lock->lockres->lockname.name,
             lock->ml.node);
        dlm_print_one_lock_resource(lock->lockres);
        BUG();
}

/* returns 1 if this lock fills the network structure,
 * 0 otherwise */
static int dlm_add_lock_to_array(struct dlm_lock *lock,
                                 struct dlm_migratable_lockres *mres, int queue)
{
        struct dlm_migratable_lock *ml;
        int lock_num = mres->num_locks;

        ml = &(mres->ml[lock_num]);
        ml->cookie = lock->ml.cookie;
        ml->type = lock->ml.type;
        ml->convert_type = lock->ml.convert_type;
        ml->highest_blocked = lock->ml.highest_blocked;
        ml->list = queue;
        if (lock->lksb) {
                ml->flags = lock->lksb->flags;
                dlm_prepare_lvb_for_migration(lock, mres, queue);
        }
        ml->node = lock->ml.node;
        mres->num_locks++;
        /* we reached the max, send this network message */
        if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
                return 1;
        return 0;
}

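/* A "dummy" lock is an otherwise-impossible field combination (cookie 0,
 * all modes LKM_IVMODE, on the blocked list) used to convey a bare
 * mastery reference when a lockres has no locks to send; the receiving
 * side detects it via dlm_is_dummy_lock() below. */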
static void dlm_add_dummy_lock(struct dlm_ctxt *dlm,
                               struct dlm_migratable_lockres *mres)
{
        struct dlm_lock dummy;
        memset(&dummy, 0, sizeof(dummy));
        dummy.ml.cookie = 0;
        dummy.ml.type = LKM_IVMODE;
        dummy.ml.convert_type = LKM_IVMODE;
        dummy.ml.highest_blocked = LKM_IVMODE;
        dummy.lksb = NULL;
        dummy.ml.node = dlm->node_num;
        dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST);
}

static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm,
                                    struct dlm_migratable_lock *ml,
                                    u8 *nodenum)
{
        if (unlikely(ml->cookie == 0 &&
            ml->type == LKM_IVMODE &&
            ml->convert_type == LKM_IVMODE &&
            ml->highest_blocked == LKM_IVMODE &&
            ml->list == DLM_BLOCKED_LIST)) {
                *nodenum = ml->node;
                return 1;
        }
        return 0;
}

int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
                         struct dlm_migratable_lockres *mres,
                         u8 send_to, u8 flags)
{
        struct list_head *queue;
        int total_locks, i;
        u64 mig_cookie = 0;
        struct dlm_lock *lock;
        int ret = 0;

        BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

        mlog(0, "sending to %u\n", send_to);

        total_locks = dlm_num_locks_in_lockres(res);
        if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
                /* rare, but possible */
                mlog(0, "argh. lockres has %d locks. this will "
                     "require more than one network packet to "
                     "migrate\n", total_locks);
                mig_cookie = dlm_get_next_mig_cookie();
        }

        dlm_init_migratable_lockres(mres, res->lockname.name,
                                    res->lockname.len, total_locks,
                                    mig_cookie, flags, res->owner);

        total_locks = 0;
        for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
                queue = dlm_list_idx_to_ptr(res, i);
                list_for_each_entry(lock, queue, list) {
                        /* add another lock. */
                        total_locks++;
                        if (!dlm_add_lock_to_array(lock, mres, i))
                                continue;

                        /* this filled the lock message,
                         * we must send it immediately. */
                        ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
                                                       res, total_locks);
                        if (ret < 0)
                                goto error;
                }
        }
        if (total_locks == 0) {
                /* send a dummy lock to indicate a mastery reference only */
                mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n",
                     dlm->name, res->lockname.len, res->lockname.name,
                     send_to, flags & DLM_MRES_RECOVERY ? "recovery" :
                     "migration");
                dlm_add_dummy_lock(dlm, mres);
        }
        /* flush any remaining locks */
        ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
        if (ret < 0)
                goto error;
        return ret;

error:
        mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
             dlm->name, ret);
        if (!dlm_is_host_down(ret))
                BUG();
        mlog(0, "%s: node %u went down while sending %s "
             "lockres %.*s\n", dlm->name, send_to,
             flags & DLM_MRES_RECOVERY ? "recovery" : "migration",
             res->lockname.len, res->lockname.name);
        return ret;
}



/*
 * this message will contain no more than one page worth of
 * recovery data, and it will work on only one lockres.
 * there may be many locks in this page, and we may need to wait
 * for additional packets to complete all the locks (rare, but
 * possible).
 */
/*
 * NOTE: the allocation error cases here are scary
 * we really cannot afford to fail an alloc in recovery
 * do we spin?  returning an error only delays the problem really
 */

int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
                            void **ret_data)
{
        struct dlm_ctxt *dlm = data;
        struct dlm_migratable_lockres *mres =
                (struct dlm_migratable_lockres *)msg->buf;
        int ret = 0;
        u8 real_master;
        u8 extra_refs = 0;
        char *buf = NULL;
        struct dlm_work_item *item = NULL;
        struct dlm_lock_resource *res = NULL;
        unsigned int hash;

        if (!dlm_grab(dlm))
                return -EINVAL;

        if (!dlm_joined(dlm)) {
                mlog(ML_ERROR, "Domain %s not joined! "
                     "lockres %.*s, master %u\n",
                     dlm->name, mres->lockname_len,
                     mres->lockname, mres->master);
                dlm_put(dlm);
                return -EINVAL;
        }

        BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

        real_master = mres->master;
        if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
                /* cannot migrate a lockres with no master */
                BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
        }

        mlog(0, "%s message received from node %u\n",
             (mres->flags & DLM_MRES_RECOVERY) ?
             "recovery" : "migration", mres->master);
        if (mres->flags & DLM_MRES_ALL_DONE)
                mlog(0, "all done flag. all lockres data received!\n");

        ret = -ENOMEM;
        buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
        item = kzalloc(sizeof(*item), GFP_NOFS);
        if (!buf || !item)
                goto leave;

        /* lookup the lock to see if we have a secondary queue for this
         * already... just add the locks in and this will have its owner
         * and RECOVERY flag changed when it completes. */
        hash = dlm_lockid_hash(mres->lockname, mres->lockname_len);
        spin_lock(&dlm->spinlock);
        res = __dlm_lookup_lockres_full(dlm, mres->lockname, mres->lockname_len,
                                        hash);
        if (res) {
                /* this will get a ref on res */
                /* mark it as recovering/migrating and hash it */
                spin_lock(&res->spinlock);
                if (res->state & DLM_LOCK_RES_DROPPING_REF) {
                        mlog(0, "%s: node is attempting to migrate "
                             "lockres %.*s, but marked as dropping ref!\n",
                             dlm->name, mres->lockname_len, mres->lockname);
                        ret = -EINVAL;
                        spin_unlock(&res->spinlock);
                        spin_unlock(&dlm->spinlock);
                        dlm_lockres_put(res);
                        goto leave;
                }

                if (mres->flags & DLM_MRES_RECOVERY) {
                        res->state |= DLM_LOCK_RES_RECOVERING;
                } else {
                        if (res->state & DLM_LOCK_RES_MIGRATING) {
                                /* this is at least the second
                                 * lockres message */
                                mlog(0, "lock %.*s is already migrating\n",
                                     mres->lockname_len,
                                     mres->lockname);
                        } else if (res->state & DLM_LOCK_RES_RECOVERING) {
                                /* caller should BUG */
                                mlog(ML_ERROR, "node is attempting to migrate "
                                     "lock %.*s, but marked as recovering!\n",
                                     mres->lockname_len, mres->lockname);
                                ret = -EFAULT;
                                spin_unlock(&res->spinlock);
                                spin_unlock(&dlm->spinlock);
                                dlm_lockres_put(res);
                                goto leave;
                        }
                        res->state |= DLM_LOCK_RES_MIGRATING;
                }
                spin_unlock(&res->spinlock);
                spin_unlock(&dlm->spinlock);
        } else {
                spin_unlock(&dlm->spinlock);
                /* need to allocate, just like if it was
                 * mastered here normally */
                res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
                if (!res)
                        goto leave;

                /* to match the ref that we would have gotten if
                 * dlm_lookup_lockres had succeeded */
                dlm_lockres_get(res);

                /* mark it as recovering/migrating and hash it */
                if (mres->flags & DLM_MRES_RECOVERY)
                        res->state |= DLM_LOCK_RES_RECOVERING;
                else
                        res->state |= DLM_LOCK_RES_MIGRATING;

                spin_lock(&dlm->spinlock);
                __dlm_insert_lockres(dlm, res);
                spin_unlock(&dlm->spinlock);

                /* Add an extra ref for this lock-less lockres lest the
                 * dlm_thread purges it before we get the chance to add
                 * locks to it */
                dlm_lockres_get(res);

                /* There are three refs that need to be put.
                 * 1. Taken above.
                 * 2. kref_init in dlm_new_lockres()->dlm_init_lockres().
                 * 3. dlm_lookup_lockres()
                 * The first one is handled at the end of this function. The
                 * other two are handled in the worker thread after locks have
                 * been attached. Yes, we don't wait for purge time to match
                 * kref_init. The lockres will still have at least one ref
                 * added because it is in the hash __dlm_insert_lockres() */
                extra_refs++;

                /* now that the new lockres is inserted,
                 * make it usable by other processes */
                spin_lock(&res->spinlock);
                res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
                spin_unlock(&res->spinlock);
                wake_up(&res->wq);
        }

        /* at this point we have allocated everything we need,
         * and we have a hashed lockres with an extra ref and
         * the proper res->state flags. */
        ret = 0;
        spin_lock(&res->spinlock);
        /* drop this either when master requery finds a different master
         * or when a lock is added by the recovery worker */
        dlm_lockres_grab_inflight_ref(dlm, res);
        if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
                /* migration cannot have an unknown master */
                BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
                mlog(0, "recovery has passed me a lockres with an "
                     "unknown owner.. will need to requery: "
                     "%.*s\n", mres->lockname_len, mres->lockname);
        } else {
                /* take a reference now to pin the lockres, drop it
                 * when locks are added in the worker */
                dlm_change_lockres_owner(dlm, res, dlm->node_num);
        }
        spin_unlock(&res->spinlock);

        /* queue up work for dlm_mig_lockres_worker */
        dlm_grab(dlm);  /* get an extra ref for the work item */
        memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));  /* copy the whole message */
        dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
        item->u.ml.lockres = res;  /* already have a ref */
        item->u.ml.real_master = real_master;
        item->u.ml.extra_ref = extra_refs;
        spin_lock(&dlm->work_lock);
        list_add_tail(&item->list, &dlm->work_list);
        spin_unlock(&dlm->work_lock);
        queue_work(dlm->dlm_worker, &dlm->dispatched_work);

leave:
        /* One extra ref taken needs to be put here */
        if (extra_refs)
                dlm_lockres_put(res);

        dlm_put(dlm);
        if (ret < 0) {
                kfree(buf);
                kfree(item);
                mlog_errno(ret);
        }

        return ret;
}


static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
{
        struct dlm_ctxt *dlm;
        struct dlm_migratable_lockres *mres;
        int ret = 0;
        struct dlm_lock_resource *res;
        u8 real_master;
        u8 extra_ref;

        dlm = item->dlm;
        mres = (struct dlm_migratable_lockres *)data;

        res = item->u.ml.lockres;
        real_master = item->u.ml.real_master;
        extra_ref = item->u.ml.extra_ref;

        if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
                /* this case is super-rare. only occurs if
                 * node death happens during migration. */
again:
                ret = dlm_lockres_master_requery(dlm, res, &real_master);
                if (ret < 0) {
                        mlog(0, "dlm_lockres_master_requery ret=%d\n",
                             ret);
                        goto again;
                }
                if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
                        mlog(0, "lockres %.*s not claimed. "
                             "this node will take it.\n",
                             res->lockname.len, res->lockname.name);
                } else {
                        spin_lock(&res->spinlock);
                        dlm_lockres_drop_inflight_ref(dlm, res);
                        spin_unlock(&res->spinlock);
                        mlog(0, "master needs to respond to sender "
                             "that node %u still owns %.*s\n",
                             real_master, res->lockname.len,
                             res->lockname.name);
                        /* cannot touch this lockres */
                        goto leave;
                }
        }

        ret = dlm_process_recovery_data(dlm, res, mres);
        if (ret < 0)
                mlog(0, "dlm_process_recovery_data returned %d\n", ret);
        else
                mlog(0, "dlm_process_recovery_data succeeded\n");

        if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
            (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
                ret = dlm_finish_migration(dlm, res, mres->master);
                if (ret < 0)
                        mlog_errno(ret);
        }

leave:
        /* See comment in dlm_mig_lockres_handler() */
        if (res) {
                if (extra_ref)
                        dlm_lockres_put(res);
                dlm_lockres_put(res);
        }
        kfree(data);
}


static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
                                      struct dlm_lock_resource *res,
                                      u8 *real_master)
{
        struct dlm_node_iter iter;
        int nodenum;
        int ret = 0;

        *real_master = DLM_LOCK_RES_OWNER_UNKNOWN;

        /* we only reach here if one of the two nodes in a
         * migration died while the migration was in progress.
         * at this point we need to requery the master. we
         * know that the new_master got as far as creating
         * an mle on at least one node, but we do not know
         * if any nodes had actually cleared the mle and set
         * the master to the new_master. the old master
         * is supposed to set the owner to UNKNOWN in the
         * event of a new_master death, so the only possible
         * responses that we can get from nodes here are
         * that the master is new_master, or that the master
         * is UNKNOWN.
         * if all nodes come back with UNKNOWN then we know
         * the lock needs remastering here.
         * if any node comes back with a valid master, check
         * to see if that master is the one that we are
         * recovering. if so, then the new_master died and
         * we need to remaster this lock. if not, then the
         * new_master survived and that node will respond to
         * other nodes about the owner.
         * if there is an owner, this node needs to dump this
         * lockres and alert the sender that this lockres
         * was rejected. */
        spin_lock(&dlm->spinlock);
        dlm_node_iter_init(dlm->domain_map, &iter);
        spin_unlock(&dlm->spinlock);

        while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
                /* do not send to self */
                if (nodenum == dlm->node_num)
                        continue;
                ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
                if (ret < 0) {
                        mlog_errno(ret);
                        if (!dlm_is_host_down(ret))
                                BUG();
                        /* host is down, so answer for that node would be
                         * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */
                }
                if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
                        mlog(0, "lock master is %u\n", *real_master);
                        break;
                }
        }
        return ret;
}

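/* Ask one node who it believes owns this lockres. The answer rides in
 * the message status: either a node number or DLM_LOCK_RES_OWNER_UNKNOWN
 * (the BUG_ONs below reject anything outside that range). */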
int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
                          u8 nodenum, u8 *real_master)
{
        int ret = -EINVAL;
        struct dlm_master_requery req;
        int status = DLM_LOCK_RES_OWNER_UNKNOWN;

        memset(&req, 0, sizeof(req));
        req.node_idx = dlm->node_num;
        req.namelen = res->lockname.len;
        memcpy(req.name, res->lockname.name, res->lockname.len);

resend:
        ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
                                 &req, sizeof(req), nodenum, &status);
        if (ret < 0)
                mlog(ML_ERROR, "Error %d when sending message %u (key "
                     "0x%x) to node %u\n", ret, DLM_MASTER_REQUERY_MSG,
                     dlm->key, nodenum);
        else if (status == -ENOMEM) {
                mlog_errno(status);
                msleep(50);
                goto resend;
        } else {
                BUG_ON(status < 0);
                BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
                *real_master = (u8) (status & 0xff);
                mlog(0, "node %u responded to master requery with %u\n",
                     nodenum, *real_master);
                ret = 0;
        }
        return ret;
}
1711
1712
1713 /* this function cannot error, so unless the sending
1714 * or receiving of the message failed, the owner can
1715 * be trusted */
1716 int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
1717 void **ret_data)
1718 {
1719 struct dlm_ctxt *dlm = data;
1720 struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
1721 struct dlm_lock_resource *res = NULL;
1722 unsigned int hash;
1723 int master = DLM_LOCK_RES_OWNER_UNKNOWN;
1724 u32 flags = DLM_ASSERT_MASTER_REQUERY;
1725 int dispatched = 0;
1726
1727 if (!dlm_grab(dlm)) {
1728 /* since the domain has gone away on this
1729 * node, the proper response is UNKNOWN */
1730 return master;
1731 }
1732
1733 hash = dlm_lockid_hash(req->name, req->namelen);
1734
1735 spin_lock(&dlm->spinlock);
1736 res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
1737 if (res) {
1738 spin_lock(&res->spinlock);
1739 master = res->owner;
1740 if (master == dlm->node_num) {
1741 int ret = dlm_dispatch_assert_master(dlm, res,
1742 0, 0, flags);
1743 if (ret < 0) {
1744 mlog_errno(ret);
1745 spin_unlock(&res->spinlock);
1746 dlm_lockres_put(res);
1747 spin_unlock(&dlm->spinlock);
1748 dlm_put(dlm);
1749 /* sender will take care of this and retry */
1750 return ret;
1751 } else {
1752 dispatched = 1;
1753 __dlm_lockres_grab_inflight_worker(dlm, res);
1754 spin_unlock(&res->spinlock);
1755 }
1756 } else {
1757 /* put.. in case we are not the master */
1758 spin_unlock(&res->spinlock);
1759 dlm_lockres_put(res);
1760 }
1761 }
1762 spin_unlock(&dlm->spinlock);
1763
1764 if (!dispatched)
1765 dlm_put(dlm);
1766 return master;
1767 }
1768
1769 static inline struct list_head *
1770 dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
1771 {
1772 struct list_head *ret;
1773 BUG_ON(list_num < 0);
1774 BUG_ON(list_num > 2);
1775 ret = &(res->granted);
1776 ret += list_num;
1777 return ret;
1778 }
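/*
 * Editorial sketch (not from the original source): the pointer bump above
 * works because granted/converting/blocked are three consecutive
 * struct list_head members of dlm_lock_resource.  fake_res below is a toy
 * stand-in for that structure.
 */
#include <assert.h>

struct list_head { struct list_head *next, *prev; };

struct fake_res {
	struct list_head granted;	/* list 0: DLM_GRANTED_LIST */
	struct list_head converting;	/* list 1: DLM_CONVERTING_LIST */
	struct list_head blocked;	/* list 2: DLM_BLOCKED_LIST */
};

static struct list_head *list_num_to_ptr(struct fake_res *r, int num)
{
	return &r->granted + num;	/* same trick as above */
}

int main(void)
{
	struct fake_res r;

	assert(list_num_to_ptr(&r, 0) == &r.granted);
	assert(list_num_to_ptr(&r, 1) == &r.converting);
	assert(list_num_to_ptr(&r, 2) == &r.blocked);
	return 0;
}
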
1779 /* TODO: do ast flush business
1780 * TODO: do MIGRATING and RECOVERING spinning
1781 */
1782
1783 /*
1784 * NOTE about in-flight requests during migration:
1785 *
1786 * Before attempting the migrate, the master has marked the lockres as
1787 * MIGRATING and then flushed all of its pending ASTS. So any in-flight
1788 * requests either got queued before the MIGRATING flag got set, in which
1789 * case the lock data will reflect the change and a return message is on
1790 * the way, or the request failed to get in before MIGRATING got set. In
1791 * this case, the caller will be told to spin and wait for the MIGRATING
1792 * flag to be dropped, then recheck the master.
1793 * This holds true for the convert, cancel and unlock cases, and since lvb
1794 * updates are tied to these same messages, it applies to lvb updates as
1795 * well. For the lock case, there is no way a lock can be on the master
1796 * queue and not be on the secondary queue since the lock is always added
1797 * locally first. This means that the new target node will never be sent
1798 * a lock that he doesn't already have on the list.
1799 * In total, this means that the local lock is correct and should not be
1800 * updated to match the one sent by the master. Any messages sent back
1801 * from the master before the MIGRATING flag will bring the lock properly
1802 * up-to-date, and the change will be ordered properly for the waiter.
1803 * We will *not* attempt to modify the lock underneath the waiter.
1804 */
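/*
 * Editorial sketch (not from the original source): the caller-side
 * "spin and recheck" described in the NOTE, modeled with C11 atomics.
 * RES_MIGRATING and the busy-wait are illustrative; the kernel code
 * sleeps on a wait queue instead of yielding.
 */
#include <sched.h>
#include <stdatomic.h>

#define RES_MIGRATING 0x01

static int wait_then_recheck_master(atomic_int *state, atomic_int *owner)
{
	while (atomic_load(state) & RES_MIGRATING)
		sched_yield();		/* wait for the flag to be dropped */
	return atomic_load(owner);	/* then re-read the (possibly new) master */
}

int main(void)
{
	atomic_int state = 0, owner = 2;

	return wait_then_recheck_master(&state, &owner) == 2 ? 0 : 1;
}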
1805
1806 static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1807 struct dlm_lock_resource *res,
1808 struct dlm_migratable_lockres *mres)
1809 {
1810 struct dlm_migratable_lock *ml;
1811 struct list_head *queue, *iter;
1812 struct list_head *tmpq = NULL;
1813 struct dlm_lock *newlock = NULL;
1814 struct dlm_lockstatus *lksb = NULL;
1815 int ret = 0;
1816 int i, j, bad;
1817 struct dlm_lock *lock;
1818 u8 from = O2NM_MAX_NODES;
1819 unsigned int added = 0;
1820 __be64 c;
1821
1822 mlog(0, "running %d locks for this lockres\n", mres->num_locks);
1823 for (i=0; i<mres->num_locks; i++) {
1824 ml = &(mres->ml[i]);
1825
1826 if (dlm_is_dummy_lock(dlm, ml, &from)) {
1827 /* placeholder, just need to set the refmap bit */
1828 BUG_ON(mres->num_locks != 1);
1829 mlog(0, "%s:%.*s: dummy lock for %u\n",
1830 dlm->name, mres->lockname_len, mres->lockname,
1831 from);
1832 spin_lock(&res->spinlock);
1833 dlm_lockres_set_refmap_bit(dlm, res, from);
1834 spin_unlock(&res->spinlock);
1835 added++;
1836 break;
1837 }
1838 BUG_ON(ml->highest_blocked != LKM_IVMODE);
1839 newlock = NULL;
1840 lksb = NULL;
1841
1842 queue = dlm_list_num_to_pointer(res, ml->list);
1843 tmpq = NULL;
1844
1845 /* if the lock is for the local node it needs to
1846 * be moved to the proper location within the queue.
1847 * do not allocate a new lock structure. */
1848 if (ml->node == dlm->node_num) {
1849 /* MIGRATION ONLY! */
1850 BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));
1851
1852 lock = NULL;
1853 spin_lock(&res->spinlock);
1854 for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
1855 tmpq = dlm_list_idx_to_ptr(res, j);
1856 list_for_each(iter, tmpq) {
1857 lock = list_entry(iter,
1858 struct dlm_lock, list);
1859 if (lock->ml.cookie == ml->cookie)
1860 break;
1861 lock = NULL;
1862 }
1863 if (lock)
1864 break;
1865 }
1866
1867 /* lock is always created locally first, and
1868 * destroyed locally last. it must be on the list */
1869 if (!lock) {
1870 c = ml->cookie;
1871 mlog(ML_ERROR, "Could not find local lock "
1872 "with cookie %u:%llu, node %u, "
1873 "list %u, flags 0x%x, type %d, "
1874 "conv %d, highest blocked %d\n",
1875 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1876 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1877 ml->node, ml->list, ml->flags, ml->type,
1878 ml->convert_type, ml->highest_blocked);
1879 __dlm_print_one_lock_resource(res);
1880 BUG();
1881 }
1882
1883 if (lock->ml.node != ml->node) {
1884 c = lock->ml.cookie;
1885 mlog(ML_ERROR, "Mismatched node# in lock "
1886 "cookie %u:%llu, name %.*s, node %u\n",
1887 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1888 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1889 res->lockname.len, res->lockname.name,
1890 lock->ml.node);
1891 c = ml->cookie;
1892 mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
1893 "node %u, list %u, flags 0x%x, type %d, "
1894 "conv %d, highest blocked %d\n",
1895 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1896 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1897 ml->node, ml->list, ml->flags, ml->type,
1898 ml->convert_type, ml->highest_blocked);
1899 __dlm_print_one_lock_resource(res);
1900 BUG();
1901 }
1902
1903 if (tmpq != queue) {
1904 c = ml->cookie;
1905 mlog(0, "Lock cookie %u:%llu was on list %u "
1906 "instead of list %u for %.*s\n",
1907 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1908 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1909 j, ml->list, res->lockname.len,
1910 res->lockname.name);
1911 __dlm_print_one_lock_resource(res);
1912 spin_unlock(&res->spinlock);
1913 continue;
1914 }
1915
1916 /* see NOTE above about why we do not update
1917 * to match the master here */
1918
1919 /* move the lock to its proper place */
1920 /* do not alter lock refcount. switching lists. */
1921 list_move_tail(&lock->list, queue);
1922 spin_unlock(&res->spinlock);
1923 added++;
1924
1925 mlog(0, "just reordered a local lock!\n");
1926 continue;
1927 }
1928
1929 /* lock is for another node. */
1930 newlock = dlm_new_lock(ml->type, ml->node,
1931 be64_to_cpu(ml->cookie), NULL);
1932 if (!newlock) {
1933 ret = -ENOMEM;
1934 goto leave;
1935 }
1936 lksb = newlock->lksb;
1937 dlm_lock_attach_lockres(newlock, res);
1938
1939 if (ml->convert_type != LKM_IVMODE) {
1940 BUG_ON(queue != &res->converting);
1941 newlock->ml.convert_type = ml->convert_type;
1942 }
1943 lksb->flags |= (ml->flags &
1944 (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
1945
1946 if (ml->type == LKM_NLMODE)
1947 goto skip_lvb;
1948
1949 /*
1950 * If the lock is in the blocked list it can't have a valid lvb,
1951 * so skip it
1952 */
1953 if (ml->list == DLM_BLOCKED_LIST)
1954 goto skip_lvb;
1955
1956 if (!dlm_lvb_is_empty(mres->lvb)) {
1957 if (lksb->flags & DLM_LKSB_PUT_LVB) {
1958 /* other node was trying to update
1959 * lvb when node died. recreate the
1960 * lksb with the updated lvb. */
1961 memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
1962 /* the lock resource lvb update must happen
1963 * NOW, before the spinlock is dropped.
1964 * we no longer wait for the AST to update
1965 * the lvb. */
1966 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1967 } else {
1968 /* otherwise, the node is sending its
1969 * most recent valid lvb info */
1970 BUG_ON(ml->type != LKM_EXMODE &&
1971 ml->type != LKM_PRMODE);
1972 if (!dlm_lvb_is_empty(res->lvb) &&
1973 (ml->type == LKM_EXMODE ||
1974 memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
1975 int i;
1976 mlog(ML_ERROR, "%s:%.*s: received bad "
1977 "lvb! type=%d\n", dlm->name,
1978 res->lockname.len,
1979 res->lockname.name, ml->type);
1980 printk("lockres lvb=[");
1981 for (i=0; i<DLM_LVB_LEN; i++)
1982 printk("%02x", res->lvb[i]);
1983 printk("]\nmigrated lvb=[");
1984 for (i=0; i<DLM_LVB_LEN; i++)
1985 printk("%02x", mres->lvb[i]);
1986 printk("]\n");
1987 dlm_print_one_lock_resource(res);
1988 BUG();
1989 }
1990 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1991 }
1992 }
1993 skip_lvb:
1994
1995 /* NOTE:
1996 * wrt lock queue ordering and recovery:
1997 * 1. order of locks on granted queue is
1998 * meaningless.
1999 * 2. order of locks on converting queue is
2000 * LOST with the node death. sorry charlie.
2001 * 3. order of locks on the blocked queue is
2002 * also LOST.
2003 * order of locks does not affect integrity, it
2004 * just means that a lock request may get pushed
2005 * back in line as a result of the node death.
2006 * also note that for a given node the lock order
2007 * for its secondary queue locks is preserved
2008 * relative to each other, but clearly *not*
2009 * preserved relative to locks from other nodes.
2010 */
2011 bad = 0;
2012 spin_lock(&res->spinlock);
2013 list_for_each_entry(lock, queue, list) {
2014 if (lock->ml.cookie == ml->cookie) {
2015 c = lock->ml.cookie;
2016 mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
2017 "exists on this lockres!\n", dlm->name,
2018 res->lockname.len, res->lockname.name,
2019 dlm_get_lock_cookie_node(be64_to_cpu(c)),
2020 dlm_get_lock_cookie_seq(be64_to_cpu(c)));
2021
2022 mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
2023 "node=%u, cookie=%u:%llu, queue=%d\n",
2024 ml->type, ml->convert_type, ml->node,
2025 dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)),
2026 dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)),
2027 ml->list);
2028
2029 __dlm_print_one_lock_resource(res);
2030 bad = 1;
2031 break;
2032 }
2033 }
2034 if (!bad) {
2035 dlm_lock_get(newlock);
2036 if (mres->flags & DLM_MRES_RECOVERY &&
2037 ml->list == DLM_CONVERTING_LIST &&
2038 newlock->ml.type >
2039 newlock->ml.convert_type) {
2040 /* newlock is doing downconvert, add it to the
2041 * head of converting list */
2042 list_add(&newlock->list, queue);
2043 } else
2044 list_add_tail(&newlock->list, queue);
2045 mlog(0, "%s:%.*s: added lock for node %u, "
2046 "setting refmap bit\n", dlm->name,
2047 res->lockname.len, res->lockname.name, ml->node);
2048 dlm_lockres_set_refmap_bit(dlm, res, ml->node);
2049 added++;
2050 }
2051 spin_unlock(&res->spinlock);
2052 }
2053 mlog(0, "done running all the locks\n");
2054
2055 leave:
2056 /* balance the ref taken when the work was queued */
2057 spin_lock(&res->spinlock);
2058 dlm_lockres_drop_inflight_ref(dlm, res);
2059 spin_unlock(&res->spinlock);
2060
2061 if (ret < 0)
2062 mlog_errno(ret);
2063
2064 return ret;
2065 }
2066
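/*
 * Editorial sketch (not from the original source): the cookie fields
 * printed throughout dlm_process_recovery_data().  The layout assumed
 * here is node number in the top byte and sequence in the low 56 bits,
 * mirroring dlm_get_lock_cookie_node()/_seq() from dlmcommon.h.
 */
#include <assert.h>
#include <stdint.h>

static inline uint8_t  cookie_node(uint64_t c) { return (uint8_t)(c >> 56); }
static inline uint64_t cookie_seq(uint64_t c)  { return c & 0x00ffffffffffffffULL; }

int main(void)
{
	uint64_t c = ((uint64_t)3 << 56) | 42;	/* node 3, sequence 42 */

	assert(cookie_node(c) == 3);
	assert(cookie_seq(c) == 42);
	return 0;
}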
2067 void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
2068 struct dlm_lock_resource *res)
2069 {
2070 int i;
2071 struct list_head *queue;
2072 struct dlm_lock *lock, *next;
2073
2074 assert_spin_locked(&dlm->spinlock);
2075 assert_spin_locked(&res->spinlock);
2076 res->state |= DLM_LOCK_RES_RECOVERING;
2077 if (!list_empty(&res->recovering)) {
2078 mlog(0,
2079 "Recovering res %s:%.*s, is already on recovery list!\n",
2080 dlm->name, res->lockname.len, res->lockname.name);
2081 list_del_init(&res->recovering);
2082 dlm_lockres_put(res);
2083 }
2084 /* We need to hold a reference while on the recovery list */
2085 dlm_lockres_get(res);
2086 list_add_tail(&res->recovering, &dlm->reco.resources);
2087
2088 /* find any pending locks and put them back on proper list */
2089 for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
2090 queue = dlm_list_idx_to_ptr(res, i);
2091 list_for_each_entry_safe(lock, next, queue, list) {
2092 dlm_lock_get(lock);
2093 if (lock->convert_pending) {
2094 /* move converting lock back to granted */
2095 mlog(0, "node died with convert pending "
2096 "on %.*s. move back to granted list.\n",
2097 res->lockname.len, res->lockname.name);
2098 dlm_revert_pending_convert(res, lock);
2099 lock->convert_pending = 0;
2100 } else if (lock->lock_pending) {
2101 /* remove pending lock requests completely */
2102 BUG_ON(i != DLM_BLOCKED_LIST);
2103 mlog(0, "node died with lock pending "
2104 "on %.*s. remove from blocked list and skip.\n",
2105 res->lockname.len, res->lockname.name);
2106 /* lock will be floating until ref in
2107 * dlmlock_remote is freed after the network
2108 * call returns. ok for it to not be on any
2109 * list since no ast can be called
2110 * (the master is dead). */
2111 dlm_revert_pending_lock(res, lock);
2112 lock->lock_pending = 0;
2113 } else if (lock->unlock_pending) {
2114 /* if an unlock was in progress, treat as
2115 * if this had completed successfully
2116 * before sending this lock state to the
2117 * new master. note that the dlm_unlock
2118 * call is still responsible for calling
2119 * the unlockast. that will happen after
2120 * the network call times out. for now,
2121 * just move lists to prepare the new
2122 * recovery master. */
2123 BUG_ON(i != DLM_GRANTED_LIST);
2124 mlog(0, "node died with unlock pending "
2125 "on %.*s. remove from blocked list and skip.\n",
2126 res->lockname.len, res->lockname.name);
2127 dlm_commit_pending_unlock(res, lock);
2128 lock->unlock_pending = 0;
2129 } else if (lock->cancel_pending) {
2130 /* if a cancel was in progress, treat as
2131 * if this had completed successfully
2132 * before sending this lock state to the
2133 * new master */
2134 BUG_ON(i != DLM_CONVERTING_LIST);
2135 mlog(0, "node died with cancel pending "
2136 "on %.*s. move back to granted list.\n",
2137 res->lockname.len, res->lockname.name);
2138 dlm_commit_pending_cancel(res, lock);
2139 lock->cancel_pending = 0;
2140 }
2141 dlm_lock_put(lock);
2142 }
2143 }
2144 }
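/*
 * Editorial sketch (not from the original source): the four in-flight
 * cases handled above, reduced to a disposition table.  The enum names
 * are illustrative, not the real pending flags.
 */
#include <stdio.h>

enum pending_op { OP_CONVERT, OP_LOCK, OP_UNLOCK, OP_CANCEL };

static const char *dead_master_disposition(enum pending_op op)
{
	switch (op) {
	case OP_CONVERT: return "revert: back on granted, as if never asked";
	case OP_LOCK:    return "drop: the lock request dies with the master";
	case OP_UNLOCK:  return "commit: treat the unlock as completed";
	case OP_CANCEL:  return "commit: cancel done, back on granted";
	}
	return "unreachable";
}

int main(void)
{
	enum pending_op op;

	for (op = OP_CONVERT; op <= OP_CANCEL; op++)
		printf("%d: %s\n", (int)op, dead_master_disposition(op));
	return 0;
}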
2145
2146
2147
2148 /* removes all recovered locks from the recovery list.
2149 * sets the res->owner to the new master.
2150 * unsets the RECOVERY flag and wakes waiters. */
2151 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
2152 u8 dead_node, u8 new_master)
2153 {
2154 int i;
2155 struct hlist_head *bucket;
2156 struct dlm_lock_resource *res, *next;
2157
2158 assert_spin_locked(&dlm->spinlock);
2159
2160 list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
2161 if (res->owner == dead_node) {
2162 mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
2163 dlm->name, res->lockname.len, res->lockname.name,
2164 res->owner, new_master);
2165 list_del_init(&res->recovering);
2166 spin_lock(&res->spinlock);
2167 /* new_master has our reference from
2168 * the lock state sent during recovery */
2169 dlm_change_lockres_owner(dlm, res, new_master);
2170 res->state &= ~DLM_LOCK_RES_RECOVERING;
2171 if (__dlm_lockres_has_locks(res))
2172 __dlm_dirty_lockres(dlm, res);
2173 spin_unlock(&res->spinlock);
2174 wake_up(&res->wq);
2175 dlm_lockres_put(res);
2176 }
2177 }
2178
2179 /* this will become unnecessary eventually, but
2180 * for now we need to run the whole hash, clear
2181 * the RECOVERING state and set the owner
2182 * if necessary */
2183 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
2184 bucket = dlm_lockres_hash(dlm, i);
2185 hlist_for_each_entry(res, bucket, hash_node) {
2186 if (res->state & DLM_LOCK_RES_RECOVERY_WAITING) {
2187 spin_lock(&res->spinlock);
2188 res->state &= ~DLM_LOCK_RES_RECOVERY_WAITING;
2189 spin_unlock(&res->spinlock);
2190 wake_up(&res->wq);
2191 }
2192
2193 if (!(res->state & DLM_LOCK_RES_RECOVERING))
2194 continue;
2195
2196 if (res->owner != dead_node &&
2197 res->owner != dlm->node_num)
2198 continue;
2199
2200 if (!list_empty(&res->recovering)) {
2201 list_del_init(&res->recovering);
2202 dlm_lockres_put(res);
2203 }
2204
2205 /* new_master has our reference from
2206 * the lock state sent during recovery */
2207 mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
2208 dlm->name, res->lockname.len, res->lockname.name,
2209 res->owner, new_master);
2210 spin_lock(&res->spinlock);
2211 dlm_change_lockres_owner(dlm, res, new_master);
2212 res->state &= ~DLM_LOCK_RES_RECOVERING;
2213 if (__dlm_lockres_has_locks(res))
2214 __dlm_dirty_lockres(dlm, res);
2215 spin_unlock(&res->spinlock);
2216 wake_up(&res->wq);
2217 }
2218 }
2219 }
2220
2221 static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
2222 {
2223 if (local) {
2224 if (lock->ml.type != LKM_EXMODE &&
2225 lock->ml.type != LKM_PRMODE)
2226 return 1;
2227 } else if (lock->ml.type == LKM_EXMODE)
2228 return 1;
2229 return 0;
2230 }
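/*
 * Editorial sketch (not from the original source): the invalidation rule
 * above as a truth table.  The mode constants are local stand-ins for
 * the LKM_* values.
 */
#include <assert.h>

enum { NLMODE, CRMODE, CWMODE, PRMODE, EXMODE };

static int lvb_needs_invalidation(int type, int local)
{
	if (local)	/* our own lock: anything below PR can't vouch for the lvb */
		return type != EXMODE && type != PRMODE;
	/* dead node's lock: only an EX holder could have written the lvb */
	return type == EXMODE;
}

int main(void)
{
	assert(lvb_needs_invalidation(CRMODE, 1));
	assert(!lvb_needs_invalidation(PRMODE, 1));
	assert(lvb_needs_invalidation(EXMODE, 0));
	assert(!lvb_needs_invalidation(PRMODE, 0));
	return 0;
}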
2231
2232 static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
2233 struct dlm_lock_resource *res, u8 dead_node)
2234 {
2235 struct list_head *queue;
2236 struct dlm_lock *lock;
2237 int blank_lvb = 0, local = 0;
2238 int i;
2239 u8 search_node;
2240
2241 assert_spin_locked(&dlm->spinlock);
2242 assert_spin_locked(&res->spinlock);
2243
2244 if (res->owner == dlm->node_num)
2245 /* if this node owned the lockres, and if the dead node
2246 * had an EX when he died, blank out the lvb */
2247 search_node = dead_node;
2248 else {
2249 /* if this is a secondary lockres, and we had no EX or PR
2250 * locks granted, we can no longer trust the lvb */
2251 search_node = dlm->node_num;
2252 local = 1; /* check local state for valid lvb */
2253 }
2254
2255 for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
2256 queue = dlm_list_idx_to_ptr(res, i);
2257 list_for_each_entry(lock, queue, list) {
2258 if (lock->ml.node == search_node) {
2259 if (dlm_lvb_needs_invalidation(lock, local)) {
2260 /* zero the lksb lvb and lockres lvb */
2261 blank_lvb = 1;
2262 memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
2263 }
2264 }
2265 }
2266 }
2267
2268 if (blank_lvb) {
2269 mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
2270 res->lockname.len, res->lockname.name, dead_node);
2271 memset(res->lvb, 0, DLM_LVB_LEN);
2272 }
2273 }
2274
2275 static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
2276 struct dlm_lock_resource *res, u8 dead_node)
2277 {
2278 struct dlm_lock *lock, *next;
2279 unsigned int freed = 0;
2280
2281 /* this node is the lockres master:
2282 * 1) remove any stale locks for the dead node
2283 * 2) if the dead node had an EX when he died, blank out the lvb
2284 */
2285 assert_spin_locked(&dlm->spinlock);
2286 assert_spin_locked(&res->spinlock);
2287
2288 /* We do two dlm_lock_put(). One to remove the lock from the list and the
2289 * other to force the DLM_UNLOCK_FREE_LOCK action so as to free the lock */
2290
2291 /* TODO: check pending_asts, pending_basts here */
2292 list_for_each_entry_safe(lock, next, &res->granted, list) {
2293 if (lock->ml.node == dead_node) {
2294 list_del_init(&lock->list);
2295 dlm_lock_put(lock);
2296 /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
2297 dlm_lock_put(lock);
2298 freed++;
2299 }
2300 }
2301 list_for_each_entry_safe(lock, next, &res->converting, list) {
2302 if (lock->ml.node == dead_node) {
2303 list_del_init(&lock->list);
2304 dlm_lock_put(lock);
2305 /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
2306 dlm_lock_put(lock);
2307 freed++;
2308 }
2309 }
2310 list_for_each_entry_safe(lock, next, &res->blocked, list) {
2311 if (lock->ml.node == dead_node) {
2312 list_del_init(&lock->list);
2313 dlm_lock_put(lock);
2314 /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
2315 dlm_lock_put(lock);
2316 freed++;
2317 }
2318 }
2319
2320 if (freed) {
2321 mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
2322 "dropping ref from lockres\n", dlm->name,
2323 res->lockname.len, res->lockname.name, freed, dead_node);
2324 if (!test_bit(dead_node, res->refmap)) {
2325 mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, "
2326 "but ref was not set\n", dlm->name,
2327 res->lockname.len, res->lockname.name, freed, dead_node);
2328 __dlm_print_one_lock_resource(res);
2329 }
2330 res->state |= DLM_LOCK_RES_RECOVERY_WAITING;
2331 dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2332 } else if (test_bit(dead_node, res->refmap)) {
2333 mlog(0, "%s:%.*s: dead node %u had a ref, but had "
2334 "no locks and had not purged before dying\n", dlm->name,
2335 res->lockname.len, res->lockname.name, dead_node);
2336 dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2337 }
2338
2339 /* do not kick thread yet */
2340 __dlm_dirty_lockres(dlm, res);
2341 }
2342
2343 /* if this node is the recovery master, and there are no
2344 * locks for a given lockres owned by this node that are in
2345 * either PR or EX mode, zero out the lvb before requesting.
2346 *
2347 */
2348
2349
2350 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
2351 {
2352 struct dlm_lock_resource *res;
2353 int i;
2354 struct hlist_head *bucket;
2355 struct hlist_node *tmp;
2356 struct dlm_lock *lock;
2357
2358
2359 /* purge any stale mles */
2360 dlm_clean_master_list(dlm, dead_node);
2361
2362 /*
2363 * now clean up all lock resources. there are two rules:
2364 *
2365 * 1) if the dead node was the master, move the lockres
2366 * to the recovering list. set the RECOVERING flag.
2367 * this lockres needs to be cleaned up before it can
2368 * be used further.
2369 *
2370 * 2) if this node was the master, remove all locks from
2371 * each of the lockres queues that were owned by the
2372 * dead node. once recovery finishes, the dlm thread
2373 * can be kicked again to see if any ASTs or BASTs
2374 * need to be fired as a result.
2375 */
2376 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
2377 bucket = dlm_lockres_hash(dlm, i);
2378 hlist_for_each_entry_safe(res, tmp, bucket, hash_node) {
2379 /* always prune any $RECOVERY entries for dead nodes,
2380 * otherwise hangs can occur during later recovery */
2381 if (dlm_is_recovery_lock(res->lockname.name,
2382 res->lockname.len)) {
2383 spin_lock(&res->spinlock);
2384 list_for_each_entry(lock, &res->granted, list) {
2385 if (lock->ml.node == dead_node) {
2386 mlog(0, "AHA! there was "
2387 "a $RECOVERY lock for dead "
2388 "node %u (%s)!\n",
2389 dead_node, dlm->name);
2390 list_del_init(&lock->list);
2391 dlm_lock_put(lock);
2392 /* Can't schedule
2393 * DLM_UNLOCK_FREE_LOCK
2394 * - do manually */
2395 dlm_lock_put(lock);
2396 break;
2397 }
2398 }
2399
2400 if ((res->owner == dead_node) &&
2401 (res->state & DLM_LOCK_RES_DROPPING_REF)) {
2402 dlm_lockres_get(res);
2403 __dlm_do_purge_lockres(dlm, res);
2404 spin_unlock(&res->spinlock);
2405 wake_up(&res->wq);
2406 dlm_lockres_put(res);
2407 continue;
2408 } else if (res->owner == dlm->node_num)
2409 dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2410 spin_unlock(&res->spinlock);
2411 continue;
2412 }
2413 spin_lock(&res->spinlock);
2414 /* zero the lvb if necessary */
2415 dlm_revalidate_lvb(dlm, res, dead_node);
2416 if (res->owner == dead_node) {
2417 if (res->state & DLM_LOCK_RES_DROPPING_REF) {
2418 mlog(0, "%s:%.*s: owned by "
2419 "dead node %u, this node was "
2420 "dropping its ref when master died. "
2421 "continue, purging the lockres.\n",
2422 dlm->name, res->lockname.len,
2423 res->lockname.name, dead_node);
2424 dlm_lockres_get(res);
2425 __dlm_do_purge_lockres(dlm, res);
2426 spin_unlock(&res->spinlock);
2427 wake_up(&res->wq);
2428 dlm_lockres_put(res);
2429 continue;
2430 }
2431 dlm_move_lockres_to_recovery_list(dlm, res);
2432 } else if (res->owner == dlm->node_num) {
2433 dlm_free_dead_locks(dlm, res, dead_node);
2434 __dlm_lockres_calc_usage(dlm, res);
2435 } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
2436 if (test_bit(dead_node, res->refmap)) {
2437 mlog(0, "%s:%.*s: dead node %u had a ref, but had "
2438 "no locks and had not purged before dying\n",
2439 dlm->name, res->lockname.len,
2440 res->lockname.name, dead_node);
2441 dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2442 }
2443 }
2444 spin_unlock(&res->spinlock);
2445 }
2446 }
2447
2448 }
2449
2450 static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
2451 {
2452 assert_spin_locked(&dlm->spinlock);
2453
2454 if (dlm->reco.new_master == idx) {
2455 mlog(0, "%s: recovery master %d just died\n",
2456 dlm->name, idx);
2457 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2458 /* finalize1 was reached, so it is safe to clear
2459 * the new_master and dead_node. that recovery
2460 * is complete. */
2461 mlog(0, "%s: dead master %d had reached "
2462 "finalize1 state, clearing\n", dlm->name, idx);
2463 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
2464 __dlm_reset_recovery(dlm);
2465 }
2466 }
2467
2468 /* Clean up join state on node death. */
2469 if (dlm->joining_node == idx) {
2470 mlog(0, "Clearing join state for node %u\n", idx);
2471 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
2472 }
2473
2474 /* check to see if the node is already considered dead */
2475 if (!test_bit(idx, dlm->live_nodes_map)) {
2476 mlog(0, "for domain %s, node %d is already dead. "
2477 "another node likely did recovery already.\n",
2478 dlm->name, idx);
2479 return;
2480 }
2481
2482 /* check to see if we do not care about this node */
2483 if (!test_bit(idx, dlm->domain_map)) {
2484 /* This also catches the case that we get a node down
2485 * but haven't joined the domain yet. */
2486 mlog(0, "node %u already removed from domain!\n", idx);
2487 return;
2488 }
2489
2490 clear_bit(idx, dlm->live_nodes_map);
2491
2492 /* make sure local cleanup occurs before the heartbeat events */
2493 if (!test_bit(idx, dlm->recovery_map))
2494 dlm_do_local_recovery_cleanup(dlm, idx);
2495
2496 /* notify anything attached to the heartbeat events */
2497 dlm_hb_event_notify_attached(dlm, idx, 0);
2498
2499 mlog(0, "node %u being removed from domain map!\n", idx);
2500 clear_bit(idx, dlm->domain_map);
2501 clear_bit(idx, dlm->exit_domain_map);
2502 /* wake up migration waiters if a node goes down.
2503 * perhaps later we can genericize this for other waiters. */
2504 wake_up(&dlm->migration_wq);
2505
2506 set_bit(idx, dlm->recovery_map);
2507 }
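/*
 * Editorial sketch (not from the original source): the map transitions
 * performed by __dlm_hb_node_down(), on toy byte-array bitmaps instead
 * of the kernel's bitops.
 */
#include <assert.h>

#define MAP_BYTES 32	/* roomy enough for O2NM_MAX_NODES-sized maps */

static void bm_set(unsigned char *m, int i)   { m[i / 8] |= (unsigned char)(1u << (i % 8)); }
static void bm_clear(unsigned char *m, int i) { m[i / 8] &= (unsigned char)~(1u << (i % 8)); }
static int  bm_test(const unsigned char *m, int i) { return (m[i / 8] >> (i % 8)) & 1; }

static void node_down(unsigned char *live, unsigned char *domain,
		      unsigned char *reco, int idx)
{
	if (!bm_test(live, idx) || !bm_test(domain, idx))
		return;			/* already dead, or never in the domain */
	bm_clear(live, idx);		/* no longer heartbeating */
	bm_clear(domain, idx);		/* out of the domain map */
	bm_set(reco, idx);		/* queued for recovery */
}

int main(void)
{
	unsigned char live[MAP_BYTES] = {0}, domain[MAP_BYTES] = {0}, reco[MAP_BYTES] = {0};

	bm_set(live, 5);
	bm_set(domain, 5);
	node_down(live, domain, reco, 5);
	assert(!bm_test(live, 5) && !bm_test(domain, 5) && bm_test(reco, 5));
	return 0;
}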
2508
2509 void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
2510 {
2511 struct dlm_ctxt *dlm = data;
2512
2513 if (!dlm_grab(dlm))
2514 return;
2515
2516 /*
2517 * This will notify any dlm users that a node in our domain
2518 * went away without notifying us first.
2519 */
2520 if (test_bit(idx, dlm->domain_map))
2521 dlm_fire_domain_eviction_callbacks(dlm, idx);
2522
2523 spin_lock(&dlm->spinlock);
2524 __dlm_hb_node_down(dlm, idx);
2525 spin_unlock(&dlm->spinlock);
2526
2527 dlm_put(dlm);
2528 }
2529
2530 void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
2531 {
2532 struct dlm_ctxt *dlm = data;
2533
2534 if (!dlm_grab(dlm))
2535 return;
2536
2537 spin_lock(&dlm->spinlock);
2538 set_bit(idx, dlm->live_nodes_map);
2539 /* do NOT notify mle attached to the heartbeat events.
2540 * new nodes are not interested in mastery until they have joined. */
2541 spin_unlock(&dlm->spinlock);
2542
2543 dlm_put(dlm);
2544 }
2545
2546 static void dlm_reco_ast(void *astdata)
2547 {
2548 struct dlm_ctxt *dlm = astdata;
2549 mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
2550 dlm->node_num, dlm->name);
2551 }
2552 static void dlm_reco_bast(void *astdata, int blocked_type)
2553 {
2554 struct dlm_ctxt *dlm = astdata;
2555 mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
2556 dlm->node_num, dlm->name);
2557 }
2558 static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
2559 {
2560 mlog(0, "unlockast for recovery lock fired!\n");
2561 }
2562
2563 /*
2564 * dlm_pick_recovery_master will continually attempt to use
2565 * dlmlock() on the special "$RECOVERY" lockres with the
2566 * LKM_NOQUEUE flag to get an EX. every thread that enters
2567 * this function on each node racing to become the recovery
2568 * master will not stop attempting this until either:
2569 * a) this node gets the EX (and becomes the recovery master),
2570 * or b) dlm->reco.new_master gets set to some nodenum
2571 * != O2NM_INVALID_NODE_NUM (another node will do the reco).
2572 * so each time a recovery master is needed, the entire cluster
2573 * will sync at this point. if the new master dies, that will
2574 * be detected in dlm_do_recovery */
2575 static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
2576 {
2577 enum dlm_status ret;
2578 struct dlm_lockstatus lksb;
2579 int status = -EINVAL;
2580
2581 mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
2582 dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
2583 again:
2584 memset(&lksb, 0, sizeof(lksb));
2585
2586 ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
2587 DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN,
2588 dlm_reco_ast, dlm, dlm_reco_bast);
2589
2590 mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
2591 dlm->name, ret, lksb.status);
2592
2593 if (ret == DLM_NORMAL) {
2594 mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
2595 dlm->name, dlm->node_num);
2596
2597 /* got the EX lock. check to see if another node
2598 * just became the reco master */
2599 if (dlm_reco_master_ready(dlm)) {
2600 mlog(0, "%s: got reco EX lock, but %u will "
2601 "do the recovery\n", dlm->name,
2602 dlm->reco.new_master);
2603 status = -EEXIST;
2604 } else {
2605 status = 0;
2606
2607 /* see if recovery was already finished elsewhere */
2608 spin_lock(&dlm->spinlock);
2609 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
2610 status = -EINVAL;
2611 mlog(0, "%s: got reco EX lock, but "
2612 "node got recovered already\n", dlm->name);
2613 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2614 mlog(ML_ERROR, "%s: new master is %u "
2615 "but no dead node!\n",
2616 dlm->name, dlm->reco.new_master);
2617 BUG();
2618 }
2619 }
2620 spin_unlock(&dlm->spinlock);
2621 }
2622
2623 /* if this node has actually become the recovery master,
2624 * set the master and send the messages to begin recovery */
2625 if (!status) {
2626 mlog(0, "%s: dead=%u, this=%u, sending "
2627 "begin_reco now\n", dlm->name,
2628 dlm->reco.dead_node, dlm->node_num);
2629 status = dlm_send_begin_reco_message(dlm,
2630 dlm->reco.dead_node);
2631 /* this always succeeds */
2632 BUG_ON(status);
2633
2634 /* set the new_master to this node */
2635 spin_lock(&dlm->spinlock);
2636 dlm_set_reco_master(dlm, dlm->node_num);
2637 spin_unlock(&dlm->spinlock);
2638 }
2639
2640 /* recovery lock is a special case. ast will not get fired,
2641 * so just go ahead and unlock it. */
2642 ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
2643 if (ret == DLM_DENIED) {
2644 mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
2645 ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm);
2646 }
2647 if (ret != DLM_NORMAL) {
2648 /* this would really suck. this could only happen
2649 * if there was a network error during the unlock
2650 * because of node death. this means the unlock
2651 * is actually "done" and the lock structure is
2652 * even freed. we can continue, but only
2653 * because this specific lock name is special. */
2654 mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
2655 }
2656 } else if (ret == DLM_NOTQUEUED) {
2657 mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
2658 dlm->name, dlm->node_num);
2659 /* another node is master. wait on
2660 * reco.new_master != O2NM_INVALID_NODE_NUM
2661 * for at most one second */
2662 wait_event_timeout(dlm->dlm_reco_thread_wq,
2663 dlm_reco_master_ready(dlm),
2664 msecs_to_jiffies(1000));
2665 if (!dlm_reco_master_ready(dlm)) {
2666 mlog(0, "%s: reco master taking awhile\n",
2667 dlm->name);
2668 goto again;
2669 }
2670 /* another node has informed this one that it is reco master */
2671 mlog(0, "%s: reco master %u is ready to recover %u\n",
2672 dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
2673 status = -EEXIST;
2674 } else if (ret == DLM_RECOVERING) {
2675 mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
2676 dlm->name, dlm->node_num);
2677 goto again;
2678 } else {
2679 struct dlm_lock_resource *res;
2680
2681 /* dlmlock returned something other than NOTQUEUED or NORMAL */
2682 mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
2683 "lksb.status=%s\n", dlm->name, dlm_errname(ret),
2684 dlm_errname(lksb.status));
2685 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2686 DLM_RECOVERY_LOCK_NAME_LEN);
2687 if (res) {
2688 dlm_print_one_lock_resource(res);
2689 dlm_lockres_put(res);
2690 } else {
2691 mlog(ML_ERROR, "recovery lock not found\n");
2692 }
2693 BUG();
2694 }
2695
2696 return status;
2697 }
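/*
 * Editorial sketch (not from the original source): the $RECOVERY race
 * above reduced to a trylock election.  pthread_mutex_trylock() stands
 * in for dlmlock(LKM_NOQUEUE|LKM_RECOVERY); -1 plays
 * O2NM_INVALID_NODE_NUM.
 */
#include <pthread.h>

static pthread_mutex_t reco_lock = PTHREAD_MUTEX_INITIALIZER;
static int new_master = -1;

static int pick_recovery_master(int me)
{
	if (pthread_mutex_trylock(&reco_lock) == 0) {
		new_master = me;	/* won: announce and run the recovery */
		pthread_mutex_unlock(&reco_lock);
		return 0;
	}
	return -1;	/* lost: wait until new_master becomes != -1 */
}

int main(void)
{
	return pick_recovery_master(0);	/* uncontended: this "node" wins */
}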
2698
2699 static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
2700 {
2701 struct dlm_begin_reco br;
2702 int ret = 0;
2703 struct dlm_node_iter iter;
2704 int nodenum;
2705 int status;
2706
2707 mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);
2708
2709 spin_lock(&dlm->spinlock);
2710 dlm_node_iter_init(dlm->domain_map, &iter);
2711 spin_unlock(&dlm->spinlock);
2712
2713 clear_bit(dead_node, iter.node_map);
2714
2715 memset(&br, 0, sizeof(br));
2716 br.node_idx = dlm->node_num;
2717 br.dead_node = dead_node;
2718
2719 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2720 ret = 0;
2721 if (nodenum == dead_node) {
2722 mlog(0, "not sending begin reco to dead node "
2723 "%u\n", dead_node);
2724 continue;
2725 }
2726 if (nodenum == dlm->node_num) {
2727 mlog(0, "not sending begin reco to self\n");
2728 continue;
2729 }
2730 retry:
2731 ret = -EINVAL;
2732 mlog(0, "attempting to send begin reco msg to %d\n",
2733 nodenum);
2734 ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
2735 &br, sizeof(br), nodenum, &status);
2736 /* negative status is handled ok by caller here */
2737 if (ret >= 0)
2738 ret = status;
2739 if (dlm_is_host_down(ret)) {
2740 /* node is down. not involved in recovery
2741 * so just keep going */
2742 mlog(ML_NOTICE, "%s: node %u was down when sending "
2743 "begin reco msg (%d)\n", dlm->name, nodenum, ret);
2744 ret = 0;
2745 }
2746
2747 /*
2748 * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8,
2749 * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN.
2750 * We are handling both for compatibility reasons.
2751 */
2752 if (ret == -EAGAIN || ret == EAGAIN) {
2753 mlog(0, "%s: trying to start recovery of node "
2754 "%u, but node %u is waiting for last recovery "
2755 "to complete, backoff for a bit\n", dlm->name,
2756 dead_node, nodenum);
2757 msleep(100);
2758 goto retry;
2759 }
2760 if (ret < 0) {
2761 struct dlm_lock_resource *res;
2762
2763 /* this is now a serious problem, possibly ENOMEM
2764 * in the network stack. must retry */
2765 mlog_errno(ret);
2766 mlog(ML_ERROR, "begin reco of dlm %s to node %u "
2767 "returned %d\n", dlm->name, nodenum, ret);
2768 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2769 DLM_RECOVERY_LOCK_NAME_LEN);
2770 if (res) {
2771 dlm_print_one_lock_resource(res);
2772 dlm_lockres_put(res);
2773 } else {
2774 mlog(ML_ERROR, "recovery lock not found\n");
2775 }
2776 /* sleep for a bit in hopes that we can avoid
2777 * another ENOMEM */
2778 msleep(100);
2779 goto retry;
2780 }
2781 }
2782
2783 return ret;
2784 }
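/*
 * Editorial sketch (not from the original source): the backoff loop in
 * dlm_send_begin_reco_message(), with a stubbed-out sender.  try_send()
 * is hypothetical; the positive EAGAIN branch mirrors the compatibility
 * note in the code above.
 */
#include <errno.h>
#include <unistd.h>

static int try_send(int node)
{
	(void)node;
	return 0;	/* stub: pretend the peer accepted the message */
}

static int send_begin_reco(int node)
{
	int ret;

	for (;;) {
		ret = try_send(node);
		if (ret == -EAGAIN || ret == EAGAIN) {	/* peer still finalizing */
			usleep(100 * 1000);		/* ~msleep(100) */
			continue;
		}
		return ret;
	}
}

int main(void)
{
	return send_begin_reco(1);
}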
2785
2786 int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
2787 void **ret_data)
2788 {
2789 struct dlm_ctxt *dlm = data;
2790 struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;
2791
2792 /* ok to return 0, domain has gone away */
2793 if (!dlm_grab(dlm))
2794 return 0;
2795
2796 spin_lock(&dlm->spinlock);
2797 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2798 mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
2799 "but this node is in finalize state, waiting on finalize2\n",
2800 dlm->name, br->node_idx, br->dead_node,
2801 dlm->reco.dead_node, dlm->reco.new_master);
2802 spin_unlock(&dlm->spinlock);
2803 dlm_put(dlm);
2804 return -EAGAIN;
2805 }
2806 spin_unlock(&dlm->spinlock);
2807
2808 mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
2809 dlm->name, br->node_idx, br->dead_node,
2810 dlm->reco.dead_node, dlm->reco.new_master);
2811
2812 dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);
2813
2814 spin_lock(&dlm->spinlock);
2815 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2816 if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
2817 mlog(0, "%s: new_master %u died, changing "
2818 "to %u\n", dlm->name, dlm->reco.new_master,
2819 br->node_idx);
2820 } else {
2821 mlog(0, "%s: new_master %u NOT DEAD, changing "
2822 "to %u\n", dlm->name, dlm->reco.new_master,
2823 br->node_idx);
2824 /* may not have seen the new master as dead yet */
2825 }
2826 }
2827 if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
2828 mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
2829 "node %u changing it to %u\n", dlm->name,
2830 dlm->reco.dead_node, br->node_idx, br->dead_node);
2831 }
2832 dlm_set_reco_master(dlm, br->node_idx);
2833 dlm_set_reco_dead_node(dlm, br->dead_node);
2834 if (!test_bit(br->dead_node, dlm->recovery_map)) {
2835 mlog(0, "recovery master %u sees %u as dead, but this "
2836 "node has not yet. marking %u as dead\n",
2837 br->node_idx, br->dead_node, br->dead_node);
2838 if (!test_bit(br->dead_node, dlm->domain_map) ||
2839 !test_bit(br->dead_node, dlm->live_nodes_map))
2840 mlog(0, "%u not in domain/live_nodes map "
2841 "so setting it in reco map manually\n",
2842 br->dead_node);
2843 /* force the recovery cleanup in __dlm_hb_node_down
2844 * both of these will be cleared in a moment */
2845 set_bit(br->dead_node, dlm->domain_map);
2846 set_bit(br->dead_node, dlm->live_nodes_map);
2847 __dlm_hb_node_down(dlm, br->dead_node);
2848 }
2849 spin_unlock(&dlm->spinlock);
2850
2851 dlm_kick_recovery_thread(dlm);
2852
2853 mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
2854 dlm->name, br->node_idx, br->dead_node,
2855 dlm->reco.dead_node, dlm->reco.new_master);
2856
2857 dlm_put(dlm);
2858 return 0;
2859 }
2860
2861 #define DLM_FINALIZE_STAGE2 0x01
2862 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
2863 {
2864 int ret = 0;
2865 struct dlm_finalize_reco fr;
2866 struct dlm_node_iter iter;
2867 int nodenum;
2868 int status;
2869 int stage = 1;
2870
2871 mlog(0, "finishing recovery for node %s:%u, "
2872 "stage %d\n", dlm->name, dlm->reco.dead_node, stage);
2873
2874 spin_lock(&dlm->spinlock);
2875 dlm_node_iter_init(dlm->domain_map, &iter);
2876 spin_unlock(&dlm->spinlock);
2877
2878 stage2:
2879 memset(&fr, 0, sizeof(fr));
2880 fr.node_idx = dlm->node_num;
2881 fr.dead_node = dlm->reco.dead_node;
2882 if (stage == 2)
2883 fr.flags |= DLM_FINALIZE_STAGE2;
2884
2885 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2886 if (nodenum == dlm->node_num)
2887 continue;
2888 ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
2889 &fr, sizeof(fr), nodenum, &status);
2890 if (ret >= 0)
2891 ret = status;
2892 if (ret < 0) {
2893 mlog(ML_ERROR, "Error %d when sending message %u (key "
2894 "0x%x) to node %u\n", ret, DLM_FINALIZE_RECO_MSG,
2895 dlm->key, nodenum);
2896 if (dlm_is_host_down(ret)) {
2897 /* this has no effect on this recovery
2898 * session, so set the status to zero to
2899 * finish out the last recovery */
2900 mlog(ML_ERROR, "node %u went down after this "
2901 "node finished recovery.\n", nodenum);
2902 ret = 0;
2903 continue;
2904 }
2905 break;
2906 }
2907 }
2908 if (stage == 1) {
2909 /* reset the node_iter back to the top and send finalize2 */
2910 iter.curnode = -1;
2911 stage = 2;
2912 goto stage2;
2913 }
2914
2915 return ret;
2916 }
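/*
 * Editorial sketch (not from the original source): the two-pass finalize
 * broadcast above.  send_finalize() is a stub for o2net_send_message()
 * with DLM_FINALIZE_RECO_MSG; the node count and flag value are
 * assumptions made for the demo.
 */
#include <stdio.h>

#define NNODES		4
#define STAGE2_FLAG	0x01	/* plays DLM_FINALIZE_STAGE2 */

static int send_finalize(int node, int flags)
{
	printf("finalize%c -> node %d\n", (flags & STAGE2_FLAG) ? '2' : '1', node);
	return 0;
}

static int send_finalize_all(int self)
{
	int stage, node, ret = 0;

	/* stage 1 to every other node, then rewind and send stage 2 */
	for (stage = 1; stage <= 2 && !ret; stage++)
		for (node = 0; node < NNODES && !ret; node++) {
			if (node == self)
				continue;
			ret = send_finalize(node, stage == 2 ? STAGE2_FLAG : 0);
		}
	return ret;
}

int main(void)
{
	return send_finalize_all(0);
}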
2917
2918 int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
2919 void **ret_data)
2920 {
2921 struct dlm_ctxt *dlm = data;
2922 struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
2923 int stage = 1;
2924
2925 /* ok to return 0, domain has gone away */
2926 if (!dlm_grab(dlm))
2927 return 0;
2928
2929 if (fr->flags & DLM_FINALIZE_STAGE2)
2930 stage = 2;
2931
2932 mlog(0, "%s: node %u finalizing recovery stage%d of "
2933 "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
2934 fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);
2935
2936 spin_lock(&dlm->spinlock);
2937
2938 if (dlm->reco.new_master != fr->node_idx) {
2939 mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
2940 "%u is supposed to be the new master, dead=%u\n",
2941 fr->node_idx, dlm->reco.new_master, fr->dead_node);
2942 BUG();
2943 }
2944 if (dlm->reco.dead_node != fr->dead_node) {
2945 mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
2946 "node %u, but node %u is supposed to be dead\n",
2947 fr->node_idx, fr->dead_node, dlm->reco.dead_node);
2948 BUG();
2949 }
2950
2951 switch (stage) {
2952 case 1:
2953 dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
2954 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2955 mlog(ML_ERROR, "%s: received finalize1 from "
2956 "new master %u for dead node %u, but "
2957 "this node has already received it!\n",
2958 dlm->name, fr->node_idx, fr->dead_node);
2959 dlm_print_reco_node_status(dlm);
2960 BUG();
2961 }
2962 dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
2963 spin_unlock(&dlm->spinlock);
2964 break;
2965 case 2:
2966 if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) {
2967 mlog(ML_ERROR, "%s: received finalize2 from "
2968 "new master %u for dead node %u, but "
2969 "this node did not have finalize1!\n",
2970 dlm->name, fr->node_idx, fr->dead_node);
2971 dlm_print_reco_node_status(dlm);
2972 BUG();
2973 }
2974 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
2975 __dlm_reset_recovery(dlm);
2976 spin_unlock(&dlm->spinlock);
2977 dlm_kick_recovery_thread(dlm);
2978 break;
2979 }
2980
2981 mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n",
2982 dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master);
2983
2984 dlm_put(dlm);
2985 return 0;
2986 }
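/*
 * Editorial sketch (not from the original source): the finalize1/finalize2
 * ordering the handler above enforces with BUG().  The flag value is a
 * local stand-in for DLM_RECO_STATE_FINALIZE.
 */
#include <assert.h>

#define STATE_FINALIZE 0x02

static int apply_finalize(unsigned int *state, int stage)
{
	if (stage == 1) {
		if (*state & STATE_FINALIZE)
			return -1;	/* duplicate finalize1: the handler BUG()s */
		*state |= STATE_FINALIZE;
	} else {
		if (!(*state & STATE_FINALIZE))
			return -1;	/* finalize2 without finalize1: BUG() */
		*state &= ~STATE_FINALIZE;
	}
	return 0;
}

int main(void)
{
	unsigned int state = 0;

	assert(apply_finalize(&state, 1) == 0);
	assert(apply_finalize(&state, 1) == -1);	/* repeated stage 1 rejected */
	assert(apply_finalize(&state, 2) == 0);
	return 0;
}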
2987