Lines Matching +full:mode +full:- +full:recovery
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright 2004-2011 Red Hat, Inc.
 */
/*
 * gfs2_update_stats - Update time based stats
 */
        s64 delta = sample - s->stats[index];

        s->stats[index] += (delta >> 3);
        s->stats[index] += (s64)(abs(delta) - s->stats[index]) >> 2;
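/*
 * A minimal standalone sketch (illustrative names, not the file's code)
 * of the smoothing arithmetic above: the running mean moves 1/8 of the
 * way toward each new sample, and a mean-deviation estimate moves 1/4
 * of the way toward |delta|.  In the full function the two estimates
 * presumably occupy adjacent slots of the stats array; they are kept
 * as separate variables here for clarity.
 */
static inline void ewma_sample(s64 *mean, s64 *meandev, s64 sample)
{
        s64 delta = sample - *mean;

        *mean += delta >> 3;                        /* mean += delta / 8 */
        *meandev += (abs(delta) - *meandev) >> 2;   /* dev += (|delta| - dev) / 4 */
}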
/*
 * gfs2_update_reply_times - Update locking statistics
 *
 * This assumes that gl->gl_dstamp has been set earlier.
 *
 * Requests where the TRY_1CB flags are set are classified as
 * non-blocking; all others are blocking.
 */
        const unsigned gltype = gl->gl_name.ln_type;
        unsigned index = test_bit(GLF_BLOCKING, &gl->gl_flags) ?

        rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp));
        lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
        gfs2_update_stats(&gl->gl_stats, index, rtt);           /* Local */
        gfs2_update_stats(&lks->lkstats[gltype], index, rtt);   /* Global */
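/*
 * Note on the index selection above (hedged): the GLF_BLOCKING test
 * presumably picks the blocking (SRTTB) versus non-blocking (SRTT)
 * smoothed round-trip-time bucket, and the same rtt sample is folded
 * into both the per-glock stats and the per-cpu, per-lock-type global
 * stats.
 */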
/*
 * gfs2_update_request_times - Update locking statistics
 *
 * The irt (lock inter-request times) measures the average time
 */
        const unsigned gltype = gl->gl_name.ln_type;

        dstamp = gl->gl_dstamp;
        gl->gl_dstamp = ktime_get_real();
        irt = ktime_to_ns(ktime_sub(gl->gl_dstamp, dstamp));
        lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
        gfs2_update_stats(&gl->gl_stats, GFS2_LKS_SIRT, irt);           /* Local */
        gfs2_update_stats(&lks->lkstats[gltype], GFS2_LKS_SIRT, irt);   /* Global */
/* in gdlm_ast() */
        unsigned ret = gl->gl_state;

        BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);

        if ((gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID) && gl->gl_lksb.sb_lvbptr)
                memset(gl->gl_lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);

        switch (gl->gl_lksb.sb_status) {
        case -DLM_EUNLOCK:      /* Unlocked, so glock can be freed */
        case -DLM_ECANCEL:      /* Cancel while getting lock */
        case -EAGAIN:           /* Try lock fails */
        case -EDEADLK:          /* Deadlock detected */
        case -ETIMEDOUT:        /* Canceled due to timeout */

        ret = gl->gl_req;
        if (gl->gl_lksb.sb_flags & DLM_SBF_ALTMODE) {
                if (gl->gl_req == LM_ST_SHARED)
                else if (gl->gl_req == LM_ST_DEFERRED)

        set_bit(GLF_INITIAL, &gl->gl_flags);

        if (!test_bit(GLF_INITIAL, &gl->gl_flags))
                gl->gl_lksb.sb_lkid = 0;
static void gdlm_bast(void *arg, int mode)

        switch (mode) {

        pr_err("unknown bast mode %d\n", mode);
/* convert gfs lock-state to dlm lock-mode */

/* in make_mode() */
        return -1;
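/*
 * A hedged sketch of the conversion the comment above describes (the
 * body of make_mode() is mostly elided here): gfs2 glock states map
 * onto dlm lock modes roughly as below, with -1 returned for anything
 * unrecognised.  This is an illustration, not the file's exact code.
 */
static int example_make_mode(const unsigned int lmstate)
{
        switch (lmstate) {
        case LM_ST_UNLOCKED:
                return DLM_LOCK_NL;     /* no lock held */
        case LM_ST_EXCLUSIVE:
                return DLM_LOCK_EX;     /* exclusive */
        case LM_ST_DEFERRED:
                return DLM_LOCK_CW;     /* concurrent write */
        case LM_ST_SHARED:
                return DLM_LOCK_PR;     /* protected read */
        }
        return -1;
}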
/* in make_flags() */
        if (gl->gl_lksb.sb_lvbptr)

        if (gl->gl_lksb.sb_lkid != 0) {

                if (test_bit(GLF_BLOCKING, &gl->gl_flags))
/* in gfs2_reverse_hex() */
                *c-- = hex_asc[value & 0x0f];
/* in gdlm_lock() */
        struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;

        if (gl->gl_lksb.sb_lkid) {

        memset(strname, ' ', GDLM_STRNAME_BYTES - 1);
        strname[GDLM_STRNAME_BYTES - 1] = '\0';
        gfs2_reverse_hex(strname + 7, gl->gl_name.ln_type);
        gfs2_reverse_hex(strname + 23, gl->gl_name.ln_number);
        gl->gl_dstamp = ktime_get_real();

        return dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
                        GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
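/*
 * Illustration (not from the file): the dlm resource name built above
 * is a fixed-width, space-padded hex string.  gfs2_reverse_hex()
 * writes digits backwards from the given offset, so the glock type
 * ends at index 7 and the glock number at index 23.  For example, a
 * glock of type 2 with number 0x1234 would presumably yield:
 *
 *   "       2            1234"
 *
 * giving every glock a unique, fixed-length name within the lockspace.
 */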
/* in gdlm_put_lock() */
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;

        if (gl->gl_lksb.sb_lkid == 0) {

        clear_bit(GLF_BLOCKING, &gl->gl_flags);

        if (gl->gl_lksb.sb_lvbptr && (gl->gl_state == LM_ST_EXCLUSIVE))

        if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&

        error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,

                       gl->gl_name.ln_type,
                       (unsigned long long)gl->gl_name.ln_number, error);
/* in gdlm_cancel() */
        struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
        dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
/*
 * dlm/gfs2 recovery coordination using dlm_recover callbacks
 *
 *  2. dlm_controld blocks dlm-kernel locking activity
 *  3. dlm_controld within dlm-kernel notifies gfs2 (recover_prep)
 *  4. dlm_controld starts and finishes its own user level recovery
 *  5. dlm_controld starts dlm-kernel dlm_recoverd to do kernel recovery
 *  7. dlm_recoverd does its own lock recovery
 *  8. dlm_recoverd unblocks dlm-kernel locking activity
 * 13. gfs2_recover provides recovery results to gfs2_control (recovery_result)
 *
 * - failures during recovery
 * - more specific gfs2 steps in sequence above
 * - parallel recovery steps across all nodes
 * - is there a problem with clearing an lvb bit that should be set
 *   and missing a journal recovery?
 *
 *   require recovery, because the mount in step 4 could not have
 *   and returning.  The mount in step 4 waits until the recovery in
 *
 * - special case of first mounter: first node to mount the fs
 *   and recover any that need recovery before other nodes are allowed
 *
 *   The mounted_lock is demoted to PR when first recovery is done, so
 *   mounter is doing first mount recovery of all journals.
 *   A mounting node needs to acquire control_lock in EX mode before
 *   the first mount recovery, blocking mounts from other nodes, then demotes
 *
 *   do first mounter recovery
 *   mounted_lock EX->PR
 *   control_lock EX->NL, write lvb generation
 *
 *   control_lock EX/NOQUEUE success (if fail -EAGAIN, retry)
 *   mounted_lock EX/NOQUEUE fail -EAGAIN (expected due to other mounters PR)
 *   control_lock EX->NL
 *
 * - mount during recovery
 *   If a node mounts while others are doing recovery (not first mounter),
 *
 * - control_lock lvb format
 *   GDLM_LVB_SIZE-8 bytes of jid bit map.  If bit N is set, it indicates
 *   that jid N needs recovery.
 */
/* in control_lvb_read() */
        memcpy(lvb_bits, ls->ls_control_lvb, GDLM_LVB_SIZE);

/* in control_lvb_write() */
        memcpy(ls->ls_control_lvb, lvb_bits, GDLM_LVB_SIZE);
        memcpy(ls->ls_control_lvb, &gen, sizeof(__le32));

/* in all_jid_bits_clear() */
                             GDLM_LVB_SIZE - JID_BITMAP_OFFSET);
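/*
 * A small illustrative sketch (hypothetical helper, not in the file)
 * of the lvb layout described above: a little-endian 32-bit generation
 * number occupies the start of the lvb, and the per-jid "needs
 * recovery" bitmap begins at JID_BITMAP_OFFSET, covering the remaining
 * GDLM_LVB_SIZE - JID_BITMAP_OFFSET bytes.
 */
static bool example_jid_needs_recovery(const char *lvb_bits, unsigned int jid)
{
        /* bit N set => journal id N needs recovery */
        return test_bit_le(jid, lvb_bits + JID_BITMAP_OFFSET);
}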
/* in sync_wait_cb() */
        complete(&ls->ls_sync_wait);

/* in sync_unlock() */
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;

        error = dlm_unlock(ls->ls_dlm, lksb->sb_lkid, 0, lksb, ls);

                       name, lksb->sb_lkid, error);

        wait_for_completion(&ls->ls_sync_wait);

        if (lksb->sb_status != -DLM_EUNLOCK) {
                       name, lksb->sb_lkid, lksb->sb_status);
                return -1;

static int sync_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags,

        struct lm_lockstruct *ls = &sdp->sd_lockstruct;

        error = dlm_lock(ls->ls_dlm, mode, lksb, flags,
                         strname, GDLM_STRNAME_BYTES - 1,

                fs_err(sdp, "%s lkid %x flags %x mode %d error %d\n",
                       name, lksb->sb_lkid, flags, mode, error);

        wait_for_completion(&ls->ls_sync_wait);

        status = lksb->sb_status;

        if (status && status != -EAGAIN) {
                fs_err(sdp, "%s lkid %x flags %x mode %d status %d\n",
                       name, lksb->sb_lkid, flags, mode, status);
/* in mounted_unlock() */
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        return sync_unlock(sdp, &ls->ls_mounted_lksb, "mounted_lock");

static int mounted_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
{
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        return sync_lock(sdp, mode, flags, GFS2_MOUNTED_LOCK,
                         &ls->ls_mounted_lksb, "mounted_lock");
}

/* in control_unlock() */
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        return sync_unlock(sdp, &ls->ls_control_lksb, "control_lock");

static int control_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
{
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        return sync_lock(sdp, mode, flags, GFS2_CONTROL_LOCK,
                         &ls->ls_control_lksb, "control_lock");
}
/* in gfs2_control_func() */
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;

        spin_lock(&ls->ls_recover_spin);
        /*
         * FIRST_MOUNT means this node is doing first mounter recovery,
         * for which recovery control is handled by
         */
        if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
            test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
                spin_unlock(&ls->ls_recover_spin);
        block_gen = ls->ls_recover_block;
        start_gen = ls->ls_recover_start;
        spin_unlock(&ls->ls_recover_spin);

        /*
         * dlm recovery is in progress and dlm locking is blocked.
         * dlm_recoverd adds to recover_submit[] jids needing recovery
         * gfs2_recover adds to recover_result[] journal recovery results
         * the journal recovery is SUCCESS
         */

        control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);

        spin_lock(&ls->ls_recover_spin);
        if (block_gen != ls->ls_recover_block ||
            start_gen != ls->ls_recover_start) {
                        start_gen, block_gen, ls->ls_recover_block);
                spin_unlock(&ls->ls_recover_spin);
        recover_size = ls->ls_recover_size;

        /*
         * in succession.  Only the first will really do recovery,
         * recovery.  So, another node may have already recovered
         */
                if (ls->ls_recover_result[i] != LM_RD_SUCCESS)
                ls->ls_recover_result[i] = 0;
                if (!test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET))
                __clear_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
                if (!ls->ls_recover_submit[i])
                if (ls->ls_recover_submit[i] < lvb_gen)
                        ls->ls_recover_submit[i] = 0;
                if (!ls->ls_recover_submit[i])
                if (ls->ls_recover_submit[i] < start_gen) {
                        ls->ls_recover_submit[i] = 0;
                        __set_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
        spin_unlock(&ls->ls_recover_spin);

        control_lvb_write(ls, start_gen, ls->ls_lvb_bits);

        /*
         * and clear a jid bit in the lvb if the recovery is a success.
         */
                if (test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) {

        /*
         * No more jid bits set in lvb, all recovery is done, unblock locks
         */
        spin_lock(&ls->ls_recover_spin);
        if (ls->ls_recover_block == block_gen &&
            ls->ls_recover_start == start_gen) {
                clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
                spin_unlock(&ls->ls_recover_spin);
                        start_gen, block_gen, ls->ls_recover_block);
                spin_unlock(&ls->ls_recover_spin);
/* in control_mount() */
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;

        memset(&ls->ls_mounted_lksb, 0, sizeof(struct dlm_lksb));
        memset(&ls->ls_control_lksb, 0, sizeof(struct dlm_lksb));
        memset(&ls->ls_control_lvb, 0, GDLM_LVB_SIZE);
        ls->ls_control_lksb.sb_lvbptr = ls->ls_control_lvb;
        init_completion(&ls->ls_sync_wait);

        set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);

        error = -EINTR;

        /*
         * Other nodes need to do some work in dlm recovery and gfs2_control
         */
        if (error == -EAGAIN) {

        /*
         * we cannot do the first-mount responsibility it implies: recovery.
         */
        if (sdp->sd_args.ar_spectator)

        } else if (error != -EAGAIN) {

        /* not even -EAGAIN should happen here */

        /*
         * lvb_gen will be non-zero.
         */
        control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);

        error = -EINVAL;

        /* first mounter, keep both EX while doing first recovery */
        spin_lock(&ls->ls_recover_spin);
        clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
        set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags);
        set_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
        spin_unlock(&ls->ls_recover_spin);

        if (!all_jid_bits_clear(ls->ls_lvb_bits)) {
                /* journals need recovery, wait until all are clear */
                fs_info(sdp, "control_mount wait for journal recovery\n");

        spin_lock(&ls->ls_recover_spin);
        block_gen = ls->ls_recover_block;
        start_gen = ls->ls_recover_start;
        mount_gen = ls->ls_recover_mount;

        /* generation, which might include new recovery bits set */
        if (sdp->sd_args.ar_spectator) {
                fs_info(sdp, "Recovery is required. Waiting for a "
                        "non-spectator to mount.\n");
                        ls->ls_recover_flags);
                spin_unlock(&ls->ls_recover_spin);

        /* latest recovery generation */
                        lvb_gen, ls->ls_recover_flags);
                spin_unlock(&ls->ls_recover_spin);

        /* dlm recovery in progress, wait for it to finish */
                        lvb_gen, ls->ls_recover_flags);
                spin_unlock(&ls->ls_recover_spin);

        clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
        set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags);
        memset(ls->ls_recover_submit, 0, ls->ls_recover_size * sizeof(uint32_t));
        memset(ls->ls_recover_result, 0, ls->ls_recover_size * sizeof(uint32_t));
        spin_unlock(&ls->ls_recover_spin);
/* in control_first_done() */
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;

        spin_lock(&ls->ls_recover_spin);
        start_gen = ls->ls_recover_start;
        block_gen = ls->ls_recover_block;

        if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags) ||
            !test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
            !test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
                        start_gen, block_gen, ls->ls_recover_flags);
                spin_unlock(&ls->ls_recover_spin);
                return -1;

        /*
         * Wait for the end of a dlm recovery cycle to switch from
         * first mounter recovery.  We can ignore any recover_slot
         * have not fully mounted, so they don't need recovery.
         */
        spin_unlock(&ls->ls_recover_spin);
        wait_on_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY,

        clear_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
        set_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags);
        memset(ls->ls_recover_submit, 0, ls->ls_recover_size * sizeof(uint32_t));
        memset(ls->ls_recover_result, 0, ls->ls_recover_size * sizeof(uint32_t));
        spin_unlock(&ls->ls_recover_spin);

        memset(ls->ls_lvb_bits, 0, GDLM_LVB_SIZE);
        control_lvb_write(ls, start_gen, ls->ls_lvb_bits);
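/*
 * Per the first-mounter sequence in the header comment (mounted_lock
 * EX->PR, control_lock EX->NL with the lvb generation written), the
 * two locks are then converted down.  A hedged sketch using the
 * wrappers shown earlier; the exact flags are an assumption, not the
 * file's elided code.
 */
static int example_first_done_demote(struct gfs2_sbd *sdp)
{
        int error;

        /* keep PR on mounted_lock so later first-mount attempts see it held */
        error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT);
        if (error)
                return error;

        /* drop control_lock to NL, pushing out the updated lvb (VALBLK) */
        return control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT | DLM_LKF_VALBLK);
}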
/*
 * gfs2 jids start at 0, so jid = slot - 1
 */

/* in set_recover_size() */
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;

        if (!ls->ls_lvb_bits) {
                ls->ls_lvb_bits = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
                if (!ls->ls_lvb_bits)
                        return -ENOMEM;

                if (max_jid < slots[i].slot - 1)
                        max_jid = slots[i].slot - 1;

        old_size = ls->ls_recover_size;

                return -ENOMEM;

        spin_lock(&ls->ls_recover_spin);
        memcpy(submit, ls->ls_recover_submit, old_size * sizeof(uint32_t));
        memcpy(result, ls->ls_recover_result, old_size * sizeof(uint32_t));
        kfree(ls->ls_recover_submit);
        kfree(ls->ls_recover_result);
        ls->ls_recover_submit = submit;
        ls->ls_recover_result = result;
        ls->ls_recover_size = new_size;
        spin_unlock(&ls->ls_recover_spin);

/* in free_recover_size() */
        kfree(ls->ls_lvb_bits);
        kfree(ls->ls_recover_submit);
        kfree(ls->ls_recover_result);
        ls->ls_recover_submit = NULL;
        ls->ls_recover_result = NULL;
        ls->ls_recover_size = 0;
        ls->ls_lvb_bits = NULL;
/* dlm calls before it does lock recovery */

/* in gdlm_recover_prep() */
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;

        spin_lock(&ls->ls_recover_spin);
        ls->ls_recover_block = ls->ls_recover_start;
        set_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);

        if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
            test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
                spin_unlock(&ls->ls_recover_spin);

        set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
        spin_unlock(&ls->ls_recover_spin);
/* in gdlm_recover_slot() */
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        int jid = slot->slot - 1;

        spin_lock(&ls->ls_recover_spin);
        if (ls->ls_recover_size < jid + 1) {
                        jid, ls->ls_recover_block, ls->ls_recover_size);
                spin_unlock(&ls->ls_recover_spin);

        if (ls->ls_recover_submit[jid]) {
                        jid, ls->ls_recover_block, ls->ls_recover_submit[jid]);

        ls->ls_recover_submit[jid] = ls->ls_recover_block;
        spin_unlock(&ls->ls_recover_spin);
/* dlm calls after recover_slot and after it completes lock recovery */

/* in gdlm_recover_done() */
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;

        spin_lock(&ls->ls_recover_spin);
        ls->ls_recover_start = generation;

        if (!ls->ls_recover_mount) {
                ls->ls_recover_mount = generation;
                ls->ls_jid = our_slot - 1;

        if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
                queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);

        clear_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);
        wake_up_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY);
        spin_unlock(&ls->ls_recover_spin);
/* gfs2_recover thread has a journal recovery result */

/* in gdlm_recovery_result() */
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;

        if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))

        /* don't care about the recovery of own journal during mount */
        if (jid == ls->ls_jid)

        spin_lock(&ls->ls_recover_spin);
        if (test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
                spin_unlock(&ls->ls_recover_spin);
        if (ls->ls_recover_size < jid + 1) {
                        jid, ls->ls_recover_size);
                spin_unlock(&ls->ls_recover_spin);

        ls->ls_recover_result[jid] = result;

        if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
                queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work,
        spin_unlock(&ls->ls_recover_spin);
/* in gdlm_mount() */
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;

        INIT_DELAYED_WORK(&sdp->sd_control_work, gfs2_control_func);
        spin_lock_init(&ls->ls_recover_spin);
        ls->ls_recover_flags = 0;
        ls->ls_recover_mount = 0;
        ls->ls_recover_start = 0;
        ls->ls_recover_block = 0;
        ls->ls_recover_size = 0;
        ls->ls_recover_submit = NULL;
        ls->ls_recover_result = NULL;
        ls->ls_lvb_bits = NULL;

        error = -EINVAL;

        memcpy(cluster, table, strlen(table) - strlen(fsname));

                                  &ls->ls_dlm);
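/*
 * Hypothetical example of the memcpy above, assuming the usual
 * "cluster:fsname" lock table format: for a table of "mycluster:gfs2vol"
 * with fsname still pointing at the ":gfs2vol" suffix, strlen(table) -
 * strlen(fsname) is the length of "mycluster", so cluster[] receives
 * the cluster name while the filesystem name presumably becomes the
 * name of the dlm lockspace created by the call ending just above.
 */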
        set_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags);

        if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags)) {
                error = -EINVAL;

        ls->ls_first = !!test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
        clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
        wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);

        dlm_release_lockspace(ls->ls_dlm, 2);
/* in gdlm_first_done() */
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;

        if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
/* in gdlm_unmount() */
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;

        if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))

        spin_lock(&ls->ls_recover_spin);
        set_bit(DFL_UNMOUNT, &ls->ls_recover_flags);
        spin_unlock(&ls->ls_recover_spin);
        flush_delayed_work(&sdp->sd_control_work);

        /* mounted_lock and control_lock will be purged in dlm recovery */

        if (ls->ls_dlm) {
                dlm_release_lockspace(ls->ls_dlm, 2);
                ls->ls_dlm = NULL;