
Lines Matching +full:cluster +full:- +full:mode

18 #include "md-bitmap.h"
19 #include "md-cluster.h"
31 void (*bast)(void *arg, int mode); /* blocking AST function pointer*/
33 int mode; member
58 /* If cluster operations (such as adding a disk) must lock the
66 /* We should receive a message after a node has joined the cluster and
125 res->sync_locking_done = true; in sync_ast()
126 wake_up(&res->sync_locking); in sync_ast()
129 static int dlm_lock_sync(struct dlm_lock_resource *res, int mode) in dlm_lock_sync() argument
133 ret = dlm_lock(res->ls, mode, &res->lksb, in dlm_lock_sync()
134 res->flags, res->name, strlen(res->name), in dlm_lock_sync()
135 0, sync_ast, res, res->bast); in dlm_lock_sync()
138 wait_event(res->sync_locking, res->sync_locking_done); in dlm_lock_sync()
139 res->sync_locking_done = false; in dlm_lock_sync()
140 if (res->lksb.sb_status == 0) in dlm_lock_sync()
141 res->mode = mode; in dlm_lock_sync()
142 return res->lksb.sb_status; in dlm_lock_sync()
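
The fragments above show how the driver turns the asynchronous dlm_lock() call into a synchronous one: the completion AST (sync_ast) marks the request done and wakes a waitqueue, dlm_lock_sync() sleeps on that waitqueue, and the real result is read from lksb.sb_status after the AST has fired. A minimal kernel-context sketch of the same pattern follows; the struct and function names are illustrative, only dlm_lock(), struct dlm_lksb and the flags are the real API.

    #include <linux/dlm.h>
    #include <linux/types.h>
    #include <linux/wait.h>
    #include <linux/string.h>

    /* Illustrative stand-in for the driver's lock resource. */
    struct sync_lock {
        dlm_lockspace_t *ls;        /* lockspace from dlm_new_lockspace() */
        struct dlm_lksb lksb;       /* status block: sb_status, sb_lkid, sb_lvbptr */
        wait_queue_head_t wq;       /* caller must init_waitqueue_head() this */
        bool done;
    };

    /* Completion AST: runs in DLM context once the request is resolved. */
    static void sync_lock_ast(void *arg)
    {
        struct sync_lock *sl = arg;

        sl->done = true;
        wake_up(&sl->wq);
    }

    /* Issue a lock request and sleep until the AST reports completion. */
    static int sync_lock(struct sync_lock *sl, const char *name, int mode, u32 flags)
    {
        int ret;

        ret = dlm_lock(sl->ls, mode, &sl->lksb, flags, name, strlen(name),
                       0, sync_lock_ast, sl, NULL);
        if (ret)
            return ret;             /* request was never queued */

        wait_event(sl->wq, sl->done);
        sl->done = false;
        return sl->lksb.sb_status;  /* 0 on grant, negative errno otherwise */
    }
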
154 static int dlm_lock_sync_interruptible(struct dlm_lock_resource *res, int mode, in dlm_lock_sync_interruptible() argument
159 ret = dlm_lock(res->ls, mode, &res->lksb, in dlm_lock_sync_interruptible()
160 res->flags, res->name, strlen(res->name), in dlm_lock_sync_interruptible()
161 0, sync_ast, res, res->bast); in dlm_lock_sync_interruptible()
165 wait_event(res->sync_locking, res->sync_locking_done in dlm_lock_sync_interruptible()
167 || test_bit(MD_CLOSING, &mddev->flags)); in dlm_lock_sync_interruptible()
168 if (!res->sync_locking_done) { in dlm_lock_sync_interruptible()
174 ret = dlm_unlock(res->ls, res->lksb.sb_lkid, DLM_LKF_CANCEL, in dlm_lock_sync_interruptible()
175 &res->lksb, res); in dlm_lock_sync_interruptible()
176 res->sync_locking_done = false; in dlm_lock_sync_interruptible()
179 "%s return %d\n", res->name, ret); in dlm_lock_sync_interruptible()
180 return -EPERM; in dlm_lock_sync_interruptible()
182 res->sync_locking_done = false; in dlm_lock_sync_interruptible()
183 if (res->lksb.sb_status == 0) in dlm_lock_sync_interruptible()
184 res->mode = mode; in dlm_lock_sync_interruptible()
185 return res->lksb.sb_status; in dlm_lock_sync_interruptible()
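
dlm_lock_sync_interruptible() adds a second wake-up condition (MD_CLOSING being set while the array is stopped) and, if the wait ends before the completion AST has run, cancels the still-pending request and reports -EPERM. A hedged sketch of that cancel path, reusing the illustrative struct above (DLM_LKF_CANCEL and dlm_unlock() are the real API):

    /* Sketch: abort a lock request whose completion AST has not fired yet. */
    static int sync_lock_cancel(struct sync_lock *sl)
    {
        int ret;

        ret = dlm_unlock(sl->ls, sl->lksb.sb_lkid, DLM_LKF_CANCEL,
                         &sl->lksb, sl);
        if (!ret)
            /* The AST still runs for a cancelled request, so wait for it,
             * just as the driver does before reusing the resource. */
            wait_event(sl->wq, sl->done);
        sl->done = false;
        return ret ? ret : -EPERM;  /* mirror the driver's -EPERM result */
    }
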
189 char *name, void (*bastfn)(void *arg, int mode), int with_lvb) in lockres_init() argument
193 struct md_cluster_info *cinfo = mddev->cluster_info; in lockres_init()
198 init_waitqueue_head(&res->sync_locking); in lockres_init()
199 res->sync_locking_done = false; in lockres_init()
200 res->ls = cinfo->lockspace; in lockres_init()
201 res->mddev = mddev; in lockres_init()
202 res->mode = DLM_LOCK_IV; in lockres_init()
204 res->name = kzalloc(namelen + 1, GFP_KERNEL); in lockres_init()
205 if (!res->name) { in lockres_init()
206 pr_err("md-cluster: Unable to allocate resource name for resource %s\n", name); in lockres_init()
209 strlcpy(res->name, name, namelen + 1); in lockres_init()
211 res->lksb.sb_lvbptr = kzalloc(LVB_SIZE, GFP_KERNEL); in lockres_init()
212 if (!res->lksb.sb_lvbptr) { in lockres_init()
213 pr_err("md-cluster: Unable to allocate LVB for resource %s\n", name); in lockres_init()
216 res->flags = DLM_LKF_VALBLK; in lockres_init()
220 res->bast = bastfn; in lockres_init()
222 res->flags |= DLM_LKF_EXPEDITE; in lockres_init()
226 pr_err("md-cluster: Unable to lock NL on new lock resource %s\n", name); in lockres_init()
229 res->flags &= ~DLM_LKF_EXPEDITE; in lockres_init()
230 res->flags |= DLM_LKF_CONVERT; in lockres_init()
234 kfree(res->lksb.sb_lvbptr); in lockres_init()
235 kfree(res->name); in lockres_init()
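
lockres_init() shows the life cycle of a lock resource: allocate it, attach a lock value block if requested (DLM_LKF_VALBLK), take the lock once in NL (null) mode with DLM_LKF_EXPEDITE so a lock ID exists, then swap EXPEDITE for DLM_LKF_CONVERT so every later request converts that same lock instead of creating a new one. A compressed sketch of that flag sequence, under the same assumptions as the sketches above:

    /* Sketch: establish a DLM lock in NL mode, then convert it in place.
     * with_lvb assumes sl->lksb.sb_lvbptr was allocated beforehand. */
    static int lockres_establish(struct sync_lock *sl, const char *name, bool with_lvb)
    {
        u32 flags = with_lvb ? DLM_LKF_VALBLK : 0;
        int ret;

        /* First request: NL conflicts with nothing, and DLM_LKF_EXPEDITE
         * lets it bypass the conversion queue. */
        ret = sync_lock(sl, name, DLM_LOCK_NL, flags | DLM_LKF_EXPEDITE);
        if (ret)
            return ret;

        /* All later requests reuse lksb.sb_lkid as conversions. */
        return sync_lock(sl, name, DLM_LOCK_CR, flags | DLM_LKF_CONVERT);
    }
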
251 ret = dlm_unlock(res->ls, res->lksb.sb_lkid, DLM_LKF_FORCEUNLOCK, in lockres_free()
252 &res->lksb, res); in lockres_free()
254 pr_err("failed to unlock %s return %d\n", res->name, ret); in lockres_free()
256 wait_event(res->sync_locking, res->sync_locking_done); in lockres_free()
258 kfree(res->name); in lockres_free()
259 kfree(res->lksb.sb_lvbptr); in lockres_free()
268 ri = (struct resync_info *)lockres->lksb.sb_lvbptr; in add_resync_info()
269 ri->lo = cpu_to_le64(lo); in add_resync_info()
270 ri->hi = cpu_to_le64(hi); in add_resync_info()
280 memcpy(&ri, lockres->lksb.sb_lvbptr, sizeof(struct resync_info)); in read_resync_info()
286 s->hi = hi; in read_resync_info()
287 s->lo = le64_to_cpu(ri.lo); in read_resync_info()
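
add_resync_info() and read_resync_info() exchange the in-progress resync range through the lock value block (LVB) of a bitmap lock: two little-endian 64-bit sector numbers, lo and hi. A minimal sketch of that payload and the endianness handling, assuming the structure really contains only the two fields visible here:

    #include <linux/types.h>
    #include <linux/string.h>

    /* Assumed LVB payload: resync range [lo, hi) in sectors, little-endian. */
    struct resync_range {
        __le64 lo;
        __le64 hi;
    };

    static void resync_range_write(void *lvb, u64 lo, u64 hi)
    {
        struct resync_range *rr = lvb;

        rr->lo = cpu_to_le64(lo);
        rr->hi = cpu_to_le64(hi);
    }

    static void resync_range_read(const void *lvb, u64 *lo, u64 *hi)
    {
        struct resync_range rr;

        memcpy(&rr, lvb, sizeof(rr));  /* snapshot first, the LVB may be rewritten */
        *lo = le64_to_cpu(rr.lo);
        *hi = le64_to_cpu(rr.hi);
    }
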
296 struct mddev *mddev = thread->mddev; in recover_bitmaps()
297 struct md_cluster_info *cinfo = mddev->cluster_info; in recover_bitmaps()
304 while (cinfo->recovery_map) { in recover_bitmaps()
305 slot = fls64((u64)cinfo->recovery_map) - 1; in recover_bitmaps()
310 pr_err("md-cluster: Cannot initialize bitmaps\n"); in recover_bitmaps()
316 pr_err("md-cluster: Could not DLM lock %s: %d\n", in recover_bitmaps()
322 pr_err("md-cluster: Could not copy data from bitmap %d\n", slot); in recover_bitmaps()
327 spin_lock_irq(&cinfo->suspend_lock); in recover_bitmaps()
328 list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list) in recover_bitmaps()
329 if (slot == s->slot) { in recover_bitmaps()
330 list_del(&s->list); in recover_bitmaps()
333 spin_unlock_irq(&cinfo->suspend_lock); in recover_bitmaps()
336 if (lo < mddev->recovery_cp) in recover_bitmaps()
337 mddev->recovery_cp = lo; in recover_bitmaps()
340 if (mddev->recovery_cp != MaxSector) { in recover_bitmaps()
346 &mddev->recovery); in recover_bitmaps()
347 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in recover_bitmaps()
348 md_wakeup_thread(mddev->thread); in recover_bitmaps()
353 clear_bit(slot, &cinfo->recovery_map); in recover_bitmaps()
360 struct md_cluster_info *cinfo = mddev->cluster_info; in recover_prep()
361 set_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state); in recover_prep()
366 struct md_cluster_info *cinfo = mddev->cluster_info; in __recover_slot()
368 set_bit(slot, &cinfo->recovery_map); in __recover_slot()
369 if (!cinfo->recovery_thread) { in __recover_slot()
370 cinfo->recovery_thread = md_register_thread(recover_bitmaps, in __recover_slot()
372 if (!cinfo->recovery_thread) { in __recover_slot()
373 pr_warn("md-cluster: Could not create recovery thread\n"); in __recover_slot()
377 md_wakeup_thread(cinfo->recovery_thread); in __recover_slot()
383 struct md_cluster_info *cinfo = mddev->cluster_info; in recover_slot()
385 pr_info("md-cluster: %s Node %d/%d down. My slot: %d. Initiating recovery.\n", in recover_slot()
386 mddev->bitmap_info.cluster_name, in recover_slot()
387 slot->nodeid, slot->slot, in recover_slot()
388 cinfo->slot_number); in recover_slot()
390 * cluster-md begins with 0 */ in recover_slot()
391 __recover_slot(mddev, slot->slot - 1); in recover_slot()
399 struct md_cluster_info *cinfo = mddev->cluster_info; in recover_done()
401 cinfo->slot_number = our_slot; in recover_done()
402 /* completion only needs to be completed when a node joins the cluster, in recover_done()
404 if (test_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state)) { in recover_done()
405 complete(&cinfo->completion); in recover_done()
406 clear_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state); in recover_done()
408 clear_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state); in recover_done()
411 /* this op is called when a node joins the cluster, and does lock recovery
424 static void ack_bast(void *arg, int mode) in ack_bast() argument
427 struct md_cluster_info *cinfo = res->mddev->cluster_info; in ack_bast()
429 if (mode == DLM_LOCK_EX) { in ack_bast()
430 if (test_bit(MD_CLUSTER_ALREADY_IN_CLUSTER, &cinfo->state)) in ack_bast()
431 md_wakeup_thread(cinfo->recv_thread); in ack_bast()
433 set_bit(MD_CLUSTER_PENDING_RECV_EVENT, &cinfo->state); in ack_bast()
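
ack_bast() is the blocking AST installed on the ACK lock resource and acts as a doorbell: when a sending node up-converts ACK to EX, every receiver's CR lock blocks that request, this AST fires with mode == DLM_LOCK_EX, and the receiver either wakes its receive thread or, if it has not finished joining, records the event in a pending bit. A hedged sketch of the same idea with made-up state names (md_wakeup_thread() is the real md helper):

    /* Illustrative per-array state; the real driver keeps much more here.
     * struct md_thread comes from drivers/md/md.h. */
    struct doorbell_state {
        unsigned long flags;            /* DB_* bits below */
        struct md_thread *recv_thread;
    };
    enum { DB_JOINED, DB_PENDING };

    /* Blocking AST: another node wants EX on ACK, i.e. a message is posted. */
    static void doorbell_bast(void *arg, int mode)
    {
        struct doorbell_state *db = arg;

        if (mode != DLM_LOCK_EX)
            return;

        if (test_bit(DB_JOINED, &db->flags))
            md_wakeup_thread(db->recv_thread);
        else
            set_bit(DB_PENDING, &db->flags);  /* replay once join completes */
    }
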
441 list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list) in __remove_suspend_info()
442 if (slot == s->slot) { in __remove_suspend_info()
443 list_del(&s->list); in __remove_suspend_info()
451 struct md_cluster_info *cinfo = mddev->cluster_info; in remove_suspend_info()
452 mddev->pers->quiesce(mddev, 1); in remove_suspend_info()
453 spin_lock_irq(&cinfo->suspend_lock); in remove_suspend_info()
455 spin_unlock_irq(&cinfo->suspend_lock); in remove_suspend_info()
456 mddev->pers->quiesce(mddev, 0); in remove_suspend_info()
463 struct md_cluster_info *cinfo = mddev->cluster_info; in process_suspend_info()
471 clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery); in process_suspend_info()
473 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in process_suspend_info()
474 md_wakeup_thread(mddev->thread); in process_suspend_info()
495 md_bitmap_sync_with_cluster(mddev, cinfo->sync_low, cinfo->sync_hi, lo, hi); in process_suspend_info()
496 cinfo->sync_low = lo; in process_suspend_info()
497 cinfo->sync_hi = hi; in process_suspend_info()
502 s->slot = slot; in process_suspend_info()
503 s->lo = lo; in process_suspend_info()
504 s->hi = hi; in process_suspend_info()
505 mddev->pers->quiesce(mddev, 1); in process_suspend_info()
506 spin_lock_irq(&cinfo->suspend_lock); in process_suspend_info()
509 list_add(&s->list, &cinfo->suspend_list); in process_suspend_info()
510 spin_unlock_irq(&cinfo->suspend_lock); in process_suspend_info()
511 mddev->pers->quiesce(mddev, 0); in process_suspend_info()
517 struct md_cluster_info *cinfo = mddev->cluster_info; in process_add_new_disk()
524 sprintf(disk_uuid + len, "%pU", cmsg->uuid); in process_add_new_disk()
525 snprintf(raid_slot, 16, "RAID_DISK=%d", le32_to_cpu(cmsg->raid_slot)); in process_add_new_disk()
527 init_completion(&cinfo->newdisk_completion); in process_add_new_disk()
528 set_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state); in process_add_new_disk()
529 kobject_uevent_env(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE, envp); in process_add_new_disk()
530 wait_for_completion_timeout(&cinfo->newdisk_completion, in process_add_new_disk()
532 clear_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state); in process_add_new_disk()
539 struct md_cluster_info *cinfo = mddev->cluster_info; in process_metadata_update()
540 mddev->good_device_nr = le32_to_cpu(msg->raid_slot); in process_metadata_update()
542 dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR); in process_metadata_update()
543 wait_event(mddev->thread->wqueue, in process_metadata_update()
545 test_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state)); in process_metadata_update()
546 md_reload_sb(mddev, mddev->good_device_nr); in process_metadata_update()
556 rdev = md_find_rdev_nr_rcu(mddev, le32_to_cpu(msg->raid_slot)); in process_remove_disk()
558 set_bit(ClusterRemove, &rdev->flags); in process_remove_disk()
559 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in process_remove_disk()
560 md_wakeup_thread(mddev->thread); in process_remove_disk()
564 __func__, __LINE__, le32_to_cpu(msg->raid_slot)); in process_remove_disk()
573 rdev = md_find_rdev_nr_rcu(mddev, le32_to_cpu(msg->raid_slot)); in process_readd_disk()
574 if (rdev && test_bit(Faulty, &rdev->flags)) in process_readd_disk()
575 clear_bit(Faulty, &rdev->flags); in process_readd_disk()
578 __func__, __LINE__, le32_to_cpu(msg->raid_slot)); in process_readd_disk()
586 if (WARN(mddev->cluster_info->slot_number - 1 == le32_to_cpu(msg->slot), in process_recvd_msg()
587 "node %d received it's own msg\n", le32_to_cpu(msg->slot))) in process_recvd_msg()
588 return -1; in process_recvd_msg()
589 switch (le32_to_cpu(msg->type)) { in process_recvd_msg()
594 set_capacity(mddev->gendisk, mddev->array_sectors); in process_recvd_msg()
595 revalidate_disk(mddev->gendisk); in process_recvd_msg()
598 set_bit(MD_RESYNCING_REMOTE, &mddev->recovery); in process_recvd_msg()
599 process_suspend_info(mddev, le32_to_cpu(msg->slot), in process_recvd_msg()
600 le64_to_cpu(msg->low), in process_recvd_msg()
601 le64_to_cpu(msg->high)); in process_recvd_msg()
613 __recover_slot(mddev, le32_to_cpu(msg->slot)); in process_recvd_msg()
616 ret = -1; in process_recvd_msg()
618 __func__, __LINE__, msg->slot); in process_recvd_msg()
628 struct md_cluster_info *cinfo = thread->mddev->cluster_info; in recv_daemon()
629 struct dlm_lock_resource *ack_lockres = cinfo->ack_lockres; in recv_daemon()
630 struct dlm_lock_resource *message_lockres = cinfo->message_lockres; in recv_daemon()
634 mutex_lock(&cinfo->recv_mutex); in recv_daemon()
638 mutex_unlock(&cinfo->recv_mutex); in recv_daemon()
643 memcpy(&msg, message_lockres->lksb.sb_lvbptr, sizeof(struct cluster_msg)); in recv_daemon()
644 ret = process_recvd_msg(thread->mddev, &msg); in recv_daemon()
652 /*up-convert to PR on message_lockres*/ in recv_daemon()
665 mutex_unlock(&cinfo->recv_mutex); in recv_daemon()
675 struct mddev *mddev = cinfo->mddev; in lock_token()
684 &cinfo->state)) { in lock_token()
686 &cinfo->state); in lock_token()
688 md_wakeup_thread(mddev->thread); in lock_token()
691 error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX); in lock_token()
693 clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state); in lock_token()
696 pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n", in lock_token()
700 mutex_lock(&cinfo->recv_mutex); in lock_token()
709 wait_event(cinfo->wait, in lock_comm()
710 !test_and_set_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state)); in lock_comm()
717 WARN_ON(cinfo->token_lockres->mode != DLM_LOCK_EX); in unlock_comm()
718 mutex_unlock(&cinfo->recv_mutex); in unlock_comm()
719 dlm_unlock_sync(cinfo->token_lockres); in unlock_comm()
720 clear_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state); in unlock_comm()
721 wake_up(&cinfo->wait); in unlock_comm()
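
lock_comm() and unlock_comm() serialize senders with a single state bit plus a waitqueue instead of a mutex, because the send path sometimes hands the already-held token over to the metadata-update path: a sender sleeps until its test_and_set_bit() wins, and release clears the bit and wakes the next waiter. A minimal sketch of that bit-gate pattern with illustrative names:

    /* Sketch: one bit used as a sleeping lock. */
    struct bit_gate {
        unsigned long state;            /* bit 0: a sender currently owns the gate */
        wait_queue_head_t wait;
    };

    static void bit_gate_lock(struct bit_gate *g)
    {
        /* Sleep until this caller is the one that flips bit 0 from 0 to 1. */
        wait_event(g->wait, !test_and_set_bit(0, &g->state));
    }

    static void bit_gate_unlock(struct bit_gate *g)
    {
        clear_bit(0, &g->state);
        wake_up(&g->wait);              /* let the next waiting sender retry */
    }
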
728 * 1. Grabs the message lockresource in EX mode
739 int slot = cinfo->slot_number - 1; in __sendmsg()
741 cmsg->slot = cpu_to_le32(slot); in __sendmsg()
743 error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_EX); in __sendmsg()
745 pr_err("md-cluster: failed to get EX on MESSAGE (%d)\n", error); in __sendmsg()
749 memcpy(cinfo->message_lockres->lksb.sb_lvbptr, (void *)cmsg, in __sendmsg()
751 /*down-convert EX to CW on Message*/ in __sendmsg()
752 error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_CW); in __sendmsg()
754 pr_err("md-cluster: failed to convert EX to CW on MESSAGE(%d)\n", in __sendmsg()
759 /*up-convert CR to EX on Ack*/ in __sendmsg()
760 error = dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_EX); in __sendmsg()
762 pr_err("md-cluster: failed to convert CR to EX on ACK(%d)\n", in __sendmsg()
767 /*down-convert EX to CR on Ack*/ in __sendmsg()
768 error = dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR); in __sendmsg()
770 pr_err("md-cluster: failed to convert EX to CR on ACK(%d)\n", in __sendmsg()
776 error = dlm_unlock_sync(cinfo->message_lockres); in __sendmsg()
778 pr_err("md-cluster: failed convert to NL on MESSAGE(%d)\n", in __sendmsg()
800 struct md_cluster_info *cinfo = mddev->cluster_info; in gather_all_resync_info()
813 return -ENOMEM; in gather_all_resync_info()
814 if (i == (cinfo->slot_number - 1)) { in gather_all_resync_info()
819 bm_lockres->flags |= DLM_LKF_NOQUEUE; in gather_all_resync_info()
821 if (ret == -EAGAIN) { in gather_all_resync_info()
826 (unsigned long long) s->lo, in gather_all_resync_info()
827 (unsigned long long) s->hi, i); in gather_all_resync_info()
828 spin_lock_irq(&cinfo->suspend_lock); in gather_all_resync_info()
829 s->slot = i; in gather_all_resync_info()
830 list_add(&s->list, &cinfo->suspend_list); in gather_all_resync_info()
831 spin_unlock_irq(&cinfo->suspend_lock); in gather_all_resync_info()
845 pr_warn("md-cluster: Could not gather bitmaps from slot %d", i); in gather_all_resync_info()
849 if ((hi > 0) && (lo < mddev->recovery_cp)) { in gather_all_resync_info()
850 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in gather_all_resync_info()
851 mddev->recovery_cp = lo; in gather_all_resync_info()
869 return -ENOMEM; in join()
871 INIT_LIST_HEAD(&cinfo->suspend_list); in join()
872 spin_lock_init(&cinfo->suspend_lock); in join()
873 init_completion(&cinfo->completion); in join()
874 set_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state); in join()
875 init_waitqueue_head(&cinfo->wait); in join()
876 mutex_init(&cinfo->recv_mutex); in join()
878 mddev->cluster_info = cinfo; in join()
879 cinfo->mddev = mddev; in join()
882 sprintf(str, "%pU", mddev->uuid); in join()
883 ret = dlm_new_lockspace(str, mddev->bitmap_info.cluster_name, in join()
885 &md_ls_ops, mddev, &ops_rv, &cinfo->lockspace); in join()
888 wait_for_completion(&cinfo->completion); in join()
889 if (nodes < cinfo->slot_number) { in join()
890 pr_err("md-cluster: Slot allotted(%d) is greater than available slots(%d).", in join()
891 cinfo->slot_number, nodes); in join()
892 ret = -ERANGE; in join()
896 ret = -ENOMEM; in join()
897 cinfo->recv_thread = md_register_thread(recv_daemon, mddev, "cluster_recv"); in join()
898 if (!cinfo->recv_thread) { in join()
899 pr_err("md-cluster: cannot allocate memory for recv_thread!\n"); in join()
902 cinfo->message_lockres = lockres_init(mddev, "message", NULL, 1); in join()
903 if (!cinfo->message_lockres) in join()
905 cinfo->token_lockres = lockres_init(mddev, "token", NULL, 0); in join()
906 if (!cinfo->token_lockres) in join()
908 cinfo->no_new_dev_lockres = lockres_init(mddev, "no-new-dev", NULL, 0); in join()
909 if (!cinfo->no_new_dev_lockres) in join()
912 ret = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX); in join()
914 ret = -EAGAIN; in join()
915 pr_err("md-cluster: can't join cluster to avoid lock issue\n"); in join()
918 cinfo->ack_lockres = lockres_init(mddev, "ack", ack_bast, 0); in join()
919 if (!cinfo->ack_lockres) { in join()
920 ret = -ENOMEM; in join()
924 if (dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR)) in join()
925 pr_err("md-cluster: failed to get a sync CR lock on ACK!(%d)\n", in join()
927 dlm_unlock_sync(cinfo->token_lockres); in join()
928 /* get sync CR lock on no-new-dev. */ in join()
929 if (dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR)) in join()
930 pr_err("md-cluster: failed to get a sync CR lock on no-new-dev!(%d)\n", ret); in join()
933 pr_info("md-cluster: Joined cluster %s slot %d\n", str, cinfo->slot_number); in join()
934 snprintf(str, 64, "bitmap%04d", cinfo->slot_number - 1); in join()
935 cinfo->bitmap_lockres = lockres_init(mddev, str, NULL, 1); in join()
936 if (!cinfo->bitmap_lockres) { in join()
937 ret = -ENOMEM; in join()
940 if (dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW)) { in join()
942 ret = -EINVAL; in join()
946 cinfo->resync_lockres = lockres_init(mddev, "resync", NULL, 0); in join()
947 if (!cinfo->resync_lockres) { in join()
948 ret = -ENOMEM; in join()
954 set_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state); in join()
955 md_unregister_thread(&cinfo->recovery_thread); in join()
956 md_unregister_thread(&cinfo->recv_thread); in join()
957 lockres_free(cinfo->message_lockres); in join()
958 lockres_free(cinfo->token_lockres); in join()
959 lockres_free(cinfo->ack_lockres); in join()
960 lockres_free(cinfo->no_new_dev_lockres); in join()
961 lockres_free(cinfo->resync_lockres); in join()
962 lockres_free(cinfo->bitmap_lockres); in join()
963 if (cinfo->lockspace) in join()
964 dlm_release_lockspace(cinfo->lockspace, 2); in join()
965 mddev->cluster_info = NULL; in join()
972 struct md_cluster_info *cinfo = mddev->cluster_info; in load_bitmaps()
976 pr_err("md-cluster: failed to gather all resyn infos\n"); in load_bitmaps()
977 set_bit(MD_CLUSTER_ALREADY_IN_CLUSTER, &cinfo->state); in load_bitmaps()
979 if (test_and_clear_bit(MD_CLUSTER_PENDING_RECV_EVENT, &cinfo->state)) in load_bitmaps()
980 md_wakeup_thread(cinfo->recv_thread); in load_bitmaps()
985 struct md_cluster_info *cinfo = mddev->cluster_info; in resync_bitmap()
999 struct md_cluster_info *cinfo = mddev->cluster_info; in leave()
1005 * is leaving the cluster with dirty bitmap, also we in leave()
1007 if (cinfo->slot_number > 0 && mddev->recovery_cp != MaxSector) in leave()
1010 set_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state); in leave()
1011 md_unregister_thread(&cinfo->recovery_thread); in leave()
1012 md_unregister_thread(&cinfo->recv_thread); in leave()
1013 lockres_free(cinfo->message_lockres); in leave()
1014 lockres_free(cinfo->token_lockres); in leave()
1015 lockres_free(cinfo->ack_lockres); in leave()
1016 lockres_free(cinfo->no_new_dev_lockres); in leave()
1017 lockres_free(cinfo->resync_lockres); in leave()
1018 lockres_free(cinfo->bitmap_lockres); in leave()
1020 dlm_release_lockspace(cinfo->lockspace, 2); in leave()
1026 * DLM starts the slot numbers from 1, whereas cluster-md
1031 struct md_cluster_info *cinfo = mddev->cluster_info; in slot_number()
1033 return cinfo->slot_number - 1; in slot_number()
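
slot_number() and the bitmap lock names both rely on the same off-by-one mapping: DLM hands out slot numbers starting at 1, while md-cluster numbers its slots (and the bitmapNNNN lock resources) from 0. A tiny illustration of the mapping, with a hypothetical helper name:

    /* Sketch: map a DLM slot id to the md-cluster slot and its bitmap lock name. */
    static void bitmap_lock_name(char *buf, size_t len, int dlm_slot)
    {
        int md_slot = dlm_slot - 1;     /* DLM counts from 1, md-cluster from 0 */

        snprintf(buf, len, "bitmap%04d", md_slot);  /* DLM slot 3 -> "bitmap0002" */
    }
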
1039 * If it is already locked, token is in EX mode, and hence lock_token()
1044 struct md_cluster_info *cinfo = mddev->cluster_info; in metadata_update_start()
1052 &cinfo->state); in metadata_update_start()
1054 md_wakeup_thread(mddev->thread); in metadata_update_start()
1056 wait_event(cinfo->wait, in metadata_update_start()
1057 !test_and_set_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state) || in metadata_update_start()
1058 test_and_clear_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state)); in metadata_update_start()
1061 if (cinfo->token_lockres->mode == DLM_LOCK_EX) { in metadata_update_start()
1062 clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state); in metadata_update_start()
1067 clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state); in metadata_update_start()
1073 struct md_cluster_info *cinfo = mddev->cluster_info; in metadata_update_finish()
1077 int raid_slot = -1; in metadata_update_finish()
1084 if (rdev->raid_disk > -1 && !test_bit(Faulty, &rdev->flags)) { in metadata_update_finish()
1085 raid_slot = rdev->desc_nr; in metadata_update_finish()
1092 pr_warn("md-cluster: No good device id found to send\n"); in metadata_update_finish()
1093 clear_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state); in metadata_update_finish()
1100 struct md_cluster_info *cinfo = mddev->cluster_info; in metadata_update_cancel()
1101 clear_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state); in metadata_update_cancel()
1113 int node_num = mddev->bitmap_info.nodes; in cluster_check_sync_size()
1114 int current_slot = md_cluster_ops->slot_number(mddev); in cluster_check_sync_size()
1115 struct bitmap *bitmap = mddev->bitmap; in cluster_check_sync_size()
1119 sb = kmap_atomic(bitmap->storage.sb_page); in cluster_check_sync_size()
1120 my_sync_size = sb->sync_size; in cluster_check_sync_size()
1130 return -1; in cluster_check_sync_size()
1140 pr_err("md-cluster: Cannot initialize %s\n", str); in cluster_check_sync_size()
1142 return -1; in cluster_check_sync_size()
1144 bm_lockres->flags |= DLM_LKF_NOQUEUE; in cluster_check_sync_size()
1150 sb = kmap_atomic(bitmap->storage.sb_page); in cluster_check_sync_size()
1152 sync_size = sb->sync_size; in cluster_check_sync_size()
1153 else if (sync_size != sb->sync_size) { in cluster_check_sync_size()
1156 return -1; in cluster_check_sync_size()
1162 return (my_sync_size == sync_size) ? 0 : -1; in cluster_check_sync_size()
1166 * Updating the size of a cluster raid is a little more complex; we perform it
1177 struct md_cluster_info *cinfo = mddev->cluster_info; in update_size()
1181 int raid_slot = -1; in update_size()
1189 if (rdev->raid_disk >= 0 && !test_bit(Faulty, &rdev->flags)) { in update_size()
1190 raid_slot = rdev->desc_nr; in update_size()
1208 pr_err("md-cluster: No good device id found to send\n"); in update_size()
1225 set_capacity(mddev->gendisk, mddev->array_sectors); in update_size()
1226 revalidate_disk(mddev->gendisk); in update_size()
1229 ret = mddev->pers->resize(mddev, old_dev_sectors); in update_size()
1231 revalidate_disk(mddev->gendisk); in update_size()
1242 struct md_cluster_info *cinfo = mddev->cluster_info; in resync_start()
1243 return dlm_lock_sync_interruptible(cinfo->resync_lockres, DLM_LOCK_EX, mddev); in resync_start()
1248 struct md_cluster_info *cinfo = mddev->cluster_info; in resync_info_update()
1254 memcpy(&ri, cinfo->bitmap_lockres->lksb.sb_lvbptr, sizeof(struct resync_info)); in resync_info_update()
1259 add_resync_info(cinfo->bitmap_lockres, lo, hi); in resync_info_update()
1260 /* Re-acquire the lock to refresh LVB */ in resync_info_update()
1261 dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW); in resync_info_update()
1268 * resync_finish (md_reap_sync_thread -> resync_finish) in resync_info_update()
1278 struct md_cluster_info *cinfo = mddev->cluster_info; in resync_finish()
1281 clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery); in resync_finish()
1287 if (!test_bit(MD_CLOSING, &mddev->flags)) in resync_finish()
1289 dlm_unlock_sync(cinfo->resync_lockres); in resync_finish()
1296 struct md_cluster_info *cinfo = mddev->cluster_info; in area_resyncing()
1301 test_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state)) in area_resyncing()
1304 spin_lock_irq(&cinfo->suspend_lock); in area_resyncing()
1305 if (list_empty(&cinfo->suspend_list)) in area_resyncing()
1307 list_for_each_entry(s, &cinfo->suspend_list, list) in area_resyncing()
1308 if (hi > s->lo && lo < s->hi) { in area_resyncing()
1313 spin_unlock_irq(&cinfo->suspend_lock); in area_resyncing()
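
area_resyncing() walks suspend_list under suspend_lock and applies the standard half-open interval overlap test (hi > s->lo && lo < s->hi) to decide whether an I/O range collides with a region another node is currently resyncing. The predicate in isolation:

    /* Sketch: does [lo, hi) overlap the suspended range [s_lo, s_hi)? */
    static bool ranges_overlap(u64 lo, u64 hi, u64 s_lo, u64 s_hi)
    {
        return hi > s_lo && lo < s_hi;  /* e.g. [0,8) vs [8,16) -> false */
    }
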
1317 /* add_new_disk() - initiates a disk add
1323 struct md_cluster_info *cinfo = mddev->cluster_info; in add_new_disk()
1326 struct mdp_superblock_1 *sb = page_address(rdev->sb_page); in add_new_disk()
1327 char *uuid = sb->device_uuid; in add_new_disk()
1332 cmsg.raid_slot = cpu_to_le32(rdev->desc_nr); in add_new_disk()
1339 cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE; in add_new_disk()
1340 ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX); in add_new_disk()
1341 cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE; in add_new_disk()
1343 if (ret == -EAGAIN) in add_new_disk()
1344 ret = -ENOENT; in add_new_disk()
1348 dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR); in add_new_disk()
1352 * md_wakeup_thread(mddev->thread) in add_new_disk()
1353 * -> conf->thread (raid1d) in add_new_disk()
1354 * -> md_check_recovery -> md_update_sb in add_new_disk()
1355 * -> metadata_update_start/finish in add_new_disk()
1361 set_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state); in add_new_disk()
1362 wake_up(&cinfo->wait); in add_new_disk()
1369 struct md_cluster_info *cinfo = mddev->cluster_info; in add_new_disk_cancel()
1370 clear_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state); in add_new_disk_cancel()
1376 struct md_cluster_info *cinfo = mddev->cluster_info; in new_disk_ack()
1378 if (!test_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state)) { in new_disk_ack()
1379 pr_warn("md-cluster(%s): Spurious cluster confirmation\n", mdname(mddev)); in new_disk_ack()
1380 return -EINVAL; in new_disk_ack()
1384 dlm_unlock_sync(cinfo->no_new_dev_lockres); in new_disk_ack()
1385 complete(&cinfo->newdisk_completion); in new_disk_ack()
1392 struct md_cluster_info *cinfo = mddev->cluster_info; in remove_disk()
1394 cmsg.raid_slot = cpu_to_le32(rdev->desc_nr); in remove_disk()
1402 struct md_cluster_info *cinfo = mddev->cluster_info; in lock_all_bitmaps()
1404 cinfo->other_bitmap_lockres = in lock_all_bitmaps()
1405 kcalloc(mddev->bitmap_info.nodes - 1, in lock_all_bitmaps()
1407 if (!cinfo->other_bitmap_lockres) { in lock_all_bitmaps()
1413 for (slot = 0; slot < mddev->bitmap_info.nodes; slot++) { in lock_all_bitmaps()
1419 cinfo->other_bitmap_lockres[i] = lockres_init(mddev, str, NULL, 1); in lock_all_bitmaps()
1420 if (!cinfo->other_bitmap_lockres[i]) in lock_all_bitmaps()
1421 return -ENOMEM; in lock_all_bitmaps()
1423 cinfo->other_bitmap_lockres[i]->flags |= DLM_LKF_NOQUEUE; in lock_all_bitmaps()
1424 ret = dlm_lock_sync(cinfo->other_bitmap_lockres[i], DLM_LOCK_PW); in lock_all_bitmaps()
1426 held = -1; in lock_all_bitmaps()
1435 struct md_cluster_info *cinfo = mddev->cluster_info; in unlock_all_bitmaps()
1439 if (cinfo->other_bitmap_lockres) { in unlock_all_bitmaps()
1440 for (i = 0; i < mddev->bitmap_info.nodes - 1; i++) { in unlock_all_bitmaps()
1441 if (cinfo->other_bitmap_lockres[i]) { in unlock_all_bitmaps()
1442 lockres_free(cinfo->other_bitmap_lockres[i]); in unlock_all_bitmaps()
1445 kfree(cinfo->other_bitmap_lockres); in unlock_all_bitmaps()
1446 cinfo->other_bitmap_lockres = NULL; in unlock_all_bitmaps()
1455 struct mddev *mddev = rdev->mddev; in gather_bitmaps()
1456 struct md_cluster_info *cinfo = mddev->cluster_info; in gather_bitmaps()
1459 cmsg.raid_slot = cpu_to_le32(rdev->desc_nr); in gather_bitmaps()
1464 for (sn = 0; sn < mddev->bitmap_info.nodes; sn++) { in gather_bitmaps()
1465 if (sn == (cinfo->slot_number - 1)) in gather_bitmaps()
1469 pr_warn("md-cluster: Could not gather bitmaps from slot %d", sn); in gather_bitmaps()
1472 if ((hi > 0) && (lo < mddev->recovery_cp)) in gather_bitmaps()
1473 mddev->recovery_cp = lo; in gather_bitmaps()
1503 pr_warn("md-cluster: support raid1 and raid10 (limited support)\n"); in cluster_init()
1504 pr_info("Registering Cluster MD functions\n"); in cluster_init()