Lines Matching refs:res

51 				struct dlm_lock_resource *res,
78 struct dlm_lock_resource *res,
87 static int dlm_do_master_request(struct dlm_lock_resource *res,
92 struct dlm_lock_resource *res,
96 struct dlm_lock_resource *res,
100 struct dlm_lock_resource *res,
107 struct dlm_lock_resource *res);
109 struct dlm_lock_resource *res);
111 struct dlm_lock_resource *res,
114 struct dlm_lock_resource *res);
253 struct dlm_lock_resource *res, in dlm_init_mle() argument
278 BUG_ON(!res); in dlm_init_mle()
279 mle->mleres = res; in dlm_init_mle()
280 memcpy(mle->mname, res->lockname.name, res->lockname.len); in dlm_init_mle()
281 mle->mnamelen = res->lockname.len; in dlm_init_mle()
282 mle->mnamehash = res->lockname.hash; in dlm_init_mle()
468 struct dlm_lock_resource *res; in dlm_lockres_release() local
471 res = container_of(kref, struct dlm_lock_resource, refs); in dlm_lockres_release()
472 dlm = res->dlm; in dlm_lockres_release()
476 BUG_ON(!res->lockname.name); in dlm_lockres_release()
478 mlog(0, "destroying lockres %.*s\n", res->lockname.len, in dlm_lockres_release()
479 res->lockname.name); in dlm_lockres_release()
483 if (!hlist_unhashed(&res->hash_node) || in dlm_lockres_release()
484 !list_empty(&res->granted) || in dlm_lockres_release()
485 !list_empty(&res->converting) || in dlm_lockres_release()
486 !list_empty(&res->blocked) || in dlm_lockres_release()
487 !list_empty(&res->dirty) || in dlm_lockres_release()
488 !list_empty(&res->recovering) || in dlm_lockres_release()
489 !list_empty(&res->purge)) { in dlm_lockres_release()
493 res->lockname.len, res->lockname.name, in dlm_lockres_release()
494 !hlist_unhashed(&res->hash_node) ? 'H' : ' ', in dlm_lockres_release()
495 !list_empty(&res->granted) ? 'G' : ' ', in dlm_lockres_release()
496 !list_empty(&res->converting) ? 'C' : ' ', in dlm_lockres_release()
497 !list_empty(&res->blocked) ? 'B' : ' ', in dlm_lockres_release()
498 !list_empty(&res->dirty) ? 'D' : ' ', in dlm_lockres_release()
499 !list_empty(&res->recovering) ? 'R' : ' ', in dlm_lockres_release()
500 !list_empty(&res->purge) ? 'P' : ' '); in dlm_lockres_release()
502 dlm_print_one_lock_resource(res); in dlm_lockres_release()
507 BUG_ON(!hlist_unhashed(&res->hash_node)); in dlm_lockres_release()
508 BUG_ON(!list_empty(&res->granted)); in dlm_lockres_release()
509 BUG_ON(!list_empty(&res->converting)); in dlm_lockres_release()
510 BUG_ON(!list_empty(&res->blocked)); in dlm_lockres_release()
511 BUG_ON(!list_empty(&res->dirty)); in dlm_lockres_release()
512 BUG_ON(!list_empty(&res->recovering)); in dlm_lockres_release()
513 BUG_ON(!list_empty(&res->purge)); in dlm_lockres_release()
515 kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name); in dlm_lockres_release()
517 kmem_cache_free(dlm_lockres_cache, res); in dlm_lockres_release()
520 void dlm_lockres_put(struct dlm_lock_resource *res) in dlm_lockres_put() argument
522 kref_put(&res->refs, dlm_lockres_release); in dlm_lockres_put()
526 struct dlm_lock_resource *res, in dlm_init_lockres() argument
535 qname = (char *) res->lockname.name; in dlm_init_lockres()
538 res->lockname.len = namelen; in dlm_init_lockres()
539 res->lockname.hash = dlm_lockid_hash(name, namelen); in dlm_init_lockres()
541 init_waitqueue_head(&res->wq); in dlm_init_lockres()
542 spin_lock_init(&res->spinlock); in dlm_init_lockres()
543 INIT_HLIST_NODE(&res->hash_node); in dlm_init_lockres()
544 INIT_LIST_HEAD(&res->granted); in dlm_init_lockres()
545 INIT_LIST_HEAD(&res->converting); in dlm_init_lockres()
546 INIT_LIST_HEAD(&res->blocked); in dlm_init_lockres()
547 INIT_LIST_HEAD(&res->dirty); in dlm_init_lockres()
548 INIT_LIST_HEAD(&res->recovering); in dlm_init_lockres()
549 INIT_LIST_HEAD(&res->purge); in dlm_init_lockres()
550 INIT_LIST_HEAD(&res->tracking); in dlm_init_lockres()
551 atomic_set(&res->asts_reserved, 0); in dlm_init_lockres()
552 res->migration_pending = 0; in dlm_init_lockres()
553 res->inflight_locks = 0; in dlm_init_lockres()
554 res->inflight_assert_workers = 0; in dlm_init_lockres()
556 res->dlm = dlm; in dlm_init_lockres()
558 kref_init(&res->refs); in dlm_init_lockres()
564 spin_lock(&res->spinlock); in dlm_init_lockres()
565 dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN); in dlm_init_lockres()
566 spin_unlock(&res->spinlock); in dlm_init_lockres()
568 res->state = DLM_LOCK_RES_IN_PROGRESS; in dlm_init_lockres()
570 res->last_used = 0; in dlm_init_lockres()
573 list_add_tail(&res->tracking, &dlm->tracking_list); in dlm_init_lockres()
576 memset(res->lvb, 0, DLM_LVB_LEN); in dlm_init_lockres()
577 memset(res->refmap, 0, sizeof(res->refmap)); in dlm_init_lockres()
584 struct dlm_lock_resource *res = NULL; in dlm_new_lockres() local
586 res = kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS); in dlm_new_lockres()
587 if (!res) in dlm_new_lockres()
590 res->lockname.name = kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS); in dlm_new_lockres()
591 if (!res->lockname.name) in dlm_new_lockres()
594 dlm_init_lockres(dlm, res, name, namelen); in dlm_new_lockres()
595 return res; in dlm_new_lockres()
598 if (res) in dlm_new_lockres()
599 kmem_cache_free(dlm_lockres_cache, res); in dlm_new_lockres()
604 struct dlm_lock_resource *res, int bit) in dlm_lockres_set_refmap_bit() argument
606 assert_spin_locked(&res->spinlock); in dlm_lockres_set_refmap_bit()
608 mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len, in dlm_lockres_set_refmap_bit()
609 res->lockname.name, bit, __builtin_return_address(0)); in dlm_lockres_set_refmap_bit()
611 set_bit(bit, res->refmap); in dlm_lockres_set_refmap_bit()
615 struct dlm_lock_resource *res, int bit) in dlm_lockres_clear_refmap_bit() argument
617 assert_spin_locked(&res->spinlock); in dlm_lockres_clear_refmap_bit()
619 mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len, in dlm_lockres_clear_refmap_bit()
620 res->lockname.name, bit, __builtin_return_address(0)); in dlm_lockres_clear_refmap_bit()
622 clear_bit(bit, res->refmap); in dlm_lockres_clear_refmap_bit()
626 struct dlm_lock_resource *res) in __dlm_lockres_grab_inflight_ref() argument
628 res->inflight_locks++; in __dlm_lockres_grab_inflight_ref()
631 res->lockname.len, res->lockname.name, res->inflight_locks, in __dlm_lockres_grab_inflight_ref()
636 struct dlm_lock_resource *res) in dlm_lockres_grab_inflight_ref() argument
638 assert_spin_locked(&res->spinlock); in dlm_lockres_grab_inflight_ref()
639 __dlm_lockres_grab_inflight_ref(dlm, res); in dlm_lockres_grab_inflight_ref()
643 struct dlm_lock_resource *res) in dlm_lockres_drop_inflight_ref() argument
645 assert_spin_locked(&res->spinlock); in dlm_lockres_drop_inflight_ref()
647 BUG_ON(res->inflight_locks == 0); in dlm_lockres_drop_inflight_ref()
649 res->inflight_locks--; in dlm_lockres_drop_inflight_ref()
652 res->lockname.len, res->lockname.name, res->inflight_locks, in dlm_lockres_drop_inflight_ref()
655 wake_up(&res->wq); in dlm_lockres_drop_inflight_ref()
659 struct dlm_lock_resource *res) in __dlm_lockres_grab_inflight_worker() argument
661 assert_spin_locked(&res->spinlock); in __dlm_lockres_grab_inflight_worker()
662 res->inflight_assert_workers++; in __dlm_lockres_grab_inflight_worker()
664 dlm->name, res->lockname.len, res->lockname.name, in __dlm_lockres_grab_inflight_worker()
665 res->inflight_assert_workers); in __dlm_lockres_grab_inflight_worker()
669 struct dlm_lock_resource *res) in __dlm_lockres_drop_inflight_worker() argument
671 assert_spin_locked(&res->spinlock); in __dlm_lockres_drop_inflight_worker()
672 BUG_ON(res->inflight_assert_workers == 0); in __dlm_lockres_drop_inflight_worker()
673 res->inflight_assert_workers--; in __dlm_lockres_drop_inflight_worker()
675 dlm->name, res->lockname.len, res->lockname.name, in __dlm_lockres_drop_inflight_worker()
676 res->inflight_assert_workers); in __dlm_lockres_drop_inflight_worker()
680 struct dlm_lock_resource *res) in dlm_lockres_drop_inflight_worker() argument
682 spin_lock(&res->spinlock); in dlm_lockres_drop_inflight_worker()
683 __dlm_lockres_drop_inflight_worker(dlm, res); in dlm_lockres_drop_inflight_worker()
684 spin_unlock(&res->spinlock); in dlm_lockres_drop_inflight_worker()
708 struct dlm_lock_resource *tmpres=NULL, *res=NULL; in dlm_get_lock_resource() local
768 if (res) { in dlm_get_lock_resource()
770 if (!list_empty(&res->tracking)) in dlm_get_lock_resource()
771 list_del_init(&res->tracking); in dlm_get_lock_resource()
775 res->lockname.len, in dlm_get_lock_resource()
776 res->lockname.name); in dlm_get_lock_resource()
778 dlm_lockres_put(res); in dlm_get_lock_resource()
780 res = tmpres; in dlm_get_lock_resource()
784 if (!res) { in dlm_get_lock_resource()
791 res = dlm_new_lockres(dlm, lockid, namelen); in dlm_get_lock_resource()
792 if (!res) in dlm_get_lock_resource()
797 mlog(0, "no lockres found, allocated our own: %p\n", res); in dlm_get_lock_resource()
802 spin_lock(&res->spinlock); in dlm_get_lock_resource()
803 dlm_change_lockres_owner(dlm, res, dlm->node_num); in dlm_get_lock_resource()
804 __dlm_insert_lockres(dlm, res); in dlm_get_lock_resource()
805 dlm_lockres_grab_inflight_ref(dlm, res); in dlm_get_lock_resource()
806 spin_unlock(&res->spinlock); in dlm_get_lock_resource()
858 dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0); in dlm_get_lock_resource()
881 __dlm_insert_lockres(dlm, res); in dlm_get_lock_resource()
884 __dlm_lockres_grab_inflight_ref(dlm, res); in dlm_get_lock_resource()
902 if (!dlm_pre_master_reco_lockres(dlm, res)) in dlm_get_lock_resource()
938 ret = dlm_do_master_request(res, mle, nodenum); in dlm_get_lock_resource()
957 ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked); in dlm_get_lock_resource()
961 "request now, blocked=%d\n", dlm->name, res->lockname.len, in dlm_get_lock_resource()
962 res->lockname.name, blocked); in dlm_get_lock_resource()
966 dlm->name, res->lockname.len, in dlm_get_lock_resource()
967 res->lockname.name, blocked); in dlm_get_lock_resource()
968 dlm_print_one_lock_resource(res); in dlm_get_lock_resource()
975 mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len, in dlm_get_lock_resource()
976 res->lockname.name, res->owner); in dlm_get_lock_resource()
978 BUG_ON(res->owner == O2NM_MAX_NODES); in dlm_get_lock_resource()
987 spin_lock(&res->spinlock); in dlm_get_lock_resource()
988 res->state &= ~DLM_LOCK_RES_IN_PROGRESS; in dlm_get_lock_resource()
989 spin_unlock(&res->spinlock); in dlm_get_lock_resource()
990 wake_up(&res->wq); in dlm_get_lock_resource()
997 return res; in dlm_get_lock_resource()
1004 struct dlm_lock_resource *res, in dlm_wait_for_lock_mastery() argument
1018 spin_lock(&res->spinlock); in dlm_wait_for_lock_mastery()
1019 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { in dlm_wait_for_lock_mastery()
1021 res->lockname.len, res->lockname.name, res->owner); in dlm_wait_for_lock_mastery()
1022 spin_unlock(&res->spinlock); in dlm_wait_for_lock_mastery()
1025 if (res->owner != dlm->node_num) { in dlm_wait_for_lock_mastery()
1026 ret = dlm_do_master_request(res, mle, res->owner); in dlm_wait_for_lock_mastery()
1029 mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret); in dlm_wait_for_lock_mastery()
1037 spin_unlock(&res->spinlock); in dlm_wait_for_lock_mastery()
1050 dlm->name, res->lockname.len, res->lockname.name); in dlm_wait_for_lock_mastery()
1051 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked); in dlm_wait_for_lock_mastery()
1055 dlm->name, res->lockname.len, res->lockname.name, in dlm_wait_for_lock_mastery()
1065 "rechecking now\n", dlm->name, res->lockname.len, in dlm_wait_for_lock_mastery()
1066 res->lockname.name); in dlm_wait_for_lock_mastery()
1071 "for %s:%.*s\n", dlm->name, res->lockname.len, in dlm_wait_for_lock_mastery()
1072 res->lockname.name); in dlm_wait_for_lock_mastery()
1109 if (res->owner == O2NM_MAX_NODES) { in dlm_wait_for_lock_mastery()
1111 res->lockname.len, res->lockname.name); in dlm_wait_for_lock_mastery()
1114 mlog(0, "done waiting, master is %u\n", res->owner); in dlm_wait_for_lock_mastery()
1123 res->lockname.len, res->lockname.name, m); in dlm_wait_for_lock_mastery()
1124 ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0); in dlm_wait_for_lock_mastery()
1139 spin_lock(&res->spinlock); in dlm_wait_for_lock_mastery()
1142 dlm_change_lockres_owner(dlm, res, m); in dlm_wait_for_lock_mastery()
1143 spin_unlock(&res->spinlock); in dlm_wait_for_lock_mastery()
1209 struct dlm_lock_resource *res, in dlm_restart_lock_mastery() argument
1256 res->lockname.len, in dlm_restart_lock_mastery()
1257 res->lockname.name, in dlm_restart_lock_mastery()
1272 res->lockname.len, in dlm_restart_lock_mastery()
1273 res->lockname.name); in dlm_restart_lock_mastery()
1275 mle->mleres = res; in dlm_restart_lock_mastery()
1308 static int dlm_do_master_request(struct dlm_lock_resource *res, in dlm_do_master_request() argument
1360 "reference\n", dlm->name, res->lockname.len, in dlm_do_master_request()
1361 res->lockname.name, to); in dlm_do_master_request()
1407 struct dlm_lock_resource *res = NULL; in dlm_master_request_handler() local
1436 res = __dlm_lookup_lockres(dlm, name, namelen, hash); in dlm_master_request_handler()
1437 if (res) { in dlm_master_request_handler()
1441 spin_lock(&res->spinlock); in dlm_master_request_handler()
1448 if (hlist_unhashed(&res->hash_node)) { in dlm_master_request_handler()
1449 spin_unlock(&res->spinlock); in dlm_master_request_handler()
1450 dlm_lockres_put(res); in dlm_master_request_handler()
1454 if (res->state & (DLM_LOCK_RES_RECOVERING| in dlm_master_request_handler()
1456 spin_unlock(&res->spinlock); in dlm_master_request_handler()
1465 if (res->owner == dlm->node_num) { in dlm_master_request_handler()
1466 dlm_lockres_set_refmap_bit(dlm, res, request->node_idx); in dlm_master_request_handler()
1467 spin_unlock(&res->spinlock); in dlm_master_request_handler()
1480 } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { in dlm_master_request_handler()
1481 spin_unlock(&res->spinlock); in dlm_master_request_handler()
1492 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { in dlm_master_request_handler()
1531 dlm_lockres_set_refmap_bit(dlm, res, in dlm_master_request_handler()
1545 spin_unlock(&res->spinlock); in dlm_master_request_handler()
1618 dlm->node_num, res->lockname.len, res->lockname.name); in dlm_master_request_handler()
1619 spin_lock(&res->spinlock); in dlm_master_request_handler()
1620 ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, in dlm_master_request_handler()
1625 spin_unlock(&res->spinlock); in dlm_master_request_handler()
1626 dlm_lockres_put(res); in dlm_master_request_handler()
1629 __dlm_lockres_grab_inflight_worker(dlm, res); in dlm_master_request_handler()
1630 spin_unlock(&res->spinlock); in dlm_master_request_handler()
1633 if (res) in dlm_master_request_handler()
1634 dlm_lockres_put(res); in dlm_master_request_handler()
1653 struct dlm_lock_resource *res, in dlm_do_assert_master() argument
1661 const char *lockname = res->lockname.name; in dlm_do_assert_master()
1662 unsigned int namelen = res->lockname.len; in dlm_do_assert_master()
1666 spin_lock(&res->spinlock); in dlm_do_assert_master()
1667 res->state |= DLM_LOCK_RES_SETREF_INPROG; in dlm_do_assert_master()
1668 spin_unlock(&res->spinlock); in dlm_do_assert_master()
1735 spin_lock(&res->spinlock); in dlm_do_assert_master()
1736 dlm_lockres_set_refmap_bit(dlm, res, to); in dlm_do_assert_master()
1737 spin_unlock(&res->spinlock); in dlm_do_assert_master()
1744 spin_lock(&res->spinlock); in dlm_do_assert_master()
1745 res->state &= ~DLM_LOCK_RES_SETREF_INPROG; in dlm_do_assert_master()
1746 spin_unlock(&res->spinlock); in dlm_do_assert_master()
1747 wake_up(&res->wq); in dlm_do_assert_master()
1767 struct dlm_lock_resource *res = NULL; in dlm_assert_master_handler() local
1845 res = __dlm_lookup_lockres(dlm, name, namelen, hash); in dlm_assert_master_handler()
1846 if (res) { in dlm_assert_master_handler()
1847 spin_lock(&res->spinlock); in dlm_assert_master_handler()
1848 if (res->state & DLM_LOCK_RES_RECOVERING) { in dlm_assert_master_handler()
1854 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN && in dlm_assert_master_handler()
1855 res->owner != assert->node_idx) { in dlm_assert_master_handler()
1858 assert->node_idx, res->owner, namelen, in dlm_assert_master_handler()
1860 __dlm_print_one_lock_resource(res); in dlm_assert_master_handler()
1864 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { in dlm_assert_master_handler()
1866 if (res->owner == assert->node_idx) { in dlm_assert_master_handler()
1875 res->owner, namelen, name); in dlm_assert_master_handler()
1878 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { in dlm_assert_master_handler()
1899 spin_unlock(&res->spinlock); in dlm_assert_master_handler()
1929 if (res) { in dlm_assert_master_handler()
1931 spin_lock(&res->spinlock); in dlm_assert_master_handler()
1935 res->lockname.len, res->lockname.name, in dlm_assert_master_handler()
1937 res->state &= ~DLM_LOCK_RES_MIGRATING; in dlm_assert_master_handler()
1939 dlm_change_lockres_owner(dlm, res, mle->new_master); in dlm_assert_master_handler()
1940 BUG_ON(res->state & DLM_LOCK_RES_DIRTY); in dlm_assert_master_handler()
1942 dlm_change_lockres_owner(dlm, res, mle->master); in dlm_assert_master_handler()
1944 spin_unlock(&res->spinlock); in dlm_assert_master_handler()
1947 wake_up(&res->wq); in dlm_assert_master_handler()
1985 } else if (res) { in dlm_assert_master_handler()
1986 if (res->owner != assert->node_idx) { in dlm_assert_master_handler()
1989 res->owner, namelen, name); in dlm_assert_master_handler()
1996 if (res) { in dlm_assert_master_handler()
1997 spin_lock(&res->spinlock); in dlm_assert_master_handler()
1998 res->state |= DLM_LOCK_RES_SETREF_INPROG; in dlm_assert_master_handler()
1999 spin_unlock(&res->spinlock); in dlm_assert_master_handler()
2000 *ret_data = (void *)res; in dlm_assert_master_handler()
2025 __dlm_print_one_lock_resource(res); in dlm_assert_master_handler()
2026 spin_unlock(&res->spinlock); in dlm_assert_master_handler()
2032 *ret_data = (void *)res; in dlm_assert_master_handler()
2039 struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data; in dlm_assert_master_post_handler() local
2042 spin_lock(&res->spinlock); in dlm_assert_master_post_handler()
2043 res->state &= ~DLM_LOCK_RES_SETREF_INPROG; in dlm_assert_master_post_handler()
2044 spin_unlock(&res->spinlock); in dlm_assert_master_post_handler()
2045 wake_up(&res->wq); in dlm_assert_master_post_handler()
2046 dlm_lockres_put(res); in dlm_assert_master_post_handler()
2052 struct dlm_lock_resource *res, in dlm_dispatch_assert_master() argument
2063 item->u.am.lockres = res; /* already have a ref */ in dlm_dispatch_assert_master()
2070 mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, in dlm_dispatch_assert_master()
2071 res->lockname.name); in dlm_dispatch_assert_master()
2085 struct dlm_lock_resource *res; in dlm_assert_master_worker() local
2093 res = item->u.am.lockres; in dlm_assert_master_worker()
2124 spin_lock(&res->spinlock); in dlm_assert_master_worker()
2125 if (res->state & DLM_LOCK_RES_MIGRATING) { in dlm_assert_master_worker()
2129 spin_unlock(&res->spinlock); in dlm_assert_master_worker()
2132 __dlm_lockres_reserve_ast(res); in dlm_assert_master_worker()
2133 spin_unlock(&res->spinlock); in dlm_assert_master_worker()
2138 res->lockname.len, res->lockname.name, dlm->node_num); in dlm_assert_master_worker()
2139 ret = dlm_do_assert_master(dlm, res, nodemap, flags); in dlm_assert_master_worker()
2147 dlm_lockres_release_ast(dlm, res); in dlm_assert_master_worker()
2150 dlm_lockres_drop_inflight_worker(dlm, res); in dlm_assert_master_worker()
2152 dlm_lockres_put(res); in dlm_assert_master_worker()
2168 struct dlm_lock_resource *res) in dlm_pre_master_reco_lockres() argument
2183 ret = dlm_do_master_requery(dlm, res, nodenum, &master); in dlm_pre_master_reco_lockres()
2217 int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) in dlm_drop_lockres_ref() argument
2224 lockname = res->lockname.name; in dlm_drop_lockres_ref()
2225 namelen = res->lockname.len; in dlm_drop_lockres_ref()
2234 &deref, sizeof(deref), res->owner, &r); in dlm_drop_lockres_ref()
2237 dlm->name, namelen, lockname, ret, res->owner); in dlm_drop_lockres_ref()
2241 dlm->name, namelen, lockname, res->owner, r); in dlm_drop_lockres_ref()
2242 dlm_print_one_lock_resource(res); in dlm_drop_lockres_ref()
2256 struct dlm_lock_resource *res = NULL; in dlm_deref_lockres_handler() local
2285 res = __dlm_lookup_lockres_full(dlm, name, namelen, hash); in dlm_deref_lockres_handler()
2286 if (!res) { in dlm_deref_lockres_handler()
2294 spin_lock(&res->spinlock); in dlm_deref_lockres_handler()
2295 if (res->state & DLM_LOCK_RES_SETREF_INPROG) in dlm_deref_lockres_handler()
2298 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); in dlm_deref_lockres_handler()
2299 if (test_bit(node, res->refmap)) { in dlm_deref_lockres_handler()
2300 dlm_lockres_clear_refmap_bit(dlm, res, node); in dlm_deref_lockres_handler()
2304 spin_unlock(&res->spinlock); in dlm_deref_lockres_handler()
2308 dlm_lockres_calc_usage(dlm, res); in dlm_deref_lockres_handler()
2312 res->lockname.len, res->lockname.name, node); in dlm_deref_lockres_handler()
2313 dlm_print_one_lock_resource(res); in dlm_deref_lockres_handler()
2327 item->u.dl.deref_res = res; in dlm_deref_lockres_handler()
2338 if (res) in dlm_deref_lockres_handler()
2339 dlm_lockres_put(res); in dlm_deref_lockres_handler()
2351 struct dlm_lock_resource *res = NULL; in dlm_deref_lockres_done_handler() local
2377 res = __dlm_lookup_lockres_full(dlm, name, namelen, hash); in dlm_deref_lockres_done_handler()
2378 if (!res) { in dlm_deref_lockres_done_handler()
2385 spin_lock(&res->spinlock); in dlm_deref_lockres_done_handler()
2386 if (!(res->state & DLM_LOCK_RES_DROPPING_REF)) { in dlm_deref_lockres_done_handler()
2387 spin_unlock(&res->spinlock); in dlm_deref_lockres_done_handler()
2391 res->lockname.len, res->lockname.name, node); in dlm_deref_lockres_done_handler()
2396 __dlm_do_purge_lockres(dlm, res); in dlm_deref_lockres_done_handler()
2397 spin_unlock(&res->spinlock); in dlm_deref_lockres_done_handler()
2398 wake_up(&res->wq); in dlm_deref_lockres_done_handler()
2404 if (res) in dlm_deref_lockres_done_handler()
2405 dlm_lockres_put(res); in dlm_deref_lockres_done_handler()
2411 struct dlm_lock_resource *res, u8 node) in dlm_drop_lockres_ref_done() argument
2418 lockname = res->lockname.name; in dlm_drop_lockres_ref_done()
2419 namelen = res->lockname.len; in dlm_drop_lockres_ref_done()
2437 dlm_print_one_lock_resource(res); in dlm_drop_lockres_ref_done()
2444 struct dlm_lock_resource *res; in dlm_deref_lockres_worker() local
2449 res = item->u.dl.deref_res; in dlm_deref_lockres_worker()
2452 spin_lock(&res->spinlock); in dlm_deref_lockres_worker()
2453 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); in dlm_deref_lockres_worker()
2454 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG); in dlm_deref_lockres_worker()
2455 if (test_bit(node, res->refmap)) { in dlm_deref_lockres_worker()
2456 dlm_lockres_clear_refmap_bit(dlm, res, node); in dlm_deref_lockres_worker()
2459 spin_unlock(&res->spinlock); in dlm_deref_lockres_worker()
2461 dlm_drop_lockres_ref_done(dlm, res, node); in dlm_deref_lockres_worker()
2465 dlm->name, res->lockname.len, res->lockname.name, node); in dlm_deref_lockres_worker()
2466 dlm_lockres_calc_usage(dlm, res); in dlm_deref_lockres_worker()
2470 res->lockname.len, res->lockname.name, node); in dlm_deref_lockres_worker()
2471 dlm_print_one_lock_resource(res); in dlm_deref_lockres_worker()
2474 dlm_lockres_put(res); in dlm_deref_lockres_worker()
2485 struct dlm_lock_resource *res) in dlm_is_lockres_migratable() argument
2493 assert_spin_locked(&res->spinlock); in dlm_is_lockres_migratable()
2496 if (res->state & DLM_LOCK_RES_MIGRATING) in dlm_is_lockres_migratable()
2500 if (res->state & (DLM_LOCK_RES_RECOVERING| in dlm_is_lockres_migratable()
2504 if (res->owner != dlm->node_num) in dlm_is_lockres_migratable()
2508 queue = dlm_list_idx_to_ptr(res, idx); in dlm_is_lockres_migratable()
2516 "%s list\n", dlm->name, res->lockname.len, in dlm_is_lockres_migratable()
2517 res->lockname.name, in dlm_is_lockres_migratable()
2526 node_ref = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); in dlm_is_lockres_migratable()
2531 mlog(0, "%s: res %.*s, Migratable\n", dlm->name, res->lockname.len, in dlm_is_lockres_migratable()
2532 res->lockname.name); in dlm_is_lockres_migratable()
2543 struct dlm_lock_resource *res, u8 target) in dlm_migrate_lockres() argument
2559 name = res->lockname.name; in dlm_migrate_lockres()
2560 namelen = res->lockname.len; in dlm_migrate_lockres()
2586 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name, in dlm_migrate_lockres()
2608 if (dlm_mark_lockres_migrating(dlm, res, target) < 0) { in dlm_migrate_lockres()
2610 "the target went down.\n", res->lockname.len, in dlm_migrate_lockres()
2611 res->lockname.name, target); in dlm_migrate_lockres()
2612 spin_lock(&res->spinlock); in dlm_migrate_lockres()
2613 res->state &= ~DLM_LOCK_RES_MIGRATING; in dlm_migrate_lockres()
2615 spin_unlock(&res->spinlock); in dlm_migrate_lockres()
2652 ret = dlm_send_one_lockres(dlm, res, mres, target, in dlm_migrate_lockres()
2662 spin_lock(&res->spinlock); in dlm_migrate_lockres()
2663 res->state &= ~DLM_LOCK_RES_MIGRATING; in dlm_migrate_lockres()
2665 spin_unlock(&res->spinlock); in dlm_migrate_lockres()
2691 res->owner == target) in dlm_migrate_lockres()
2695 dlm->name, res->lockname.len, res->lockname.name); in dlm_migrate_lockres()
2701 dlm->name, res->lockname.len, in dlm_migrate_lockres()
2702 res->lockname.name, target); in dlm_migrate_lockres()
2708 spin_lock(&res->spinlock); in dlm_migrate_lockres()
2709 res->state &= ~DLM_LOCK_RES_MIGRATING; in dlm_migrate_lockres()
2711 spin_unlock(&res->spinlock); in dlm_migrate_lockres()
2716 dlm->name, res->lockname.len, res->lockname.name); in dlm_migrate_lockres()
2720 spin_lock(&res->spinlock); in dlm_migrate_lockres()
2721 dlm_set_lockres_owner(dlm, res, target); in dlm_migrate_lockres()
2722 res->state &= ~DLM_LOCK_RES_MIGRATING; in dlm_migrate_lockres()
2723 dlm_remove_nonlocal_locks(dlm, res); in dlm_migrate_lockres()
2724 spin_unlock(&res->spinlock); in dlm_migrate_lockres()
2725 wake_up(&res->wq); in dlm_migrate_lockres()
2732 dlm_lockres_calc_usage(dlm, res); in dlm_migrate_lockres()
2737 dlm_kick_thread(dlm, res); in dlm_migrate_lockres()
2742 wake_up(&res->wq); in dlm_migrate_lockres()
2766 int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) in dlm_empty_lockres() argument
2774 spin_lock(&res->spinlock); in dlm_empty_lockres()
2775 if (dlm_is_lockres_migratable(dlm, res)) in dlm_empty_lockres()
2776 target = dlm_pick_migration_target(dlm, res); in dlm_empty_lockres()
2777 spin_unlock(&res->spinlock); in dlm_empty_lockres()
2785 ret = dlm_migrate_lockres(dlm, res, target); in dlm_empty_lockres()
2788 dlm->name, res->lockname.len, res->lockname.name, in dlm_empty_lockres()
2807 struct dlm_lock_resource *res, in dlm_migration_can_proceed() argument
2811 spin_lock(&res->spinlock); in dlm_migration_can_proceed()
2812 can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING); in dlm_migration_can_proceed()
2813 spin_unlock(&res->spinlock); in dlm_migration_can_proceed()
2825 struct dlm_lock_resource *res) in dlm_lockres_is_dirty() argument
2828 spin_lock(&res->spinlock); in dlm_lockres_is_dirty()
2829 ret = !!(res->state & DLM_LOCK_RES_DIRTY); in dlm_lockres_is_dirty()
2830 spin_unlock(&res->spinlock); in dlm_lockres_is_dirty()
2836 struct dlm_lock_resource *res, in dlm_mark_lockres_migrating() argument
2842 res->lockname.len, res->lockname.name, dlm->node_num, in dlm_mark_lockres_migrating()
2846 spin_lock(&res->spinlock); in dlm_mark_lockres_migrating()
2847 BUG_ON(res->migration_pending); in dlm_mark_lockres_migrating()
2848 res->migration_pending = 1; in dlm_mark_lockres_migrating()
2851 __dlm_lockres_reserve_ast(res); in dlm_mark_lockres_migrating()
2852 spin_unlock(&res->spinlock); in dlm_mark_lockres_migrating()
2855 dlm_kick_thread(dlm, res); in dlm_mark_lockres_migrating()
2858 spin_lock(&res->spinlock); in dlm_mark_lockres_migrating()
2859 BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY); in dlm_mark_lockres_migrating()
2860 res->state |= DLM_LOCK_RES_BLOCK_DIRTY; in dlm_mark_lockres_migrating()
2861 spin_unlock(&res->spinlock); in dlm_mark_lockres_migrating()
2863 wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res)); in dlm_mark_lockres_migrating()
2864 dlm_lockres_release_ast(dlm, res); in dlm_mark_lockres_migrating()
2867 res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no"); in dlm_mark_lockres_migrating()
2873 dlm_migration_can_proceed(dlm, res, target), in dlm_mark_lockres_migrating()
2877 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no", in dlm_mark_lockres_migrating()
2881 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no", in dlm_mark_lockres_migrating()
2884 if (!dlm_migration_can_proceed(dlm, res, target)) { in dlm_mark_lockres_migrating()
2904 spin_lock(&res->spinlock); in dlm_mark_lockres_migrating()
2905 BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY)); in dlm_mark_lockres_migrating()
2906 res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY; in dlm_mark_lockres_migrating()
2908 BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING)); in dlm_mark_lockres_migrating()
2910 res->migration_pending = 0; in dlm_mark_lockres_migrating()
2911 spin_unlock(&res->spinlock); in dlm_mark_lockres_migrating()
2928 struct dlm_lock_resource *res) in dlm_remove_nonlocal_locks() argument
2930 struct list_head *queue = &res->granted; in dlm_remove_nonlocal_locks()
2934 assert_spin_locked(&res->spinlock); in dlm_remove_nonlocal_locks()
2936 BUG_ON(res->owner == dlm->node_num); in dlm_remove_nonlocal_locks()
2948 dlm_lockres_clear_refmap_bit(dlm, res, in dlm_remove_nonlocal_locks()
2961 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit); in dlm_remove_nonlocal_locks()
2969 res->lockname.len, res->lockname.name, bit); in dlm_remove_nonlocal_locks()
2970 dlm_lockres_clear_refmap_bit(dlm, res, bit); in dlm_remove_nonlocal_locks()
2982 struct dlm_lock_resource *res) in dlm_pick_migration_target() argument
2985 struct list_head *queue = &res->granted; in dlm_pick_migration_target()
2991 assert_spin_locked(&res->spinlock); in dlm_pick_migration_target()
2995 queue = dlm_list_idx_to_ptr(res, idx); in dlm_pick_migration_target()
3009 noderef = find_next_bit(res->refmap, O2NM_MAX_NODES, in dlm_pick_migration_target()
3028 struct dlm_lock_resource *res, in dlm_do_migrate_request() argument
3037 migrate.namelen = res->lockname.len; in dlm_do_migrate_request()
3038 memcpy(migrate.name, res->lockname.name, migrate.namelen); in dlm_do_migrate_request()
3081 dlm->name, res->lockname.len, res->lockname.name, in dlm_do_migrate_request()
3083 spin_lock(&res->spinlock); in dlm_do_migrate_request()
3084 dlm_lockres_set_refmap_bit(dlm, res, nodenum); in dlm_do_migrate_request()
3085 spin_unlock(&res->spinlock); in dlm_do_migrate_request()
3108 struct dlm_lock_resource *res = NULL; in dlm_migrate_request_handler() local
3132 res = __dlm_lookup_lockres(dlm, name, namelen, hash); in dlm_migrate_request_handler()
3133 if (res) { in dlm_migrate_request_handler()
3134 spin_lock(&res->spinlock); in dlm_migrate_request_handler()
3135 if (res->state & DLM_LOCK_RES_RECOVERING) { in dlm_migrate_request_handler()
3139 spin_unlock(&res->spinlock); in dlm_migrate_request_handler()
3146 res->state |= DLM_LOCK_RES_MIGRATING; in dlm_migrate_request_handler()
3147 spin_unlock(&res->spinlock); in dlm_migrate_request_handler()
3152 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, in dlm_migrate_request_handler()
3170 if (res) in dlm_migrate_request_handler()
3171 dlm_lockres_put(res); in dlm_migrate_request_handler()
3185 struct dlm_lock_resource *res, in dlm_add_migration_mle() argument
3245 dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen); in dlm_add_migration_mle()
3263 struct dlm_lock_resource *res; in dlm_reset_mleres_owner() local
3266 res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen, in dlm_reset_mleres_owner()
3268 if (res) { in dlm_reset_mleres_owner()
3272 spin_lock(&res->spinlock); in dlm_reset_mleres_owner()
3273 dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN); in dlm_reset_mleres_owner()
3274 dlm_move_lockres_to_recovery_list(dlm, res); in dlm_reset_mleres_owner()
3275 spin_unlock(&res->spinlock); in dlm_reset_mleres_owner()
3276 dlm_lockres_put(res); in dlm_reset_mleres_owner()
3287 return res; in dlm_reset_mleres_owner()
3335 struct dlm_lock_resource *res; in dlm_clean_master_list() local
3404 res = dlm_reset_mleres_owner(dlm, mle); in dlm_clean_master_list()
3405 if (res) in dlm_clean_master_list()
3416 int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, in dlm_finish_migration() argument
3431 spin_lock(&res->spinlock); in dlm_finish_migration()
3432 dlm_lockres_set_refmap_bit(dlm, res, old_master); in dlm_finish_migration()
3433 spin_unlock(&res->spinlock); in dlm_finish_migration()
3436 ret = dlm_do_migrate_request(dlm, res, old_master, in dlm_finish_migration()
3444 res->lockname.len, res->lockname.name); in dlm_finish_migration()
3447 ret = dlm_do_assert_master(dlm, res, iter.node_map, in dlm_finish_migration()
3458 res->lockname.len, res->lockname.name, old_master); in dlm_finish_migration()
3459 ret = dlm_do_assert_master(dlm, res, iter.node_map, in dlm_finish_migration()
3470 spin_lock(&res->spinlock); in dlm_finish_migration()
3471 dlm_set_lockres_owner(dlm, res, dlm->node_num); in dlm_finish_migration()
3472 res->state &= ~DLM_LOCK_RES_MIGRATING; in dlm_finish_migration()
3473 spin_unlock(&res->spinlock); in dlm_finish_migration()
3475 dlm_kick_thread(dlm, res); in dlm_finish_migration()
3476 wake_up(&res->wq); in dlm_finish_migration()
3490 void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res) in __dlm_lockres_reserve_ast() argument
3492 assert_spin_locked(&res->spinlock); in __dlm_lockres_reserve_ast()
3493 if (res->state & DLM_LOCK_RES_MIGRATING) { in __dlm_lockres_reserve_ast()
3494 __dlm_print_one_lock_resource(res); in __dlm_lockres_reserve_ast()
3496 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); in __dlm_lockres_reserve_ast()
3498 atomic_inc(&res->asts_reserved); in __dlm_lockres_reserve_ast()
3515 struct dlm_lock_resource *res) in dlm_lockres_release_ast() argument
3517 if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock)) in dlm_lockres_release_ast()
3520 if (!res->migration_pending) { in dlm_lockres_release_ast()
3521 spin_unlock(&res->spinlock); in dlm_lockres_release_ast()
3525 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); in dlm_lockres_release_ast()
3526 res->migration_pending = 0; in dlm_lockres_release_ast()
3527 res->state |= DLM_LOCK_RES_MIGRATING; in dlm_lockres_release_ast()
3528 spin_unlock(&res->spinlock); in dlm_lockres_release_ast()
3529 wake_up(&res->wq); in dlm_lockres_release_ast()
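
Taken together, the matches above trace the kref-based lifetime of struct dlm_lock_resource: dlm_new_lockres()/dlm_init_lockres() allocate and initialize the resource with a single reference, dlm_lockres_put() drops a reference, and dlm_lockres_release() frees the lockname and the resource once the count reaches zero. Below is a minimal user-space sketch of that put/release pattern only, not the kernel implementation; the lockres_stub names are invented for illustration, and C11 atomics stand in for struct kref and the slab caches.

/*
 * Minimal user-space sketch of the put/release pattern seen above
 * (dlm_lockres_put -> kref_put -> dlm_lockres_release).  All names here
 * (lockres_stub, lockres_stub_put, ...) are illustrative only.
 */
#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

struct lockres_stub {
	atomic_int refs;	/* stands in for struct kref refs */
	char *name;		/* stands in for lockname.name    */
	size_t namelen;
};

static void lockres_stub_release(struct lockres_stub *res)
{
	/* mirrors dlm_lockres_release(): free the name, then the resource */
	free(res->name);
	free(res);
}

static void lockres_stub_put(struct lockres_stub *res)
{
	/* mirrors dlm_lockres_put(): drop a reference, release on zero */
	if (atomic_fetch_sub(&res->refs, 1) == 1)
		lockres_stub_release(res);
}

static struct lockres_stub *lockres_stub_new(const char *name, size_t namelen)
{
	/* mirrors dlm_new_lockres()/dlm_init_lockres(): allocate, copy the
	 * name, and start life with one reference held by the caller */
	struct lockres_stub *res = calloc(1, sizeof(*res));
	if (!res)
		return NULL;
	res->name = strndup(name, namelen);
	if (!res->name) {
		free(res);
		return NULL;
	}
	res->namelen = namelen;
	atomic_init(&res->refs, 1);
	return res;
}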