Lines Matching refs:mle (fs/ocfs2/dlm/dlmmaster.c, Linux OCFS2 DLM; mle is a struct dlm_master_list_entry, the record the DLM keeps while a lock resource's master is being elected or migrated)

56 struct dlm_master_list_entry *mle,
60 struct dlm_master_list_entry *mle,
71 struct dlm_master_list_entry *mle, in dlm_mle_equal() argument
75 if (dlm != mle->dlm) in dlm_mle_equal()
78 if (namelen != mle->mnamelen || in dlm_mle_equal()
79 memcmp(name, mle->mname, namelen) != 0) in dlm_mle_equal()
90 static void dlm_init_mle(struct dlm_master_list_entry *mle,
96 static void dlm_put_mle(struct dlm_master_list_entry *mle);
97 static void __dlm_put_mle(struct dlm_master_list_entry *mle);
99 struct dlm_master_list_entry **mle,
103 struct dlm_master_list_entry *mle, int to);
108 struct dlm_master_list_entry *mle,
112 struct dlm_master_list_entry *mle,
116 struct dlm_master_list_entry *mle,
179 struct dlm_master_list_entry *mle) in __dlm_mle_attach_hb_events() argument
183 list_add_tail(&mle->hb_events, &dlm->mle_hb_events); in __dlm_mle_attach_hb_events()
188 struct dlm_master_list_entry *mle) in __dlm_mle_detach_hb_events() argument
190 if (!list_empty(&mle->hb_events)) in __dlm_mle_detach_hb_events()
191 list_del_init(&mle->hb_events); in __dlm_mle_detach_hb_events()
196 struct dlm_master_list_entry *mle) in dlm_mle_detach_hb_events() argument
199 __dlm_mle_detach_hb_events(dlm, mle); in dlm_mle_detach_hb_events()
203 static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle) in dlm_get_mle_inuse() argument
206 dlm = mle->dlm; in dlm_get_mle_inuse()
210 mle->inuse++; in dlm_get_mle_inuse()
211 kref_get(&mle->mle_refs); in dlm_get_mle_inuse()
214 static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle) in dlm_put_mle_inuse() argument
217 dlm = mle->dlm; in dlm_put_mle_inuse()
221 mle->inuse--; in dlm_put_mle_inuse()
222 __dlm_put_mle(mle); in dlm_put_mle_inuse()
229 static void __dlm_put_mle(struct dlm_master_list_entry *mle) in __dlm_put_mle() argument
232 dlm = mle->dlm; in __dlm_put_mle()
236 if (!atomic_read(&mle->mle_refs.refcount)) { in __dlm_put_mle()
239 mlog(ML_ERROR, "bad mle: %p\n", mle); in __dlm_put_mle()
240 dlm_print_one_mle(mle); in __dlm_put_mle()
243 kref_put(&mle->mle_refs, dlm_mle_release); in __dlm_put_mle()
248 static void dlm_put_mle(struct dlm_master_list_entry *mle) in dlm_put_mle() argument
251 dlm = mle->dlm; in dlm_put_mle()
255 __dlm_put_mle(mle); in dlm_put_mle()
260 static inline void dlm_get_mle(struct dlm_master_list_entry *mle) in dlm_get_mle() argument
262 kref_get(&mle->mle_refs); in dlm_get_mle()
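The helpers above are a standard kref lifecycle: dlm_get_mle() and dlm_get_mle_inuse() take references with kref_get() (the inuse variant also bumps the integer mle->inuse counter), and __dlm_put_mle() drops them with kref_put(), whose release callback dlm_mle_release() (matched further down) recovers the entry with container_of() and frees it. A minimal sketch of the pattern, assuming kernel context; my_obj and its helpers are hypothetical names, not from dlmmaster.c:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_obj {
	struct kref refs;		/* plays the role of mle->mle_refs */
};

/* Invoked by kref_put() when the last reference is dropped. */
static void my_obj_release(struct kref *kref)
{
	struct my_obj *obj = container_of(kref, struct my_obj, refs);

	kfree(obj);	/* dlm_mle_release() frees into dlm_mle_cache instead */
}

static void my_obj_get(struct my_obj *obj)
{
	kref_get(&obj->refs);			/* cf. dlm_get_mle() */
}

static void my_obj_put(struct my_obj *obj)
{
	kref_put(&obj->refs, my_obj_release);	/* cf. __dlm_put_mle() */
}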
265 static void dlm_init_mle(struct dlm_master_list_entry *mle, in dlm_init_mle() argument
274 mle->dlm = dlm; in dlm_init_mle()
275 mle->type = type; in dlm_init_mle()
276 INIT_HLIST_NODE(&mle->master_hash_node); in dlm_init_mle()
277 INIT_LIST_HEAD(&mle->hb_events); in dlm_init_mle()
278 memset(mle->maybe_map, 0, sizeof(mle->maybe_map)); in dlm_init_mle()
279 spin_lock_init(&mle->spinlock); in dlm_init_mle()
280 init_waitqueue_head(&mle->wq); in dlm_init_mle()
281 atomic_set(&mle->woken, 0); in dlm_init_mle()
282 kref_init(&mle->mle_refs); in dlm_init_mle()
283 memset(mle->response_map, 0, sizeof(mle->response_map)); in dlm_init_mle()
284 mle->master = O2NM_MAX_NODES; in dlm_init_mle()
285 mle->new_master = O2NM_MAX_NODES; in dlm_init_mle()
286 mle->inuse = 0; in dlm_init_mle()
288 BUG_ON(mle->type != DLM_MLE_BLOCK && in dlm_init_mle()
289 mle->type != DLM_MLE_MASTER && in dlm_init_mle()
290 mle->type != DLM_MLE_MIGRATION); in dlm_init_mle()
292 if (mle->type == DLM_MLE_MASTER) { in dlm_init_mle()
294 mle->mleres = res; in dlm_init_mle()
295 memcpy(mle->mname, res->lockname.name, res->lockname.len); in dlm_init_mle()
296 mle->mnamelen = res->lockname.len; in dlm_init_mle()
297 mle->mnamehash = res->lockname.hash; in dlm_init_mle()
300 mle->mleres = NULL; in dlm_init_mle()
301 memcpy(mle->mname, name, namelen); in dlm_init_mle()
302 mle->mnamelen = namelen; in dlm_init_mle()
303 mle->mnamehash = dlm_lockid_hash(name, namelen); in dlm_init_mle()
306 atomic_inc(&dlm->mle_tot_count[mle->type]); in dlm_init_mle()
307 atomic_inc(&dlm->mle_cur_count[mle->type]); in dlm_init_mle()
310 memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map)); in dlm_init_mle()
311 memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map)); in dlm_init_mle()
312 clear_bit(dlm->node_num, mle->vote_map); in dlm_init_mle()
313 clear_bit(dlm->node_num, mle->node_map); in dlm_init_mle()
316 __dlm_mle_attach_hb_events(dlm, mle); in dlm_init_mle()
319 void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle) in __dlm_unlink_mle() argument
324 if (!hlist_unhashed(&mle->master_hash_node)) in __dlm_unlink_mle()
325 hlist_del_init(&mle->master_hash_node); in __dlm_unlink_mle()
328 void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle) in __dlm_insert_mle() argument
334 bucket = dlm_master_hash(dlm, mle->mnamehash); in __dlm_insert_mle()
335 hlist_add_head(&mle->master_hash_node, bucket); in __dlm_insert_mle()
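__dlm_insert_mle() above chains the entry into a bucket chosen from the lock-name hash (mle->mnamehash, computed by dlm_lockid_hash() in dlm_init_mle()), and __dlm_unlink_mle() removes it, using hlist_unhashed() so an entry that was never inserted is handled safely. A minimal sketch of that bucket pattern, assuming kernel context; the table size and names are illustrative, not from dlmmaster.c:

#include <linux/list.h>

#define NBUCKETS 128			/* illustrative; the dlm sizes its own table */

static struct hlist_head buckets[NBUCKETS];

struct entry {
	struct hlist_node hash_node;	/* cf. mle->master_hash_node */
	unsigned int hash;		/* cf. mle->mnamehash */
};

static void entry_init(struct entry *e)
{
	INIT_HLIST_NODE(&e->hash_node);	/* cf. dlm_init_mle() */
}

static void entry_insert(struct entry *e)
{
	hlist_add_head(&e->hash_node, &buckets[e->hash % NBUCKETS]);
}

static void entry_unlink(struct entry *e)
{
	if (!hlist_unhashed(&e->hash_node))	/* tolerate never-inserted entries */
		hlist_del_init(&e->hash_node);
}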
340 struct dlm_master_list_entry **mle, in dlm_find_mle() argument
358 *mle = tmpmle; in dlm_find_mle()
366 struct dlm_master_list_entry *mle; in dlm_hb_event_notify_attached() local
370 list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) { in dlm_hb_event_notify_attached()
372 dlm_mle_node_up(dlm, mle, NULL, idx); in dlm_hb_event_notify_attached()
374 dlm_mle_node_down(dlm, mle, NULL, idx); in dlm_hb_event_notify_attached()
379 struct dlm_master_list_entry *mle, in dlm_mle_node_down() argument
382 spin_lock(&mle->spinlock); in dlm_mle_node_down()
384 if (!test_bit(idx, mle->node_map)) in dlm_mle_node_down()
387 clear_bit(idx, mle->node_map); in dlm_mle_node_down()
389 spin_unlock(&mle->spinlock); in dlm_mle_node_down()
393 struct dlm_master_list_entry *mle, in dlm_mle_node_up() argument
396 spin_lock(&mle->spinlock); in dlm_mle_node_up()
398 if (test_bit(idx, mle->node_map)) in dlm_mle_node_up()
401 set_bit(idx, mle->node_map); in dlm_mle_node_up()
403 spin_unlock(&mle->spinlock); in dlm_mle_node_up()
426 struct dlm_master_list_entry *mle; in dlm_mle_release() local
429 mle = container_of(kref, struct dlm_master_list_entry, mle_refs); in dlm_mle_release()
430 dlm = mle->dlm; in dlm_mle_release()
435 mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname, in dlm_mle_release()
436 mle->type); in dlm_mle_release()
439 __dlm_unlink_mle(dlm, mle); in dlm_mle_release()
442 __dlm_mle_detach_hb_events(dlm, mle); in dlm_mle_release()
444 atomic_dec(&dlm->mle_cur_count[mle->type]); in dlm_mle_release()
448 kmem_cache_free(dlm_mle_cache, mle); in dlm_mle_release()
707 struct dlm_master_list_entry *mle = NULL; in dlm_get_lock_resource() local
791 blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen); in dlm_get_lock_resource()
794 if (mle->type == DLM_MLE_MASTER) { in dlm_get_lock_resource()
798 mig = (mle->type == DLM_MLE_MIGRATION); in dlm_get_lock_resource()
807 if (mig || mle->master != O2NM_MAX_NODES) { in dlm_get_lock_resource()
808 BUG_ON(mig && mle->master == dlm->node_num); in dlm_get_lock_resource()
819 dlm_mle_detach_hb_events(dlm, mle); in dlm_get_lock_resource()
820 dlm_put_mle(mle); in dlm_get_lock_resource()
821 mle = NULL; in dlm_get_lock_resource()
830 mle = alloc_mle; in dlm_get_lock_resource()
833 dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0); in dlm_get_lock_resource()
834 set_bit(dlm->node_num, mle->maybe_map); in dlm_get_lock_resource()
835 __dlm_insert_mle(dlm, mle); in dlm_get_lock_resource()
867 dlm_get_mle_inuse(mle); in dlm_get_lock_resource()
913 dlm_node_iter_init(mle->vote_map, &iter); in dlm_get_lock_resource()
915 ret = dlm_do_master_request(res, mle, nodenum); in dlm_get_lock_resource()
918 if (mle->master != O2NM_MAX_NODES) { in dlm_get_lock_resource()
920 if (mle->master <= nodenum) in dlm_get_lock_resource()
928 lockid, nodenum, mle->master); in dlm_get_lock_resource()
934 ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked); in dlm_get_lock_resource()
946 dlm_print_one_mle(mle); in dlm_get_lock_resource()
958 dlm_mle_detach_hb_events(dlm, mle); in dlm_get_lock_resource()
959 dlm_put_mle(mle); in dlm_get_lock_resource()
961 dlm_put_mle_inuse(mle); in dlm_get_lock_resource()
982 struct dlm_master_list_entry *mle, in dlm_wait_for_lock_mastery() argument
1003 ret = dlm_do_master_request(res, mle, res->owner); in dlm_wait_for_lock_mastery()
1016 spin_lock(&mle->spinlock); in dlm_wait_for_lock_mastery()
1017 m = mle->master; in dlm_wait_for_lock_mastery()
1018 map_changed = (memcmp(mle->vote_map, mle->node_map, in dlm_wait_for_lock_mastery()
1019 sizeof(mle->vote_map)) != 0); in dlm_wait_for_lock_mastery()
1020 voting_done = (memcmp(mle->vote_map, mle->response_map, in dlm_wait_for_lock_mastery()
1021 sizeof(mle->vote_map)) == 0); in dlm_wait_for_lock_mastery()
1028 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked); in dlm_wait_for_lock_mastery()
1029 b = (mle->type == DLM_MLE_BLOCK); in dlm_wait_for_lock_mastery()
1036 spin_unlock(&mle->spinlock); in dlm_wait_for_lock_mastery()
1061 bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); in dlm_wait_for_lock_mastery()
1066 mle->master = dlm->node_num; in dlm_wait_for_lock_mastery()
1077 spin_unlock(&mle->spinlock); in dlm_wait_for_lock_mastery()
1089 atomic_set(&mle->woken, 0); in dlm_wait_for_lock_mastery()
1090 (void)wait_event_timeout(mle->wq, in dlm_wait_for_lock_mastery()
1091 (atomic_read(&mle->woken) == 1), in dlm_wait_for_lock_mastery()
1108 ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0); in dlm_wait_for_lock_mastery()
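In the dlm_wait_for_lock_mastery() lines above, the waiter compares bitmaps under mle->spinlock (voting is done once response_map matches vote_map), then sleeps on mle->wq with wait_event_timeout() against the atomic mle->woken flag; the assert-master handler and the cleanup paths further down set woken and call wake_up() to end the wait early. A minimal sketch of that pairing, with hypothetical names:

#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>

static DECLARE_WAIT_QUEUE_HEAD(wq);		/* cf. mle->wq */
static atomic_t woken = ATOMIC_INIT(0);		/* cf. mle->woken */

/* Waiter side (cf. dlm_wait_for_lock_mastery()). */
static void wait_side(void)
{
	atomic_set(&woken, 0);
	/* returns early if woken becomes 1, otherwise after the timeout */
	(void)wait_event_timeout(wq, atomic_read(&woken) == 1,
				 msecs_to_jiffies(5000));
}

/* Waker side (cf. dlm_assert_master_handler()). */
static void wake_side(void)
{
	atomic_set(&woken, 1);
	wake_up(&wq);
}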
1194 struct dlm_master_list_entry *mle, in dlm_restart_lock_mastery() argument
1205 assert_spin_locked(&mle->spinlock); in dlm_restart_lock_mastery()
1207 dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map); in dlm_restart_lock_mastery()
1218 clear_bit(node, mle->response_map); in dlm_restart_lock_mastery()
1219 set_bit(node, mle->vote_map); in dlm_restart_lock_mastery()
1223 int lowest = find_next_bit(mle->maybe_map, in dlm_restart_lock_mastery()
1227 clear_bit(node, mle->maybe_map); in dlm_restart_lock_mastery()
1233 lowest = find_next_bit(mle->maybe_map, in dlm_restart_lock_mastery()
1258 mle->type = DLM_MLE_MASTER; in dlm_restart_lock_mastery()
1259 mle->mleres = res; in dlm_restart_lock_mastery()
1266 memset(mle->maybe_map, 0, sizeof(mle->maybe_map)); in dlm_restart_lock_mastery()
1267 memset(mle->response_map, 0, sizeof(mle->response_map)); in dlm_restart_lock_mastery()
1269 memcpy(mle->vote_map, mle->node_map, in dlm_restart_lock_mastery()
1270 sizeof(mle->node_map)); in dlm_restart_lock_mastery()
1272 if (mle->type != DLM_MLE_BLOCK) in dlm_restart_lock_mastery()
1273 set_bit(dlm->node_num, mle->maybe_map); in dlm_restart_lock_mastery()
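dlm_restart_lock_mastery() above rebuilds the election with plain bitmap operations: it clears a dead node from maybe_map, rescans for the lowest surviving candidate, zeroes response_map, and re-seeds vote_map from node_map. find_next_bit() returns the bitmap size when no bit is set, which is how "no candidate left" is detected. A sketch of that scan, assuming kernel context:

#include <linux/bitops.h>

/* Lowest node still marked as a possible master, or -1 if none
 * (cf. the find_next_bit() calls on mle->maybe_map). */
static int lowest_candidate(const unsigned long *maybe_map,
			    unsigned int max_nodes)
{
	unsigned long bit = find_next_bit(maybe_map, max_nodes, 0);

	return bit >= max_nodes ? -1 : (int)bit;
}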
1293 struct dlm_master_list_entry *mle, int to) in dlm_do_master_request() argument
1295 struct dlm_ctxt *dlm = mle->dlm; in dlm_do_master_request()
1302 BUG_ON(mle->type == DLM_MLE_MIGRATION); in dlm_do_master_request()
1304 request.namelen = (u8)mle->mnamelen; in dlm_do_master_request()
1305 memcpy(request.name, mle->mname, request.namelen); in dlm_do_master_request()
1338 spin_lock(&mle->spinlock); in dlm_do_master_request()
1341 set_bit(to, mle->response_map); in dlm_do_master_request()
1346 mle->master = to; in dlm_do_master_request()
1350 set_bit(to, mle->response_map); in dlm_do_master_request()
1354 set_bit(to, mle->response_map); in dlm_do_master_request()
1355 set_bit(to, mle->maybe_map); in dlm_do_master_request()
1366 spin_unlock(&mle->spinlock); in dlm_do_master_request()
1393 struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL; in dlm_master_request_handler() local
1431 if (mle) in dlm_master_request_handler()
1432 kmem_cache_free(dlm_mle_cache, mle); in dlm_master_request_handler()
1440 if (mle) in dlm_master_request_handler()
1441 kmem_cache_free(dlm_mle_cache, mle); in dlm_master_request_handler()
1455 if (mle) in dlm_master_request_handler()
1456 kmem_cache_free(dlm_mle_cache, mle); in dlm_master_request_handler()
1520 if (mle) in dlm_master_request_handler()
1521 kmem_cache_free(dlm_mle_cache, mle); in dlm_master_request_handler()
1536 if (!mle) { in dlm_master_request_handler()
1540 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); in dlm_master_request_handler()
1541 if (!mle) { in dlm_master_request_handler()
1551 dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen); in dlm_master_request_handler()
1552 set_bit(request->node_idx, mle->maybe_map); in dlm_master_request_handler()
1553 __dlm_insert_mle(dlm, mle); in dlm_master_request_handler()
1650 struct dlm_master_list_entry *mle = NULL; in dlm_do_assert_master() local
1681 if (dlm_find_mle(dlm, &mle, (char *)lockname, in dlm_do_assert_master()
1683 dlm_print_one_mle(mle); in dlm_do_assert_master()
1684 __dlm_put_mle(mle); in dlm_do_assert_master()
1738 struct dlm_master_list_entry *mle = NULL; in dlm_assert_master_handler() local
1767 if (!dlm_find_mle(dlm, &mle, name, namelen)) { in dlm_assert_master_handler()
1773 int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0); in dlm_assert_master_handler()
1796 if (mle->type == DLM_MLE_MIGRATION) { in dlm_assert_master_handler()
1807 __dlm_put_mle(mle); in dlm_assert_master_handler()
1826 if (!mle) { in dlm_assert_master_handler()
1836 } else if (mle->type != DLM_MLE_MIGRATION) { in dlm_assert_master_handler()
1861 if (assert->node_idx != mle->new_master) { in dlm_assert_master_handler()
1865 assert->node_idx, mle->new_master, in dlm_assert_master_handler()
1866 mle->master, namelen, name); in dlm_assert_master_handler()
1877 if (mle) { in dlm_assert_master_handler()
1882 spin_lock(&mle->spinlock); in dlm_assert_master_handler()
1883 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION) in dlm_assert_master_handler()
1889 while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, in dlm_assert_master_handler()
1895 mle->master = assert->node_idx; in dlm_assert_master_handler()
1896 atomic_set(&mle->woken, 1); in dlm_assert_master_handler()
1897 wake_up(&mle->wq); in dlm_assert_master_handler()
1898 spin_unlock(&mle->spinlock); in dlm_assert_master_handler()
1903 if (mle->type == DLM_MLE_MIGRATION) { in dlm_assert_master_handler()
1907 dlm->node_num, mle->new_master); in dlm_assert_master_handler()
1910 dlm_change_lockres_owner(dlm, res, mle->new_master); in dlm_assert_master_handler()
1913 dlm_change_lockres_owner(dlm, res, mle->master); in dlm_assert_master_handler()
1926 rr = atomic_read(&mle->mle_refs.refcount); in dlm_assert_master_handler()
1927 if (mle->inuse > 0) { in dlm_assert_master_handler()
1942 assert->node_idx, rr, extra_ref, mle->inuse); in dlm_assert_master_handler()
1943 dlm_print_one_mle(mle); in dlm_assert_master_handler()
1945 __dlm_unlink_mle(dlm, mle); in dlm_assert_master_handler()
1946 __dlm_mle_detach_hb_events(dlm, mle); in dlm_assert_master_handler()
1947 __dlm_put_mle(mle); in dlm_assert_master_handler()
1953 __dlm_put_mle(mle); in dlm_assert_master_handler()
2401 struct dlm_master_list_entry *mle = NULL; in dlm_migrate_lockres() local
2429 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); in dlm_migrate_lockres()
2430 if (!mle) { in dlm_migrate_lockres()
2442 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name, in dlm_migrate_lockres()
2477 dlm_mle_detach_hb_events(dlm, mle); in dlm_migrate_lockres()
2478 dlm_put_mle(mle); in dlm_migrate_lockres()
2479 } else if (mle) { in dlm_migrate_lockres()
2480 kmem_cache_free(dlm_mle_cache, mle); in dlm_migrate_lockres()
2481 mle = NULL; in dlm_migrate_lockres()
2503 dlm_get_mle_inuse(mle); in dlm_migrate_lockres()
2518 dlm_mle_detach_hb_events(dlm, mle); in dlm_migrate_lockres()
2519 dlm_put_mle(mle); in dlm_migrate_lockres()
2520 dlm_put_mle_inuse(mle); in dlm_migrate_lockres()
2544 ret = wait_event_interruptible_timeout(mle->wq, in dlm_migrate_lockres()
2545 (atomic_read(&mle->woken) == 1), in dlm_migrate_lockres()
2549 if (atomic_read(&mle->woken) == 1 || in dlm_migrate_lockres()
2564 dlm_mle_detach_hb_events(dlm, mle); in dlm_migrate_lockres()
2565 dlm_put_mle(mle); in dlm_migrate_lockres()
2566 dlm_put_mle_inuse(mle); in dlm_migrate_lockres()
2587 dlm_mle_detach_hb_events(dlm, mle); in dlm_migrate_lockres()
2588 dlm_put_mle_inuse(mle); in dlm_migrate_lockres()
2967 struct dlm_master_list_entry *mle = NULL, *oldmle = NULL; in dlm_migrate_request_handler() local
2980 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); in dlm_migrate_request_handler()
2982 if (!mle) { in dlm_migrate_request_handler()
2999 kmem_cache_free(dlm_mle_cache, mle); in dlm_migrate_request_handler()
3009 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, in dlm_migrate_request_handler()
3040 struct dlm_master_list_entry *mle, in dlm_add_migration_mle() argument
3094 dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen); in dlm_add_migration_mle()
3095 mle->new_master = new_master; in dlm_add_migration_mle()
3098 mle->master = master; in dlm_add_migration_mle()
3100 set_bit(new_master, mle->maybe_map); in dlm_add_migration_mle()
3101 __dlm_insert_mle(dlm, mle); in dlm_add_migration_mle()
3110 struct dlm_master_list_entry *mle) in dlm_reset_mleres_owner() argument
3115 res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen, in dlm_reset_mleres_owner()
3116 mle->mnamehash); in dlm_reset_mleres_owner()
3128 __dlm_mle_detach_hb_events(dlm, mle); in dlm_reset_mleres_owner()
3132 __dlm_put_mle(mle); in dlm_reset_mleres_owner()
3140 struct dlm_master_list_entry *mle) in dlm_clean_migration_mle() argument
3142 __dlm_mle_detach_hb_events(dlm, mle); in dlm_clean_migration_mle()
3144 spin_lock(&mle->spinlock); in dlm_clean_migration_mle()
3145 __dlm_unlink_mle(dlm, mle); in dlm_clean_migration_mle()
3146 atomic_set(&mle->woken, 1); in dlm_clean_migration_mle()
3147 spin_unlock(&mle->spinlock); in dlm_clean_migration_mle()
3149 wake_up(&mle->wq); in dlm_clean_migration_mle()
3153 struct dlm_master_list_entry *mle, u8 dead_node) in dlm_clean_block_mle() argument
3157 BUG_ON(mle->type != DLM_MLE_BLOCK); in dlm_clean_block_mle()
3159 spin_lock(&mle->spinlock); in dlm_clean_block_mle()
3160 bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); in dlm_clean_block_mle()
3164 spin_unlock(&mle->spinlock); in dlm_clean_block_mle()
3171 atomic_set(&mle->woken, 1); in dlm_clean_block_mle()
3172 spin_unlock(&mle->spinlock); in dlm_clean_block_mle()
3173 wake_up(&mle->wq); in dlm_clean_block_mle()
3176 __dlm_mle_detach_hb_events(dlm, mle); in dlm_clean_block_mle()
3177 __dlm_put_mle(mle); in dlm_clean_block_mle()
3183 struct dlm_master_list_entry *mle; in dlm_clean_master_list() local
3198 mle = hlist_entry(list, struct dlm_master_list_entry, in dlm_clean_master_list()
3201 BUG_ON(mle->type != DLM_MLE_BLOCK && in dlm_clean_master_list()
3202 mle->type != DLM_MLE_MASTER && in dlm_clean_master_list()
3203 mle->type != DLM_MLE_MIGRATION); in dlm_clean_master_list()
3208 if (mle->type == DLM_MLE_MASTER) in dlm_clean_master_list()
3214 if (mle->type == DLM_MLE_BLOCK) { in dlm_clean_master_list()
3215 dlm_clean_block_mle(dlm, mle, dead_node); in dlm_clean_master_list()
3230 if (mle->master != dead_node && in dlm_clean_master_list()
3231 mle->new_master != dead_node) in dlm_clean_master_list()
3236 dlm_clean_migration_mle(dlm, mle); in dlm_clean_master_list()
3239 "%u to %u!\n", dlm->name, dead_node, mle->master, in dlm_clean_master_list()
3240 mle->new_master); in dlm_clean_master_list()
3247 res = dlm_reset_mleres_owner(dlm, mle); in dlm_clean_master_list()
3253 __dlm_put_mle(mle); in dlm_clean_master_list()
3380 struct dlm_master_list_entry *mle; in dlm_force_free_mles() local
3398 mle = hlist_entry(list, struct dlm_master_list_entry, in dlm_force_free_mles()
3400 if (mle->type != DLM_MLE_BLOCK) { in dlm_force_free_mles()
3401 mlog(ML_ERROR, "bad mle: %p\n", mle); in dlm_force_free_mles()
3402 dlm_print_one_mle(mle); in dlm_force_free_mles()
3404 atomic_set(&mle->woken, 1); in dlm_force_free_mles()
3405 wake_up(&mle->wq); in dlm_force_free_mles()
3407 __dlm_unlink_mle(dlm, mle); in dlm_force_free_mles()
3408 __dlm_mle_detach_hb_events(dlm, mle); in dlm_force_free_mles()
3409 __dlm_put_mle(mle); in dlm_force_free_mles()