Lines matching refs: mle
56 struct dlm_master_list_entry *mle,
60 struct dlm_master_list_entry *mle,
71 struct dlm_master_list_entry *mle, in dlm_mle_equal() argument
75 if (dlm != mle->dlm) in dlm_mle_equal()
78 if (namelen != mle->mnamelen || in dlm_mle_equal()
79 memcmp(name, mle->mname, namelen) != 0) in dlm_mle_equal()
90 static void dlm_init_mle(struct dlm_master_list_entry *mle,
96 static void dlm_put_mle(struct dlm_master_list_entry *mle);
97 static void __dlm_put_mle(struct dlm_master_list_entry *mle);
99 struct dlm_master_list_entry **mle,
103 struct dlm_master_list_entry *mle, int to);
108 struct dlm_master_list_entry *mle,
112 struct dlm_master_list_entry *mle,
116 struct dlm_master_list_entry *mle,
179 struct dlm_master_list_entry *mle) in __dlm_mle_attach_hb_events() argument
183 list_add_tail(&mle->hb_events, &dlm->mle_hb_events); in __dlm_mle_attach_hb_events()
188 struct dlm_master_list_entry *mle) in __dlm_mle_detach_hb_events() argument
190 if (!list_empty(&mle->hb_events)) in __dlm_mle_detach_hb_events()
191 list_del_init(&mle->hb_events); in __dlm_mle_detach_hb_events()
196 struct dlm_master_list_entry *mle) in dlm_mle_detach_hb_events() argument
199 __dlm_mle_detach_hb_events(dlm, mle); in dlm_mle_detach_hb_events()
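
The attach/detach references above hang each mle on the domain-wide dlm->mle_hb_events list so that heartbeat callbacks can find and update it later. Below is a minimal sketch of that park-on-a-notification-list pattern; the struct, list, and function names are illustrative stand-ins, not the dlm's own.

#include <linux/list.h>
#include <linux/spinlock.h>

/* Illustrative only: a watcher hung off a global event list, in the same
 * spirit as the mle->hb_events / dlm->mle_hb_events references above. */
struct hb_watcher {
	struct list_head hb_events;	/* empty when detached */
};

static LIST_HEAD(hb_event_list);
static DEFINE_SPINLOCK(hb_event_lock);

static void watcher_attach(struct hb_watcher *w)
{
	spin_lock(&hb_event_lock);
	list_add_tail(&w->hb_events, &hb_event_list);
	spin_unlock(&hb_event_lock);
}

static void watcher_detach(struct hb_watcher *w)
{
	spin_lock(&hb_event_lock);
	/* list_del_init() leaves the node self-linked, so detaching an
	 * already-detached watcher is harmless, mirroring the
	 * !list_empty() check in __dlm_mle_detach_hb_events(). */
	if (!list_empty(&w->hb_events))
		list_del_init(&w->hb_events);
	spin_unlock(&hb_event_lock);
}
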
203 static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle) in dlm_get_mle_inuse() argument
206 dlm = mle->dlm; in dlm_get_mle_inuse()
210 mle->inuse++; in dlm_get_mle_inuse()
211 kref_get(&mle->mle_refs); in dlm_get_mle_inuse()
214 static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle) in dlm_put_mle_inuse() argument
217 dlm = mle->dlm; in dlm_put_mle_inuse()
221 mle->inuse--; in dlm_put_mle_inuse()
222 __dlm_put_mle(mle); in dlm_put_mle_inuse()
229 static void __dlm_put_mle(struct dlm_master_list_entry *mle) in __dlm_put_mle() argument
232 dlm = mle->dlm; in __dlm_put_mle()
236 if (!atomic_read(&mle->mle_refs.refcount)) { in __dlm_put_mle()
239 mlog(ML_ERROR, "bad mle: %p\n", mle); in __dlm_put_mle()
240 dlm_print_one_mle(mle); in __dlm_put_mle()
243 kref_put(&mle->mle_refs, dlm_mle_release); in __dlm_put_mle()
248 static void dlm_put_mle(struct dlm_master_list_entry *mle) in dlm_put_mle() argument
251 dlm = mle->dlm; in dlm_put_mle()
255 __dlm_put_mle(mle); in dlm_put_mle()
260 static inline void dlm_get_mle(struct dlm_master_list_entry *mle) in dlm_get_mle() argument
262 kref_get(&mle->mle_refs); in dlm_get_mle()
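
dlm_get_mle()/dlm_put_mle() and the *_inuse variants follow the standard kref lifecycle, with an extra "inuse" pin held across long-running operations. A minimal, hedged sketch of that pattern is below; mle_like and its helpers are made-up names, and the real release path additionally unlinks the entry from the master hash and detaches heartbeat events, as the dlm_mle_release() references later in this listing show.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct mle_like {
	struct kref refs;
	int inuse;		/* extra pin held across long operations */
};

static void mle_like_release(struct kref *kref)
{
	struct mle_like *m = container_of(kref, struct mle_like, refs);

	kfree(m);		/* last reference gone */
}

static struct mle_like *mle_like_alloc(void)
{
	struct mle_like *m = kzalloc(sizeof(*m), GFP_NOFS);

	if (m)
		kref_init(&m->refs);	/* refcount starts at 1 */
	return m;
}

static void mle_like_get(struct mle_like *m)
{
	kref_get(&m->refs);
}

static void mle_like_put(struct mle_like *m)
{
	kref_put(&m->refs, mle_like_release);
}

static void mle_like_get_inuse(struct mle_like *m)
{
	m->inuse++;			/* caller must hold the owning lock */
	kref_get(&m->refs);
}

static void mle_like_put_inuse(struct mle_like *m)
{
	m->inuse--;
	mle_like_put(m);
}
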
265 static void dlm_init_mle(struct dlm_master_list_entry *mle, in dlm_init_mle() argument
274 mle->dlm = dlm; in dlm_init_mle()
275 mle->type = type; in dlm_init_mle()
276 INIT_HLIST_NODE(&mle->master_hash_node); in dlm_init_mle()
277 INIT_LIST_HEAD(&mle->hb_events); in dlm_init_mle()
278 memset(mle->maybe_map, 0, sizeof(mle->maybe_map)); in dlm_init_mle()
279 spin_lock_init(&mle->spinlock); in dlm_init_mle()
280 init_waitqueue_head(&mle->wq); in dlm_init_mle()
281 atomic_set(&mle->woken, 0); in dlm_init_mle()
282 kref_init(&mle->mle_refs); in dlm_init_mle()
283 memset(mle->response_map, 0, sizeof(mle->response_map)); in dlm_init_mle()
284 mle->master = O2NM_MAX_NODES; in dlm_init_mle()
285 mle->new_master = O2NM_MAX_NODES; in dlm_init_mle()
286 mle->inuse = 0; in dlm_init_mle()
288 BUG_ON(mle->type != DLM_MLE_BLOCK && in dlm_init_mle()
289 mle->type != DLM_MLE_MASTER && in dlm_init_mle()
290 mle->type != DLM_MLE_MIGRATION); in dlm_init_mle()
292 if (mle->type == DLM_MLE_MASTER) { in dlm_init_mle()
294 mle->mleres = res; in dlm_init_mle()
295 memcpy(mle->mname, res->lockname.name, res->lockname.len); in dlm_init_mle()
296 mle->mnamelen = res->lockname.len; in dlm_init_mle()
297 mle->mnamehash = res->lockname.hash; in dlm_init_mle()
300 mle->mleres = NULL; in dlm_init_mle()
301 memcpy(mle->mname, name, namelen); in dlm_init_mle()
302 mle->mnamelen = namelen; in dlm_init_mle()
303 mle->mnamehash = dlm_lockid_hash(name, namelen); in dlm_init_mle()
306 atomic_inc(&dlm->mle_tot_count[mle->type]); in dlm_init_mle()
307 atomic_inc(&dlm->mle_cur_count[mle->type]); in dlm_init_mle()
310 memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map)); in dlm_init_mle()
311 memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map)); in dlm_init_mle()
312 clear_bit(dlm->node_num, mle->vote_map); in dlm_init_mle()
313 clear_bit(dlm->node_num, mle->node_map); in dlm_init_mle()
316 __dlm_mle_attach_hb_events(dlm, mle); in dlm_init_mle()
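
dlm_init_mle() zeroes maybe_map/response_map, records the lock name (or points mleres at the resource for a DLM_MLE_MASTER entry), then seeds node_map and vote_map from the domain map and clears the local node's bit in both. A small illustrative sketch of that last step, using bitmap_copy() as the idiomatic equivalent of the memcpy() calls above; MAX_NODES, the struct, and the function name are stand-ins.

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define MAX_NODES 255			/* stand-in for O2NM_MAX_NODES */

struct vote_state {
	DECLARE_BITMAP(node_map, MAX_NODES);
	DECLARE_BITMAP(vote_map, MAX_NODES);
};

/* Seed both maps from the current domain membership and drop the local
 * node's bit, as the dlm_init_mle() lines above do. */
static void seed_vote_maps(struct vote_state *vs,
			   const unsigned long *domain_map, u8 local_node)
{
	bitmap_copy(vs->node_map, domain_map, MAX_NODES);
	bitmap_copy(vs->vote_map, domain_map, MAX_NODES);
	clear_bit(local_node, vs->vote_map);
	clear_bit(local_node, vs->node_map);
}
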
319 void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle) in __dlm_unlink_mle() argument
324 if (!hlist_unhashed(&mle->master_hash_node)) in __dlm_unlink_mle()
325 hlist_del_init(&mle->master_hash_node); in __dlm_unlink_mle()
328 void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle) in __dlm_insert_mle() argument
334 bucket = dlm_master_hash(dlm, mle->mnamehash); in __dlm_insert_mle()
335 hlist_add_head(&mle->master_hash_node, bucket); in __dlm_insert_mle()
340 struct dlm_master_list_entry **mle, in dlm_find_mle() argument
355 *mle = tmpmle; in dlm_find_mle()
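
__dlm_insert_mle() and dlm_find_mle() implement simple open hashing: entries are chained into hlist buckets selected by the lock-name hash, and lookup walks one bucket comparing length and bytes, as the dlm_mle_equal() references at the top of this listing do. A self-contained sketch of that bucket insert/lookup with made-up names and a fixed bucket count:

#include <linux/list.h>
#include <linux/string.h>

#define NR_BUCKETS 128

struct named_entry {
	struct hlist_node hash_node;
	unsigned int namelen;
	char name[64];
};

static struct hlist_head buckets[NR_BUCKETS];

static struct hlist_head *bucket_for(unsigned int hash)
{
	return &buckets[hash % NR_BUCKETS];
}

static void entry_insert(struct named_entry *e, unsigned int hash)
{
	hlist_add_head(&e->hash_node, bucket_for(hash));
}

static struct named_entry *entry_find(const char *name, unsigned int namelen,
				      unsigned int hash)
{
	struct named_entry *e;

	/* Compare length first, then the bytes, like dlm_mle_equal(). */
	hlist_for_each_entry(e, bucket_for(hash), hash_node) {
		if (e->namelen == namelen &&
		    !memcmp(e->name, name, namelen))
			return e;
	}
	return NULL;
}
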
363 struct dlm_master_list_entry *mle; in dlm_hb_event_notify_attached() local
367 list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) { in dlm_hb_event_notify_attached()
369 dlm_mle_node_up(dlm, mle, NULL, idx); in dlm_hb_event_notify_attached()
371 dlm_mle_node_down(dlm, mle, NULL, idx); in dlm_hb_event_notify_attached()
376 struct dlm_master_list_entry *mle, in dlm_mle_node_down() argument
379 spin_lock(&mle->spinlock); in dlm_mle_node_down()
381 if (!test_bit(idx, mle->node_map)) in dlm_mle_node_down()
384 clear_bit(idx, mle->node_map); in dlm_mle_node_down()
386 spin_unlock(&mle->spinlock); in dlm_mle_node_down()
390 struct dlm_master_list_entry *mle, in dlm_mle_node_up() argument
393 spin_lock(&mle->spinlock); in dlm_mle_node_up()
395 if (test_bit(idx, mle->node_map)) in dlm_mle_node_up()
398 set_bit(idx, mle->node_map); in dlm_mle_node_up()
400 spin_unlock(&mle->spinlock); in dlm_mle_node_up()
423 struct dlm_master_list_entry *mle; in dlm_mle_release() local
426 mle = container_of(kref, struct dlm_master_list_entry, mle_refs); in dlm_mle_release()
427 dlm = mle->dlm; in dlm_mle_release()
432 mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname, in dlm_mle_release()
433 mle->type); in dlm_mle_release()
436 __dlm_unlink_mle(dlm, mle); in dlm_mle_release()
439 __dlm_mle_detach_hb_events(dlm, mle); in dlm_mle_release()
441 atomic_dec(&dlm->mle_cur_count[mle->type]); in dlm_mle_release()
445 kmem_cache_free(dlm_mle_cache, mle); in dlm_mle_release()
729 struct dlm_master_list_entry *mle = NULL; in dlm_get_lock_resource() local
836 blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen); in dlm_get_lock_resource()
839 if (mle->type == DLM_MLE_MASTER) { in dlm_get_lock_resource()
843 mig = (mle->type == DLM_MLE_MIGRATION); in dlm_get_lock_resource()
852 if (mig || mle->master != O2NM_MAX_NODES) { in dlm_get_lock_resource()
853 BUG_ON(mig && mle->master == dlm->node_num); in dlm_get_lock_resource()
864 dlm_mle_detach_hb_events(dlm, mle); in dlm_get_lock_resource()
865 dlm_put_mle(mle); in dlm_get_lock_resource()
866 mle = NULL; in dlm_get_lock_resource()
875 mle = alloc_mle; in dlm_get_lock_resource()
878 dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0); in dlm_get_lock_resource()
879 set_bit(dlm->node_num, mle->maybe_map); in dlm_get_lock_resource()
880 __dlm_insert_mle(dlm, mle); in dlm_get_lock_resource()
910 dlm_get_mle_inuse(mle); in dlm_get_lock_resource()
956 dlm_node_iter_init(mle->vote_map, &iter); in dlm_get_lock_resource()
958 ret = dlm_do_master_request(res, mle, nodenum); in dlm_get_lock_resource()
961 if (mle->master != O2NM_MAX_NODES) { in dlm_get_lock_resource()
963 if (mle->master <= nodenum) in dlm_get_lock_resource()
971 lockid, nodenum, mle->master); in dlm_get_lock_resource()
977 ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked); in dlm_get_lock_resource()
989 dlm_print_one_mle(mle); in dlm_get_lock_resource()
1001 dlm_mle_detach_hb_events(dlm, mle); in dlm_get_lock_resource()
1002 dlm_put_mle(mle); in dlm_get_lock_resource()
1004 dlm_put_mle_inuse(mle); in dlm_get_lock_resource()
1025 struct dlm_master_list_entry *mle, in dlm_wait_for_lock_mastery() argument
1046 ret = dlm_do_master_request(res, mle, res->owner); in dlm_wait_for_lock_mastery()
1059 spin_lock(&mle->spinlock); in dlm_wait_for_lock_mastery()
1060 m = mle->master; in dlm_wait_for_lock_mastery()
1061 map_changed = (memcmp(mle->vote_map, mle->node_map, in dlm_wait_for_lock_mastery()
1062 sizeof(mle->vote_map)) != 0); in dlm_wait_for_lock_mastery()
1063 voting_done = (memcmp(mle->vote_map, mle->response_map, in dlm_wait_for_lock_mastery()
1064 sizeof(mle->vote_map)) == 0); in dlm_wait_for_lock_mastery()
1071 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked); in dlm_wait_for_lock_mastery()
1072 b = (mle->type == DLM_MLE_BLOCK); in dlm_wait_for_lock_mastery()
1079 spin_unlock(&mle->spinlock); in dlm_wait_for_lock_mastery()
1104 bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); in dlm_wait_for_lock_mastery()
1109 mle->master = dlm->node_num; in dlm_wait_for_lock_mastery()
1120 spin_unlock(&mle->spinlock); in dlm_wait_for_lock_mastery()
1132 atomic_set(&mle->woken, 0); in dlm_wait_for_lock_mastery()
1133 (void)wait_event_timeout(mle->wq, in dlm_wait_for_lock_mastery()
1134 (atomic_read(&mle->woken) == 1), in dlm_wait_for_lock_mastery()
1151 ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0); in dlm_wait_for_lock_mastery()
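
The wait loop in dlm_wait_for_lock_mastery() resets mle->woken, sleeps on mle->wq with wait_event_timeout(), and is released either by the timeout or by another path setting woken and calling wake_up(), as the dlm_assert_master_handler() references further down do. A minimal waiter/waker pair showing that pattern; the names are illustrative.

#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

struct waiter {
	wait_queue_head_t wq;
	atomic_t woken;
};

static void waiter_init(struct waiter *w)
{
	init_waitqueue_head(&w->wq);
	atomic_set(&w->woken, 0);
}

static void waiter_sleep(struct waiter *w, unsigned long timeout_ms)
{
	atomic_set(&w->woken, 0);
	/* Returns early if waiter_wake() runs first; otherwise times out,
	 * mirroring the wait_event_timeout() on mle->wq above. */
	wait_event_timeout(w->wq, atomic_read(&w->woken) == 1,
			   msecs_to_jiffies(timeout_ms));
}

static void waiter_wake(struct waiter *w)
{
	atomic_set(&w->woken, 1);
	wake_up(&w->wq);
}
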
1237 struct dlm_master_list_entry *mle, in dlm_restart_lock_mastery() argument
1248 assert_spin_locked(&mle->spinlock); in dlm_restart_lock_mastery()
1250 dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map); in dlm_restart_lock_mastery()
1261 clear_bit(node, mle->response_map); in dlm_restart_lock_mastery()
1262 set_bit(node, mle->vote_map); in dlm_restart_lock_mastery()
1266 int lowest = find_next_bit(mle->maybe_map, in dlm_restart_lock_mastery()
1270 clear_bit(node, mle->maybe_map); in dlm_restart_lock_mastery()
1276 lowest = find_next_bit(mle->maybe_map, in dlm_restart_lock_mastery()
1301 mle->type = DLM_MLE_MASTER; in dlm_restart_lock_mastery()
1302 mle->mleres = res; in dlm_restart_lock_mastery()
1309 memset(mle->maybe_map, 0, sizeof(mle->maybe_map)); in dlm_restart_lock_mastery()
1310 memset(mle->response_map, 0, sizeof(mle->response_map)); in dlm_restart_lock_mastery()
1312 memcpy(mle->vote_map, mle->node_map, in dlm_restart_lock_mastery()
1313 sizeof(mle->node_map)); in dlm_restart_lock_mastery()
1315 if (mle->type != DLM_MLE_BLOCK) in dlm_restart_lock_mastery()
1316 set_bit(dlm->node_num, mle->maybe_map); in dlm_restart_lock_mastery()
1336 struct dlm_master_list_entry *mle, int to) in dlm_do_master_request() argument
1338 struct dlm_ctxt *dlm = mle->dlm; in dlm_do_master_request()
1345 BUG_ON(mle->type == DLM_MLE_MIGRATION); in dlm_do_master_request()
1347 request.namelen = (u8)mle->mnamelen; in dlm_do_master_request()
1348 memcpy(request.name, mle->mname, request.namelen); in dlm_do_master_request()
1381 spin_lock(&mle->spinlock); in dlm_do_master_request()
1384 set_bit(to, mle->response_map); in dlm_do_master_request()
1389 mle->master = to; in dlm_do_master_request()
1393 set_bit(to, mle->response_map); in dlm_do_master_request()
1397 set_bit(to, mle->response_map); in dlm_do_master_request()
1398 set_bit(to, mle->maybe_map); in dlm_do_master_request()
1409 spin_unlock(&mle->spinlock); in dlm_do_master_request()
1436 struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL; in dlm_master_request_handler() local
1487 if (mle) in dlm_master_request_handler()
1488 kmem_cache_free(dlm_mle_cache, mle); in dlm_master_request_handler()
1496 if (mle) in dlm_master_request_handler()
1497 kmem_cache_free(dlm_mle_cache, mle); in dlm_master_request_handler()
1511 if (mle) in dlm_master_request_handler()
1512 kmem_cache_free(dlm_mle_cache, mle); in dlm_master_request_handler()
1576 if (mle) in dlm_master_request_handler()
1577 kmem_cache_free(dlm_mle_cache, mle); in dlm_master_request_handler()
1592 if (!mle) { in dlm_master_request_handler()
1596 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); in dlm_master_request_handler()
1597 if (!mle) { in dlm_master_request_handler()
1607 dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen); in dlm_master_request_handler()
1608 set_bit(request->node_idx, mle->maybe_map); in dlm_master_request_handler()
1609 __dlm_insert_mle(dlm, mle); in dlm_master_request_handler()
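
dlm_master_request_handler() allocates block-type mles from a dedicated slab cache with GFP_NOFS so the allocation cannot recurse back into the filesystem, and frees unneeded ones back to the same cache. A small, self-contained sketch of that cache usage; the cache name and entry struct are made up for illustration.

#include <linux/errno.h>
#include <linux/slab.h>

struct cached_entry {
	int dummy;
};

static struct kmem_cache *entry_cache;

static int entry_cache_init(void)
{
	entry_cache = kmem_cache_create("entry_cache",
					sizeof(struct cached_entry), 0, 0, NULL);
	return entry_cache ? 0 : -ENOMEM;
}

static struct cached_entry *entry_alloc(void)
{
	/* GFP_NOFS: safe from filesystem/dlm paths, mirroring the
	 * kmem_cache_alloc(dlm_mle_cache, GFP_NOFS) calls in this listing. */
	return kmem_cache_alloc(entry_cache, GFP_NOFS);
}

static void entry_free(struct cached_entry *e)
{
	kmem_cache_free(entry_cache, e);
}
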
1713 struct dlm_master_list_entry *mle = NULL; in dlm_do_assert_master() local
1744 if (dlm_find_mle(dlm, &mle, (char *)lockname, in dlm_do_assert_master()
1746 dlm_print_one_mle(mle); in dlm_do_assert_master()
1747 __dlm_put_mle(mle); in dlm_do_assert_master()
1801 struct dlm_master_list_entry *mle = NULL; in dlm_assert_master_handler() local
1830 if (!dlm_find_mle(dlm, &mle, name, namelen)) { in dlm_assert_master_handler()
1836 int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0); in dlm_assert_master_handler()
1859 if (mle->type == DLM_MLE_MIGRATION) { in dlm_assert_master_handler()
1870 __dlm_put_mle(mle); in dlm_assert_master_handler()
1889 if (!mle) { in dlm_assert_master_handler()
1899 } else if (mle->type != DLM_MLE_MIGRATION) { in dlm_assert_master_handler()
1924 if (assert->node_idx != mle->new_master) { in dlm_assert_master_handler()
1928 assert->node_idx, mle->new_master, in dlm_assert_master_handler()
1929 mle->master, namelen, name); in dlm_assert_master_handler()
1940 if (mle) { in dlm_assert_master_handler()
1945 spin_lock(&mle->spinlock); in dlm_assert_master_handler()
1946 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION) in dlm_assert_master_handler()
1952 while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, in dlm_assert_master_handler()
1960 mle->master = assert->node_idx; in dlm_assert_master_handler()
1961 atomic_set(&mle->woken, 1); in dlm_assert_master_handler()
1962 wake_up(&mle->wq); in dlm_assert_master_handler()
1963 spin_unlock(&mle->spinlock); in dlm_assert_master_handler()
1968 if (mle->type == DLM_MLE_MIGRATION) { in dlm_assert_master_handler()
1972 dlm->node_num, mle->new_master); in dlm_assert_master_handler()
1975 dlm_change_lockres_owner(dlm, res, mle->new_master); in dlm_assert_master_handler()
1978 dlm_change_lockres_owner(dlm, res, mle->master); in dlm_assert_master_handler()
1991 rr = atomic_read(&mle->mle_refs.refcount); in dlm_assert_master_handler()
1992 if (mle->inuse > 0) { in dlm_assert_master_handler()
2007 assert->node_idx, rr, extra_ref, mle->inuse); in dlm_assert_master_handler()
2008 dlm_print_one_mle(mle); in dlm_assert_master_handler()
2010 __dlm_unlink_mle(dlm, mle); in dlm_assert_master_handler()
2011 __dlm_mle_detach_hb_events(dlm, mle); in dlm_assert_master_handler()
2012 __dlm_put_mle(mle); in dlm_assert_master_handler()
2018 __dlm_put_mle(mle); in dlm_assert_master_handler()
2064 if (mle) in dlm_assert_master_handler()
2065 __dlm_put_mle(mle); in dlm_assert_master_handler()
2479 struct dlm_master_list_entry *mle = NULL; in dlm_migrate_lockres() local
2507 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); in dlm_migrate_lockres()
2508 if (!mle) { in dlm_migrate_lockres()
2520 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name, in dlm_migrate_lockres()
2526 dlm_get_mle_inuse(mle); in dlm_migrate_lockres()
2560 dlm_mle_detach_hb_events(dlm, mle); in dlm_migrate_lockres()
2561 dlm_put_mle(mle); in dlm_migrate_lockres()
2562 dlm_put_mle_inuse(mle); in dlm_migrate_lockres()
2563 } else if (mle) { in dlm_migrate_lockres()
2564 kmem_cache_free(dlm_mle_cache, mle); in dlm_migrate_lockres()
2565 mle = NULL; in dlm_migrate_lockres()
2591 dlm_mle_detach_hb_events(dlm, mle); in dlm_migrate_lockres()
2592 dlm_put_mle(mle); in dlm_migrate_lockres()
2593 dlm_put_mle_inuse(mle); in dlm_migrate_lockres()
2617 ret = wait_event_interruptible_timeout(mle->wq, in dlm_migrate_lockres()
2618 (atomic_read(&mle->woken) == 1), in dlm_migrate_lockres()
2622 if (atomic_read(&mle->woken) == 1 || in dlm_migrate_lockres()
2637 dlm_mle_detach_hb_events(dlm, mle); in dlm_migrate_lockres()
2638 dlm_put_mle(mle); in dlm_migrate_lockres()
2639 dlm_put_mle_inuse(mle); in dlm_migrate_lockres()
2660 dlm_mle_detach_hb_events(dlm, mle); in dlm_migrate_lockres()
2661 dlm_put_mle_inuse(mle); in dlm_migrate_lockres()
3042 struct dlm_master_list_entry *mle = NULL, *oldmle = NULL; in dlm_migrate_request_handler() local
3055 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); in dlm_migrate_request_handler()
3057 if (!mle) { in dlm_migrate_request_handler()
3074 kmem_cache_free(dlm_mle_cache, mle); in dlm_migrate_request_handler()
3084 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, in dlm_migrate_request_handler()
3115 struct dlm_master_list_entry *mle, in dlm_add_migration_mle() argument
3173 dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen); in dlm_add_migration_mle()
3174 mle->new_master = new_master; in dlm_add_migration_mle()
3177 mle->master = master; in dlm_add_migration_mle()
3179 set_bit(new_master, mle->maybe_map); in dlm_add_migration_mle()
3180 __dlm_insert_mle(dlm, mle); in dlm_add_migration_mle()
3189 struct dlm_master_list_entry *mle) in dlm_reset_mleres_owner() argument
3194 res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen, in dlm_reset_mleres_owner()
3195 mle->mnamehash); in dlm_reset_mleres_owner()
3207 __dlm_mle_detach_hb_events(dlm, mle); in dlm_reset_mleres_owner()
3211 __dlm_put_mle(mle); in dlm_reset_mleres_owner()
3219 struct dlm_master_list_entry *mle) in dlm_clean_migration_mle() argument
3221 __dlm_mle_detach_hb_events(dlm, mle); in dlm_clean_migration_mle()
3223 spin_lock(&mle->spinlock); in dlm_clean_migration_mle()
3224 __dlm_unlink_mle(dlm, mle); in dlm_clean_migration_mle()
3225 atomic_set(&mle->woken, 1); in dlm_clean_migration_mle()
3226 spin_unlock(&mle->spinlock); in dlm_clean_migration_mle()
3228 wake_up(&mle->wq); in dlm_clean_migration_mle()
3232 struct dlm_master_list_entry *mle, u8 dead_node) in dlm_clean_block_mle() argument
3236 BUG_ON(mle->type != DLM_MLE_BLOCK); in dlm_clean_block_mle()
3238 spin_lock(&mle->spinlock); in dlm_clean_block_mle()
3239 bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); in dlm_clean_block_mle()
3243 spin_unlock(&mle->spinlock); in dlm_clean_block_mle()
3250 atomic_set(&mle->woken, 1); in dlm_clean_block_mle()
3251 spin_unlock(&mle->spinlock); in dlm_clean_block_mle()
3252 wake_up(&mle->wq); in dlm_clean_block_mle()
3255 __dlm_mle_detach_hb_events(dlm, mle); in dlm_clean_block_mle()
3256 __dlm_put_mle(mle); in dlm_clean_block_mle()
3262 struct dlm_master_list_entry *mle; in dlm_clean_master_list() local
3276 hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) { in dlm_clean_master_list()
3277 BUG_ON(mle->type != DLM_MLE_BLOCK && in dlm_clean_master_list()
3278 mle->type != DLM_MLE_MASTER && in dlm_clean_master_list()
3279 mle->type != DLM_MLE_MIGRATION); in dlm_clean_master_list()
3284 if (mle->type == DLM_MLE_MASTER) in dlm_clean_master_list()
3290 if (mle->type == DLM_MLE_BLOCK) { in dlm_clean_master_list()
3291 dlm_clean_block_mle(dlm, mle, dead_node); in dlm_clean_master_list()
3306 if (mle->master != dead_node && in dlm_clean_master_list()
3307 mle->new_master != dead_node) in dlm_clean_master_list()
3310 if (mle->new_master == dead_node && mle->inuse) { in dlm_clean_master_list()
3315 mle->master); in dlm_clean_master_list()
3321 dlm_clean_migration_mle(dlm, mle); in dlm_clean_master_list()
3324 "%u to %u!\n", dlm->name, dead_node, mle->master, in dlm_clean_master_list()
3325 mle->new_master); in dlm_clean_master_list()
3332 res = dlm_reset_mleres_owner(dlm, mle); in dlm_clean_master_list()
3338 __dlm_put_mle(mle); in dlm_clean_master_list()
3465 struct dlm_master_list_entry *mle; in dlm_force_free_mles() local
3482 hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) { in dlm_force_free_mles()
3483 if (mle->type != DLM_MLE_BLOCK) { in dlm_force_free_mles()
3484 mlog(ML_ERROR, "bad mle: %p\n", mle); in dlm_force_free_mles()
3485 dlm_print_one_mle(mle); in dlm_force_free_mles()
3487 atomic_set(&mle->woken, 1); in dlm_force_free_mles()
3488 wake_up(&mle->wq); in dlm_force_free_mles()
3490 __dlm_unlink_mle(dlm, mle); in dlm_force_free_mles()
3491 __dlm_mle_detach_hb_events(dlm, mle); in dlm_force_free_mles()
3492 __dlm_put_mle(mle); in dlm_force_free_mles()