Lines matching refs:mle
39 struct dlm_master_list_entry *mle,
43 struct dlm_master_list_entry *mle,
54 struct dlm_master_list_entry *mle, in dlm_mle_equal() argument
58 if (dlm != mle->dlm) in dlm_mle_equal()
61 if (namelen != mle->mnamelen || in dlm_mle_equal()
62 memcmp(name, mle->mname, namelen) != 0) in dlm_mle_equal()
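The equality test above keys an MLE by domain context plus exact lock name. A minimal sketch of the same check (field names follow the listing; the helper is renamed here to make clear it is a sketch, not the verbatim source):

    static int mle_equal(struct dlm_ctxt *dlm,
                         struct dlm_master_list_entry *mle,
                         const char *name, unsigned int namelen)
    {
            /* an MLE is only meaningful within its own domain */
            if (dlm != mle->dlm)
                    return 0;
            /* and only for an exact lock-name match */
            if (namelen != mle->mnamelen ||
                memcmp(name, mle->mname, namelen) != 0)
                    return 0;
            return 1;
    }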
73 static void dlm_init_mle(struct dlm_master_list_entry *mle,
79 static void dlm_put_mle(struct dlm_master_list_entry *mle);
80 static void __dlm_put_mle(struct dlm_master_list_entry *mle);
82 struct dlm_master_list_entry **mle,
86 struct dlm_master_list_entry *mle, int to);
91 struct dlm_master_list_entry *mle,
95 struct dlm_master_list_entry *mle,
99 struct dlm_master_list_entry *mle,
162 struct dlm_master_list_entry *mle) in __dlm_mle_attach_hb_events() argument
166 list_add_tail(&mle->hb_events, &dlm->mle_hb_events); in __dlm_mle_attach_hb_events()
171 struct dlm_master_list_entry *mle) in __dlm_mle_detach_hb_events() argument
173 if (!list_empty(&mle->hb_events)) in __dlm_mle_detach_hb_events()
174 list_del_init(&mle->hb_events); in __dlm_mle_detach_hb_events()
179 struct dlm_master_list_entry *mle) in dlm_mle_detach_hb_events() argument
182 __dlm_mle_detach_hb_events(dlm, mle); in dlm_mle_detach_hb_events()
186 static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle) in dlm_get_mle_inuse() argument
189 dlm = mle->dlm; in dlm_get_mle_inuse()
193 mle->inuse++; in dlm_get_mle_inuse()
194 kref_get(&mle->mle_refs); in dlm_get_mle_inuse()
197 static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle) in dlm_put_mle_inuse() argument
200 dlm = mle->dlm; in dlm_put_mle_inuse()
204 mle->inuse--; in dlm_put_mle_inuse()
205 __dlm_put_mle(mle); in dlm_put_mle_inuse()
212 static void __dlm_put_mle(struct dlm_master_list_entry *mle) in __dlm_put_mle() argument
215 dlm = mle->dlm; in __dlm_put_mle()
219 if (!kref_read(&mle->mle_refs)) { in __dlm_put_mle()
222 mlog(ML_ERROR, "bad mle: %p\n", mle); in __dlm_put_mle()
223 dlm_print_one_mle(mle); in __dlm_put_mle()
226 kref_put(&mle->mle_refs, dlm_mle_release); in __dlm_put_mle()
231 static void dlm_put_mle(struct dlm_master_list_entry *mle) in dlm_put_mle() argument
234 dlm = mle->dlm; in dlm_put_mle()
238 __dlm_put_mle(mle); in dlm_put_mle()
243 static inline void dlm_get_mle(struct dlm_master_list_entry *mle) in dlm_get_mle() argument
245 kref_get(&mle->mle_refs); in dlm_get_mle()
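Two counts cooperate in the helpers above: the embedded kref (mle_refs) governs object lifetime, while the plain integer inuse pins an MLE that a caller is still actively using so teardown paths can detect it. A sketch of the pairing, assuming the lock ordering the listing implies (dlm->spinlock taken outside dlm->master_lock); helper names are illustrative:

    static void get_mle_inuse(struct dlm_master_list_entry *mle)
    {
            struct dlm_ctxt *dlm = mle->dlm;

            assert_spin_locked(&dlm->spinlock);
            assert_spin_locked(&dlm->master_lock);
            mle->inuse++;                  /* pin against forced teardown */
            kref_get(&mle->mle_refs);      /* and take a real reference */
    }

    static void put_mle_inuse(struct dlm_master_list_entry *mle)
    {
            struct dlm_ctxt *dlm = mle->dlm;

            spin_lock(&dlm->spinlock);
            spin_lock(&dlm->master_lock);
            mle->inuse--;
            __dlm_put_mle(mle);            /* may free via dlm_mle_release */
            spin_unlock(&dlm->master_lock);
            spin_unlock(&dlm->spinlock);
    }

The kref_read() check in __dlm_put_mle is purely diagnostic: reaching a put while the count is already zero indicates a refcount bug, hence the error log and dlm_print_one_mle() before kref_put.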
248 static void dlm_init_mle(struct dlm_master_list_entry *mle, in dlm_init_mle() argument
257 mle->dlm = dlm; in dlm_init_mle()
258 mle->type = type; in dlm_init_mle()
259 INIT_HLIST_NODE(&mle->master_hash_node); in dlm_init_mle()
260 INIT_LIST_HEAD(&mle->hb_events); in dlm_init_mle()
261 bitmap_zero(mle->maybe_map, O2NM_MAX_NODES); in dlm_init_mle()
262 spin_lock_init(&mle->spinlock); in dlm_init_mle()
263 init_waitqueue_head(&mle->wq); in dlm_init_mle()
264 atomic_set(&mle->woken, 0); in dlm_init_mle()
265 kref_init(&mle->mle_refs); in dlm_init_mle()
266 bitmap_zero(mle->response_map, O2NM_MAX_NODES); in dlm_init_mle()
267 mle->master = O2NM_MAX_NODES; in dlm_init_mle()
268 mle->new_master = O2NM_MAX_NODES; in dlm_init_mle()
269 mle->inuse = 0; in dlm_init_mle()
271 BUG_ON(mle->type != DLM_MLE_BLOCK && in dlm_init_mle()
272 mle->type != DLM_MLE_MASTER && in dlm_init_mle()
273 mle->type != DLM_MLE_MIGRATION); in dlm_init_mle()
275 if (mle->type == DLM_MLE_MASTER) { in dlm_init_mle()
277 mle->mleres = res; in dlm_init_mle()
278 memcpy(mle->mname, res->lockname.name, res->lockname.len); in dlm_init_mle()
279 mle->mnamelen = res->lockname.len; in dlm_init_mle()
280 mle->mnamehash = res->lockname.hash; in dlm_init_mle()
283 mle->mleres = NULL; in dlm_init_mle()
284 memcpy(mle->mname, name, namelen); in dlm_init_mle()
285 mle->mnamelen = namelen; in dlm_init_mle()
286 mle->mnamehash = dlm_lockid_hash(name, namelen); in dlm_init_mle()
289 atomic_inc(&dlm->mle_tot_count[mle->type]); in dlm_init_mle()
290 atomic_inc(&dlm->mle_cur_count[mle->type]); in dlm_init_mle()
293 bitmap_copy(mle->node_map, dlm->domain_map, O2NM_MAX_NODES); in dlm_init_mle()
294 bitmap_copy(mle->vote_map, dlm->domain_map, O2NM_MAX_NODES); in dlm_init_mle()
295 clear_bit(dlm->node_num, mle->vote_map); in dlm_init_mle()
296 clear_bit(dlm->node_num, mle->node_map); in dlm_init_mle()
299 __dlm_mle_attach_hb_events(dlm, mle); in dlm_init_mle()
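Initialization leaves nothing implicit: the candidate (maybe_map) and response bitmaps start empty, node_map/vote_map start as copies of the live domain map with the local node cleared (a node never votes for itself), and O2NM_MAX_NODES serves as the "no master known" sentinel for master/new_master. The call sites later in the listing all follow one allocation shape; a condensed sketch of it, with locking elided (GFP_NOFS because this runs in filesystem context):

    struct dlm_master_list_entry *mle;

    mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
    if (!mle)
            return -ENOMEM;

    /* block-type MLE: track a mastery race for a name we don't own yet */
    dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
    set_bit(request->node_idx, mle->maybe_map); /* requester is a candidate */
    __dlm_insert_mle(dlm, mle);                 /* caller holds master_lock */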
302 void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle) in __dlm_unlink_mle() argument
307 if (!hlist_unhashed(&mle->master_hash_node)) in __dlm_unlink_mle()
308 hlist_del_init(&mle->master_hash_node); in __dlm_unlink_mle()
311 void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle) in __dlm_insert_mle() argument
317 bucket = dlm_master_hash(dlm, mle->mnamehash); in __dlm_insert_mle()
318 hlist_add_head(&mle->master_hash_node, bucket); in __dlm_insert_mle()
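Insert and unlink both assume dlm->master_lock is held; entries hash on mnamehash, computed once at init time. The matching lookup follows the shape of dlm_find_mle just below; a sketch (renamed, with the reference taken before the pointer is handed back):

    static int find_mle(struct dlm_ctxt *dlm,
                        struct dlm_master_list_entry **mle,
                        char *name, unsigned int namelen)
    {
            struct dlm_master_list_entry *tmpmle;
            struct hlist_head *bucket;

            assert_spin_locked(&dlm->master_lock);

            bucket = dlm_master_hash(dlm, dlm_lockid_hash(name, namelen));
            hlist_for_each_entry(tmpmle, bucket, master_hash_node) {
                    if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
                            continue;
                    dlm_get_mle(tmpmle);     /* hand back a referenced MLE */
                    *mle = tmpmle;
                    return 1;
            }
            return 0;
    }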
323 struct dlm_master_list_entry **mle, in dlm_find_mle() argument
338 *mle = tmpmle; in dlm_find_mle()
346 struct dlm_master_list_entry *mle; in dlm_hb_event_notify_attached() local
350 list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) { in dlm_hb_event_notify_attached()
352 dlm_mle_node_up(dlm, mle, NULL, idx); in dlm_hb_event_notify_attached()
354 dlm_mle_node_down(dlm, mle, NULL, idx); in dlm_hb_event_notify_attached()
359 struct dlm_master_list_entry *mle, in dlm_mle_node_down() argument
362 spin_lock(&mle->spinlock); in dlm_mle_node_down()
364 if (!test_bit(idx, mle->node_map)) in dlm_mle_node_down()
367 clear_bit(idx, mle->node_map); in dlm_mle_node_down()
369 spin_unlock(&mle->spinlock); in dlm_mle_node_down()
373 struct dlm_master_list_entry *mle, in dlm_mle_node_up() argument
376 spin_lock(&mle->spinlock); in dlm_mle_node_up()
378 if (test_bit(idx, mle->node_map)) in dlm_mle_node_up()
381 set_bit(idx, mle->node_map); in dlm_mle_node_up()
383 spin_unlock(&mle->spinlock); in dlm_mle_node_up()
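Heartbeat up/down events fan out to every attached MLE and flip exactly one bit in node_map under mle->spinlock; the election code later compares node_map against vote_map to detect membership changes mid-vote. A sketch of the dispatch that dlm_hb_event_notify_attached performs above (helper name illustrative):

    /* runs from the heartbeat callbacks with dlm->spinlock held */
    static void hb_notify_mles(struct dlm_ctxt *dlm, int idx, int node_up)
    {
            struct dlm_master_list_entry *mle;

            assert_spin_locked(&dlm->spinlock);

            list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
                    if (node_up)
                            dlm_mle_node_up(dlm, mle, NULL, idx);
                    else
                            dlm_mle_node_down(dlm, mle, NULL, idx);
            }
    }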
405 struct dlm_master_list_entry *mle; in dlm_mle_release() local
408 mle = container_of(kref, struct dlm_master_list_entry, mle_refs); in dlm_mle_release()
409 dlm = mle->dlm; in dlm_mle_release()
414 mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname, in dlm_mle_release()
415 mle->type); in dlm_mle_release()
418 __dlm_unlink_mle(dlm, mle); in dlm_mle_release()
421 __dlm_mle_detach_hb_events(dlm, mle); in dlm_mle_release()
423 atomic_dec(&dlm->mle_cur_count[mle->type]); in dlm_mle_release()
427 kmem_cache_free(dlm_mle_cache, mle); in dlm_mle_release()
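dlm_mle_release is the kref destructor wired up by kref_init/kref_put above: container_of recovers the MLE from the embedded kref, after which the entry is unhashed, detached from heartbeat events, subtracted from the per-type counter, and returned to the slab cache. A sketch of the pattern with the mlog calls dropped:

    static void mle_release(struct kref *kref)
    {
            struct dlm_master_list_entry *mle;
            struct dlm_ctxt *dlm;

            mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
            dlm = mle->dlm;

            /* callers of kref_put hold dlm->spinlock and dlm->master_lock */
            __dlm_unlink_mle(dlm, mle);
            __dlm_mle_detach_hb_events(dlm, mle);
            atomic_dec(&dlm->mle_cur_count[mle->type]);
            kmem_cache_free(dlm_mle_cache, mle);
    }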
707 struct dlm_master_list_entry *mle = NULL; in dlm_get_lock_resource() local
814 blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen); in dlm_get_lock_resource()
817 if (mle->type == DLM_MLE_MASTER) { in dlm_get_lock_resource()
821 mig = (mle->type == DLM_MLE_MIGRATION); in dlm_get_lock_resource()
830 if (mig || mle->master != O2NM_MAX_NODES) { in dlm_get_lock_resource()
831 BUG_ON(mig && mle->master == dlm->node_num); in dlm_get_lock_resource()
842 dlm_mle_detach_hb_events(dlm, mle); in dlm_get_lock_resource()
843 dlm_put_mle(mle); in dlm_get_lock_resource()
844 mle = NULL; in dlm_get_lock_resource()
853 mle = alloc_mle; in dlm_get_lock_resource()
856 dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0); in dlm_get_lock_resource()
857 set_bit(dlm->node_num, mle->maybe_map); in dlm_get_lock_resource()
858 __dlm_insert_mle(dlm, mle); in dlm_get_lock_resource()
888 dlm_get_mle_inuse(mle); in dlm_get_lock_resource()
934 dlm_node_iter_init(mle->vote_map, &iter); in dlm_get_lock_resource()
936 ret = dlm_do_master_request(res, mle, nodenum); in dlm_get_lock_resource()
939 if (mle->master != O2NM_MAX_NODES) { in dlm_get_lock_resource()
941 if (mle->master <= nodenum) in dlm_get_lock_resource()
949 lockid, nodenum, mle->master); in dlm_get_lock_resource()
955 ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked); in dlm_get_lock_resource()
967 dlm_print_one_mle(mle); in dlm_get_lock_resource()
979 dlm_mle_detach_hb_events(dlm, mle); in dlm_get_lock_resource()
980 dlm_put_mle(mle); in dlm_get_lock_resource()
982 dlm_put_mle_inuse(mle); in dlm_get_lock_resource()
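dlm_get_lock_resource drives the election: it reuses an existing MLE if one is already tracking the name (detaching and dropping it first when a previous master or migration is stale), otherwise it inserts a fresh DLM_MLE_MASTER entry with its own bit set in maybe_map, then polls every node in vote_map. A compressed sketch of the send loop, assuming dlm_node_iter_next() yields node numbers until the map is exhausted (as it does elsewhere in this code); surrounding locals are elided:

    struct dlm_node_iter iter;
    int nodenum, ret;

    dlm_node_iter_init(mle->vote_map, &iter);
    while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
            ret = dlm_do_master_request(res, mle, nodenum);
            if (ret < 0)
                    mlog_errno(ret);        /* log and keep polling */
            if (mle->master != O2NM_MAX_NODES) {
                    /* someone already answered "I am the master" */
                    if (mle->master <= nodenum)
                            break;
                    /* a node above us may still be initializing; keep
                     * asking the rest so response_map fills in */
            }
    }
    ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);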
1003 struct dlm_master_list_entry *mle, in dlm_wait_for_lock_mastery() argument
1024 ret = dlm_do_master_request(res, mle, res->owner); in dlm_wait_for_lock_mastery()
1037 spin_lock(&mle->spinlock); in dlm_wait_for_lock_mastery()
1038 m = mle->master; in dlm_wait_for_lock_mastery()
1039 map_changed = !bitmap_equal(mle->vote_map, mle->node_map, in dlm_wait_for_lock_mastery()
1041 voting_done = bitmap_equal(mle->vote_map, mle->response_map, in dlm_wait_for_lock_mastery()
1049 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked); in dlm_wait_for_lock_mastery()
1050 b = (mle->type == DLM_MLE_BLOCK); in dlm_wait_for_lock_mastery()
1057 spin_unlock(&mle->spinlock); in dlm_wait_for_lock_mastery()
1082 bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES); in dlm_wait_for_lock_mastery()
1087 mle->master = dlm->node_num; in dlm_wait_for_lock_mastery()
1098 spin_unlock(&mle->spinlock); in dlm_wait_for_lock_mastery()
1103 atomic_set(&mle->woken, 0); in dlm_wait_for_lock_mastery()
1104 (void)wait_event_timeout(mle->wq, in dlm_wait_for_lock_mastery()
1105 (atomic_read(&mle->woken) == 1), in dlm_wait_for_lock_mastery()
1122 ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0); in dlm_wait_for_lock_mastery()
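Mastery is then decided by bitmap algebra under mle->spinlock: vote_map diverging from node_map means membership changed and the election restarts; vote_map equal to response_map means every node has answered. Once voting is done, the lowest-numbered node left in maybe_map wins. A sketch of that decision step plus the sleep taken while the vote is still open (DLM_MASTERY_TIMEOUT_MS is assumed to be the timeout constant this file uses):

    spin_lock(&mle->spinlock);
    m = mle->master;
    map_changed = !bitmap_equal(mle->vote_map, mle->node_map,
                                O2NM_MAX_NODES);
    voting_done = bitmap_equal(mle->vote_map, mle->response_map,
                               O2NM_MAX_NODES);

    if (m == O2NM_MAX_NODES && voting_done && !map_changed) {
            bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES);
            if (dlm->node_num <= bit) {
                    /* we are the lowest surviving candidate */
                    mle->master = dlm->node_num;
                    assert = 1;             /* broadcast an assert next */
                    sleep = 0;
            }
    }
    spin_unlock(&mle->spinlock);

    if (sleep) {
            /* recheck on wakeup or timeout; an assert may arrive */
            atomic_set(&mle->woken, 0);
            (void)wait_event_timeout(mle->wq,
                            (atomic_read(&mle->woken) == 1),
                            msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS));
    }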
1208 struct dlm_master_list_entry *mle, in dlm_restart_lock_mastery() argument
1219 assert_spin_locked(&mle->spinlock); in dlm_restart_lock_mastery()
1221 dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map); in dlm_restart_lock_mastery()
1232 clear_bit(node, mle->response_map); in dlm_restart_lock_mastery()
1233 set_bit(node, mle->vote_map); in dlm_restart_lock_mastery()
1237 int lowest = find_first_bit(mle->maybe_map, in dlm_restart_lock_mastery()
1241 clear_bit(node, mle->maybe_map); in dlm_restart_lock_mastery()
1247 lowest = find_next_bit(mle->maybe_map, in dlm_restart_lock_mastery()
1272 mle->type = DLM_MLE_MASTER; in dlm_restart_lock_mastery()
1273 mle->mleres = res; in dlm_restart_lock_mastery()
1280 bitmap_zero(mle->maybe_map, O2NM_MAX_NODES); in dlm_restart_lock_mastery()
1281 bitmap_zero(mle->response_map, O2NM_MAX_NODES); in dlm_restart_lock_mastery()
1283 bitmap_copy(mle->vote_map, mle->node_map, in dlm_restart_lock_mastery()
1286 if (mle->type != DLM_MLE_BLOCK) in dlm_restart_lock_mastery()
1287 set_bit(dlm->node_num, mle->maybe_map); in dlm_restart_lock_mastery()
1307 struct dlm_master_list_entry *mle, int to) in dlm_do_master_request() argument
1309 struct dlm_ctxt *dlm = mle->dlm; in dlm_do_master_request()
1316 BUG_ON(mle->type == DLM_MLE_MIGRATION); in dlm_do_master_request()
1318 request.namelen = (u8)mle->mnamelen; in dlm_do_master_request()
1319 memcpy(request.name, mle->mname, request.namelen); in dlm_do_master_request()
1352 spin_lock(&mle->spinlock); in dlm_do_master_request()
1355 set_bit(to, mle->response_map); in dlm_do_master_request()
1360 mle->master = to; in dlm_do_master_request()
1364 set_bit(to, mle->response_map); in dlm_do_master_request()
1368 set_bit(to, mle->response_map); in dlm_do_master_request()
1369 set_bit(to, mle->maybe_map); in dlm_do_master_request()
1380 spin_unlock(&mle->spinlock); in dlm_do_master_request()
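Each reply folds back into the bitmaps under mle->spinlock. A sketch of that bookkeeping, assuming the four DLM_MASTER_RESP_* codes this message family uses:

    spin_lock(&mle->spinlock);
    switch (response) {
    case DLM_MASTER_RESP_YES:
            set_bit(to, mle->response_map);
            mle->master = to;               /* node "to" already masters it */
            break;
    case DLM_MASTER_RESP_NO:
            set_bit(to, mle->response_map); /* counted, not a candidate */
            break;
    case DLM_MASTER_RESP_MAYBE:
            set_bit(to, mle->response_map);
            set_bit(to, mle->maybe_map);    /* node "to" is racing us */
            break;
    case DLM_MASTER_RESP_ERROR:
            resend = 1;                     /* retry this node */
            break;
    }
    spin_unlock(&mle->spinlock);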
1407 struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL; in dlm_master_request_handler() local
1458 if (mle) in dlm_master_request_handler()
1459 kmem_cache_free(dlm_mle_cache, mle); in dlm_master_request_handler()
1467 if (mle) in dlm_master_request_handler()
1468 kmem_cache_free(dlm_mle_cache, mle); in dlm_master_request_handler()
1482 if (mle) in dlm_master_request_handler()
1483 kmem_cache_free(dlm_mle_cache, mle); in dlm_master_request_handler()
1547 if (mle) in dlm_master_request_handler()
1548 kmem_cache_free(dlm_mle_cache, mle); in dlm_master_request_handler()
1563 if (!mle) { in dlm_master_request_handler()
1567 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); in dlm_master_request_handler()
1568 if (!mle) { in dlm_master_request_handler()
1578 dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen); in dlm_master_request_handler()
1579 set_bit(request->node_idx, mle->maybe_map); in dlm_master_request_handler()
1580 __dlm_insert_mle(dlm, mle); in dlm_master_request_handler()
1675 struct dlm_master_list_entry *mle = NULL; in dlm_do_assert_master() local
1706 if (dlm_find_mle(dlm, &mle, (char *)lockname, in dlm_do_assert_master()
1708 dlm_print_one_mle(mle); in dlm_do_assert_master()
1709 __dlm_put_mle(mle); in dlm_do_assert_master()
1763 struct dlm_master_list_entry *mle = NULL; in dlm_assert_master_handler() local
1792 if (!dlm_find_mle(dlm, &mle, name, namelen)) { in dlm_assert_master_handler()
1798 int bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES); in dlm_assert_master_handler()
1821 if (mle->type == DLM_MLE_MIGRATION) { in dlm_assert_master_handler()
1832 __dlm_put_mle(mle); in dlm_assert_master_handler()
1851 if (!mle) { in dlm_assert_master_handler()
1861 } else if (mle->type != DLM_MLE_MIGRATION) { in dlm_assert_master_handler()
1886 if (assert->node_idx != mle->new_master) { in dlm_assert_master_handler()
1890 assert->node_idx, mle->new_master, in dlm_assert_master_handler()
1891 mle->master, namelen, name); in dlm_assert_master_handler()
1902 if (mle) { in dlm_assert_master_handler()
1907 spin_lock(&mle->spinlock); in dlm_assert_master_handler()
1908 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION) in dlm_assert_master_handler()
1914 while ((nn = find_next_bit(mle->response_map, O2NM_MAX_NODES, in dlm_assert_master_handler()
1922 mle->master = assert->node_idx; in dlm_assert_master_handler()
1923 atomic_set(&mle->woken, 1); in dlm_assert_master_handler()
1924 wake_up(&mle->wq); in dlm_assert_master_handler()
1925 spin_unlock(&mle->spinlock); in dlm_assert_master_handler()
1930 if (mle->type == DLM_MLE_MIGRATION) { in dlm_assert_master_handler()
1934 dlm->node_num, mle->new_master); in dlm_assert_master_handler()
1937 dlm_change_lockres_owner(dlm, res, mle->new_master); in dlm_assert_master_handler()
1940 dlm_change_lockres_owner(dlm, res, mle->master); in dlm_assert_master_handler()
1953 rr = kref_read(&mle->mle_refs); in dlm_assert_master_handler()
1954 if (mle->inuse > 0) { in dlm_assert_master_handler()
1969 assert->node_idx, rr, extra_ref, mle->inuse); in dlm_assert_master_handler()
1970 dlm_print_one_mle(mle); in dlm_assert_master_handler()
1972 __dlm_unlink_mle(dlm, mle); in dlm_assert_master_handler()
1973 __dlm_mle_detach_hb_events(dlm, mle); in dlm_assert_master_handler()
1974 __dlm_put_mle(mle); in dlm_assert_master_handler()
1980 __dlm_put_mle(mle); in dlm_assert_master_handler()
2026 if (mle) in dlm_assert_master_handler()
2027 __dlm_put_mle(mle); in dlm_assert_master_handler()
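When an assert arrives for a tracked MLE, the handler records the asserting node as master, wakes any waiter parked in dlm_wait_for_lock_mastery, and then settles the reference bookkeeping: block and migration MLEs carry an extra reference the assert path must drop, and an in-use MLE obliges the asserting node to re-assert later. A sketch of the wake-up step, simplified from the listing:

    spin_lock(&mle->spinlock);
    if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
            extra_ref = 1;  /* these types hold a ref this path must drop */
    else {
            /* a MASTER mle: if any third node already responded, the
             * asserting node must re-assert to clean those nodes up */
            while ((nn = find_next_bit(mle->response_map, O2NM_MAX_NODES,
                                       nn + 1)) < O2NM_MAX_NODES) {
                    if (nn != dlm->node_num && nn != assert->node_idx) {
                            master_request = 1;
                            break;
                    }
            }
    }
    mle->master = assert->node_idx;
    atomic_set(&mle->woken, 1);
    wake_up(&mle->wq);
    spin_unlock(&mle->spinlock);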
2543 struct dlm_master_list_entry *mle = NULL; in dlm_migrate_lockres() local
2569 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); in dlm_migrate_lockres()
2570 if (!mle) { in dlm_migrate_lockres()
2582 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name, in dlm_migrate_lockres()
2589 dlm_get_mle_inuse(mle); in dlm_migrate_lockres()
2624 dlm_mle_detach_hb_events(dlm, mle); in dlm_migrate_lockres()
2625 dlm_put_mle(mle); in dlm_migrate_lockres()
2626 dlm_put_mle_inuse(mle); in dlm_migrate_lockres()
2627 } else if (mle) { in dlm_migrate_lockres()
2628 kmem_cache_free(dlm_mle_cache, mle); in dlm_migrate_lockres()
2629 mle = NULL; in dlm_migrate_lockres()
2655 dlm_mle_detach_hb_events(dlm, mle); in dlm_migrate_lockres()
2656 dlm_put_mle(mle); in dlm_migrate_lockres()
2657 dlm_put_mle_inuse(mle); in dlm_migrate_lockres()
2681 ret = wait_event_interruptible_timeout(mle->wq, in dlm_migrate_lockres()
2682 (atomic_read(&mle->woken) == 1), in dlm_migrate_lockres()
2686 if (atomic_read(&mle->woken) == 1 || in dlm_migrate_lockres()
2701 dlm_mle_detach_hb_events(dlm, mle); in dlm_migrate_lockres()
2702 dlm_put_mle(mle); in dlm_migrate_lockres()
2703 dlm_put_mle_inuse(mle); in dlm_migrate_lockres()
2724 dlm_mle_detach_hb_events(dlm, mle); in dlm_migrate_lockres()
2725 dlm_put_mle_inuse(mle); in dlm_migrate_lockres()
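Migration reuses the same machinery: a DLM_MLE_MIGRATION entry is allocated up front, registered through dlm_add_migration_mle (which also surfaces any conflicting old MLE), and the migrating node then sleeps on mle->wq until the target asserts mastery or is declared dead. A sketch of that wait loop, following the listing; the timeout argument is elided in the listing, so the five-second poll here is an assumption:

    while (1) {
            ret = wait_event_interruptible_timeout(mle->wq,
                            (atomic_read(&mle->woken) == 1),
                            msecs_to_jiffies(5000));

            if (ret >= 0) {
                    if (atomic_read(&mle->woken) == 1 ||
                        res->owner == target)
                            break;          /* target took ownership */

                    /* timed out: bail cleanly if the target died,
                     * otherwise keep waiting for its assert */
                    if (dlm_is_node_dead(dlm, target))
                            goto leave;
            }
            /* ret < 0: signal caught; loop and wait again */
    }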
3105 struct dlm_master_list_entry *mle = NULL, *oldmle = NULL; in dlm_migrate_request_handler() local
3118 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); in dlm_migrate_request_handler()
3120 if (!mle) { in dlm_migrate_request_handler()
3137 kmem_cache_free(dlm_mle_cache, mle); in dlm_migrate_request_handler()
3147 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, in dlm_migrate_request_handler()
3153 kmem_cache_free(dlm_mle_cache, mle); in dlm_migrate_request_handler()
3181 struct dlm_master_list_entry *mle, in dlm_add_migration_mle() argument
3240 dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen); in dlm_add_migration_mle()
3241 mle->new_master = new_master; in dlm_add_migration_mle()
3244 mle->master = master; in dlm_add_migration_mle()
3246 set_bit(new_master, mle->maybe_map); in dlm_add_migration_mle()
3247 __dlm_insert_mle(dlm, mle); in dlm_add_migration_mle()
3256 struct dlm_master_list_entry *mle) in dlm_reset_mleres_owner() argument
3261 res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen, in dlm_reset_mleres_owner()
3262 mle->mnamehash); in dlm_reset_mleres_owner()
3274 __dlm_mle_detach_hb_events(dlm, mle); in dlm_reset_mleres_owner()
3278 __dlm_put_mle(mle); in dlm_reset_mleres_owner()
3286 struct dlm_master_list_entry *mle) in dlm_clean_migration_mle() argument
3288 __dlm_mle_detach_hb_events(dlm, mle); in dlm_clean_migration_mle()
3290 spin_lock(&mle->spinlock); in dlm_clean_migration_mle()
3291 __dlm_unlink_mle(dlm, mle); in dlm_clean_migration_mle()
3292 atomic_set(&mle->woken, 1); in dlm_clean_migration_mle()
3293 spin_unlock(&mle->spinlock); in dlm_clean_migration_mle()
3295 wake_up(&mle->wq); in dlm_clean_migration_mle()
3299 struct dlm_master_list_entry *mle, u8 dead_node) in dlm_clean_block_mle() argument
3303 BUG_ON(mle->type != DLM_MLE_BLOCK); in dlm_clean_block_mle()
3305 spin_lock(&mle->spinlock); in dlm_clean_block_mle()
3306 bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES); in dlm_clean_block_mle()
3310 spin_unlock(&mle->spinlock); in dlm_clean_block_mle()
3317 atomic_set(&mle->woken, 1); in dlm_clean_block_mle()
3318 spin_unlock(&mle->spinlock); in dlm_clean_block_mle()
3319 wake_up(&mle->wq); in dlm_clean_block_mle()
3322 __dlm_mle_detach_hb_events(dlm, mle); in dlm_clean_block_mle()
3323 __dlm_put_mle(mle); in dlm_clean_block_mle()
3329 struct dlm_master_list_entry *mle; in dlm_clean_master_list() local
3343 hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) { in dlm_clean_master_list()
3344 BUG_ON(mle->type != DLM_MLE_BLOCK && in dlm_clean_master_list()
3345 mle->type != DLM_MLE_MASTER && in dlm_clean_master_list()
3346 mle->type != DLM_MLE_MIGRATION); in dlm_clean_master_list()
3351 if (mle->type == DLM_MLE_MASTER) in dlm_clean_master_list()
3357 if (mle->type == DLM_MLE_BLOCK) { in dlm_clean_master_list()
3358 dlm_clean_block_mle(dlm, mle, dead_node); in dlm_clean_master_list()
3373 if (mle->master != dead_node && in dlm_clean_master_list()
3374 mle->new_master != dead_node) in dlm_clean_master_list()
3377 if (mle->new_master == dead_node && mle->inuse) { in dlm_clean_master_list()
3382 mle->master); in dlm_clean_master_list()
3388 dlm_clean_migration_mle(dlm, mle); in dlm_clean_master_list()
3391 "%u to %u!\n", dlm->name, dead_node, mle->master, in dlm_clean_master_list()
3392 mle->new_master); in dlm_clean_master_list()
3399 res = dlm_reset_mleres_owner(dlm, mle); in dlm_clean_master_list()
3405 __dlm_put_mle(mle); in dlm_clean_master_list()
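Cleanup after a node death walks every hash bucket with the _safe iterator, since entries are unlinked mid-walk. Block MLEs are handed to dlm_clean_block_mle, master MLEs are left for lockres recovery, and migration MLEs are torn down only when the dead node was the source or the destination. A sketch of the walk skeleton (simplified; the real function also resets lockres ownership via dlm_reset_mleres_owner):

    assert_spin_locked(&dlm->spinlock);
    spin_lock(&dlm->master_lock);
    for (i = 0; i < DLM_HASH_BUCKETS; i++) {
            bucket = dlm_master_hash(dlm, i);
            hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
                    if (mle->type == DLM_MLE_BLOCK) {
                            dlm_clean_block_mle(dlm, mle, dead_node);
                            continue;
                    }
                    if (mle->type == DLM_MLE_MASTER)
                            continue;       /* lockres recovery handles it */
                    /* DLM_MLE_MIGRATION: only the dead source/target
                     * case matters here */
                    if (mle->master != dead_node &&
                        mle->new_master != dead_node)
                            continue;
                    dlm_clean_migration_mle(dlm, mle);
            }
    }
    spin_unlock(&dlm->master_lock);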
3532 struct dlm_master_list_entry *mle; in dlm_force_free_mles() local
3549 hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) { in dlm_force_free_mles()
3550 if (mle->type != DLM_MLE_BLOCK) { in dlm_force_free_mles()
3551 mlog(ML_ERROR, "bad mle: %p\n", mle); in dlm_force_free_mles()
3552 dlm_print_one_mle(mle); in dlm_force_free_mles()
3554 atomic_set(&mle->woken, 1); in dlm_force_free_mles()
3555 wake_up(&mle->wq); in dlm_force_free_mles()
3557 __dlm_unlink_mle(dlm, mle); in dlm_force_free_mles()
3558 __dlm_mle_detach_hb_events(dlm, mle); in dlm_force_free_mles()
3559 __dlm_put_mle(mle); in dlm_force_free_mles()