• Home
  • Raw
  • Download

Lines Matching full:migration

839 		/* if there is a migration in progress, let the migration  in dlm_get_lock_resource()
841 * of the MIGRATION mle: either the migrate finished or in dlm_get_lock_resource()
853 mig ? "MIGRATION" : "BLOCK"); in dlm_get_lock_resource()
1608 mlog(0, "migration mle was found (%u->%u)\n", in dlm_master_request_handler()
1841 " from %u for migration\n", in dlm_assert_master_handler()
1846 " from %u for migration, ignoring\n", in dlm_assert_master_handler()
1948 mlog(0, "finishing off migration of lockres %.*s, " in dlm_assert_master_handler()
1994 * ref given by the master / migration request message. in dlm_assert_master_handler()
2136 * prevent migration from starting while we're still asserting in dlm_assert_master_worker()
2137 * our dominance. The reserved ast delays migration. in dlm_assert_master_worker()
2142 "in the middle of migration. Skipping assert, " in dlm_assert_master_worker()
2161 /* Ok, we've asserted ourselves. Let's let migration start. */ in dlm_assert_master_worker()
2510 /* delay migration when the lockres is in MIGRATING state */ in dlm_is_lockres_migratable()
2514 /* delay migration when the lockres is in RECOVERING state */ in dlm_is_lockres_migratable()
2597 * add the migration mle to the list in dlm_migrate_lockres()
2654 * at this point, we have a migration target, an mle in dlm_migrate_lockres()
2664 /* call send_one_lockres with migration flag. in dlm_migrate_lockres()
2666 * migration is starting. */ in dlm_migrate_lockres()
2671 mlog(0, "migration to node %u failed with %d\n", in dlm_migrate_lockres()
2673 /* migration failed, detach and clean up mle */ in dlm_migrate_lockres()
2691 * will be the last one notified, ensuring that the migration in dlm_migrate_lockres()
2694 * master, so it is important that my recovery finds the migration in dlm_migrate_lockres()
2709 mlog(0, "%s:%.*s: timed out during migration\n", in dlm_migrate_lockres()
2714 mlog(0, "%s:%.*s: expected migration " in dlm_migrate_lockres()
2719 /* migration failed, detach and clean up mle */ in dlm_migrate_lockres()
2730 mlog(0, "%s:%.*s: caught signal during migration\n", in dlm_migrate_lockres()
2755 * but migration failed */ in dlm_migrate_lockres()
2776 * Called with the dlm spinlock held, may drop it to do migration, but
2908 mlog(ML_ERROR, "aha. migration target %u just went down\n", in dlm_mark_lockres_migrating()
2939 /* last step in the migration process.
3092 /* during the migration request we short-circuited in dlm_do_migrate_request()
3117 * the migration and this should be the only one found for those scanning the
3193 * when adding a migration mle, we can clear any other mles
3197 * the new migration mle. this way we can hold with the rule
3229 mlog(ML_ERROR, "migration error mle: " in dlm_add_migration_mle()
3251 "migration\n", dlm->name, in dlm_add_migration_mle()
3259 /* now add a migration mle to the tail of the list */ in dlm_add_migration_mle()
3382 /* Everything else is a MIGRATION mle */ in dlm_clean_master_list()
3384 /* The rule for MIGRATION mles is that the master in dlm_clean_master_list()
3399 "migration from %u, the MLE is " in dlm_clean_master_list()
3410 mlog(0, "%s: node %u died during migration from " in dlm_clean_master_list()
3445 * a reference after the migration completes */ in dlm_finish_migration()
3498 * this is integral to migration
3520 * also, if there is a pending migration on this lockres,
3523 * this is how we ensure that migration can proceed with no