1# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#     http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14# ==============================================================================
15"""Training state management."""
16from __future__ import absolute_import
17from __future__ import division
18from __future__ import print_function
19
20import os
21
22from tensorflow.python.framework import constant_op
23from tensorflow.python.framework import dtypes
24from tensorflow.python.keras import backend as K
25from tensorflow.python.keras.distribute import distributed_file_utils
26from tensorflow.python.keras.utils import mode_keys
27from tensorflow.python.lib.io import file_io
28from tensorflow.python.ops import variables
29from tensorflow.python.training import checkpoint_management
30from tensorflow.python.training.tracking import util as trackable_util
31
# Constant for `tf.keras.Model` attribute to store the epoch at which the most
# recently saved checkpoint was saved.
CKPT_SAVED_EPOCH = '_ckpt_saved_epoch'

# Sentinel value meaning "no checkpoint has been saved yet". Kept negative so
# that a `>= 0` test on the saved-epoch variable distinguishes a real saved
# epoch from this unused state.
CKPT_SAVED_EPOCH_UNUSED_VALUE = -1
37
38
class WorkerTrainingState(object):
  """Training state management class.

  This class provides apis for backing up and restoring the training state.
  This allows model and epoch information to be saved periodically and restored
  for fault-tolerance, also known as preemption-recovery purpose.
  """

  def __init__(self, model, checkpoint_dir):
    """Creates the training-state tracker for `model`.

    Args:
      model: The `tf.keras.Model` whose training state is backed up and
        restored.
      checkpoint_dir: Directory under which the fault-tolerance checkpoint
        is read and (for the checkpointing worker) written.
    """
    self._model = model

    # The epoch at which the checkpoint is saved. Used for fault-tolerance.
    # GPU device only has int64 dtype registered VarHandleOp.
    self._ckpt_saved_epoch = variables.Variable(
        initial_value=constant_op.constant(
            CKPT_SAVED_EPOCH_UNUSED_VALUE, dtype=dtypes.int64),
        name='ckpt_saved_epoch')

    # Variable initialization: explicitly assign the sentinel so the variable
    # holds a concrete value in both graph and eager modes.
    K.set_value(self._ckpt_saved_epoch, CKPT_SAVED_EPOCH_UNUSED_VALUE)

    # _ckpt_saved_epoch gets tracked and is included in the checkpoint file
    # when backing up, so the epoch counter survives a preemption together
    # with the model weights.
    checkpoint = trackable_util.Checkpoint(
        model=self._model, ckpt_saved_epoch=self._ckpt_saved_epoch)

    # If this is single-worker training, checkpoint_dir are the same for
    # write_checkpoint_manager and read_checkpoint_manager.
    #
    # If this is multi-worker training, and this worker should not
    # save checkpoint, we replace the write_checkpoint_manager's checkpoint_dir
    # with a temp filepath, so it writes to a file that will be removed at the
    # end of back_up() call. This is necessary because the SyncOnReadVariable
    # needs to be synced across all the workers in order to be read, and all
    # workers need to perform `save()`.
    # But all workers should restore from the same checkpoint_dir as passed in
    # read_checkpoint_manager.
    self.read_checkpoint_manager = checkpoint_management.CheckpointManager(
        checkpoint,
        directory=os.path.join(checkpoint_dir, 'chief'),
        max_to_keep=1)
    write_checkpoint_dir = distributed_file_utils.write_dirpath(
        checkpoint_dir, self._model.distribute_strategy)
    # When this worker is responsible for checkpointing, write and read
    # managers are the same object; `delete_backup` relies on this identity
    # to decide whether the real backup directory should be removed.
    if self._model.distribute_strategy.extended.should_checkpoint:
      self.write_checkpoint_manager = self.read_checkpoint_manager
    else:
      self.write_checkpoint_manager = checkpoint_management.CheckpointManager(
          checkpoint, directory=write_checkpoint_dir, max_to_keep=1)

  def back_up(self, epoch):
    """Back up the current state of training into a checkpoint file.

    Args:
      epoch: The current epoch information to be saved.
    """
    # Record the epoch first so it is captured in the checkpoint below.
    K.set_value(self._ckpt_saved_epoch, epoch)
    # Save the model plus CKPT_SAVED_EPOCH variable. `save()` presumably
    # returns the checkpoint path (truthy) on success — TODO confirm; the
    # temp-directory cleanup below only runs in that case.
    if self.write_checkpoint_manager.save():
      distributed_file_utils.remove_temp_dirpath(
          self.write_checkpoint_manager.directory,
          self._model.distribute_strategy)

  def restore(self):
    """Restore the training state from the backed up checkpoint file.

    Delegates to `restore_or_initialize` on the read-side checkpoint manager.

    NOTE(review): this method returns `None`; the previous docstring claimed
    a True/False return value, which did not match the code (the result of
    `restore_or_initialize()` is discarded). Use
    `maybe_load_initial_epoch_from_ckpt` to detect whether a recovery
    checkpoint was actually loaded.
    """
    self.read_checkpoint_manager.restore_or_initialize()

  def delete_backup(self):
    """Delete the backup directories.

    Delete the backup directories which should not exist after `fit()`
    successfully finishes.
    """
    # Only the worker whose write manager is the read manager (i.e. the one
    # that wrote to the real backup directory, per __init__) deletes it.
    # Other workers wrote to temp paths already cleaned up in back_up().
    if self.write_checkpoint_manager is self.read_checkpoint_manager:
      file_io.delete_recursively_v2(self.write_checkpoint_manager.directory)

  def maybe_load_initial_epoch_from_ckpt(self, initial_epoch, mode):
    """Maybe load initial epoch from ckpt considering possible worker recovery.

    When `_ckpt_saved_epoch` attribute exists and is not
    `CKPT_SAVED_EPOCH_UNUSED_VALUE`, this is under multi-worker training setting
    and indicates the worker is recovering from previous failure. In this case,
    infer `initial_epoch` from `self._ckpt_saved_epoch` to continue previous
    unfinished training from certain epoch.

    Args:
      initial_epoch: The original initial_epoch user passes in in `fit()`.
      mode: The mode for running `model.fit()`.

    Returns:
      If the training is recovering from previous failure under multi-worker
      training setting, return the epoch the training is supposed to continue
      at. Otherwise, return the `initial_epoch` the user passes in.
    """

    epoch = K.eval(self._ckpt_saved_epoch)
    # `epoch >= 0` distinguishes a real saved epoch from the negative
    # CKPT_SAVED_EPOCH_UNUSED_VALUE sentinel; only TRAIN mode resumes.
    if mode == mode_keys.ModeKeys.TRAIN and epoch >= 0:
      # The most recently saved epoch is one epoch prior to the epoch it
      # failed at, so return the value of 'self._ckpt_saved_epoch' plus one.
      return epoch + 1
    return initial_epoch
144