/external/tensorflow/tensorflow/python/training/ |
D | checkpoint_utils_test.py
      41  def _create_checkpoints(sess, checkpoint_dir):    argument
      42  checkpoint_prefix = os.path.join(checkpoint_dir, "model")
      60  def _create_partition_checkpoints(sess, checkpoint_dir):    argument
      61  checkpoint_prefix = os.path.join(checkpoint_dir, "model")
      84  checkpoint_dir = self.get_temp_dir() + "/no_checkpoints"
      87  checkpoint_utils.load_variable(checkpoint_dir, "var1"), [])
      90  checkpoint_dir = self.get_temp_dir()
      92  _, _, _, _ = _create_checkpoints(session, checkpoint_dir)
      95  checkpoint_utils.load_variable(checkpoint_dir, "var5"), [])
      98  checkpoint_dir = self.get_temp_dir()
      [all …]

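The load_variable() calls above read individual variables back out of a checkpoint directory. Below is a minimal, hedged sketch of that pattern using the public tf.train helpers; the directory path and variable contents are illustrative, not taken from the test.

    # Minimal sketch (illustrative paths/values): write a checkpoint, then read
    # variables back from the directory with the public tf.train helpers.
    import os
    import tensorflow as tf

    checkpoint_dir = "/tmp/ckpt_demo"                      # assumed path
    ckpt = tf.train.Checkpoint(var1=tf.Variable([1.0, 2.0]))
    ckpt.save(os.path.join(checkpoint_dir, "model"))

    # Enumerate (name, shape) pairs stored in the newest checkpoint ...
    for name, shape in tf.train.list_variables(checkpoint_dir):
        print(name, shape)

    # ... or load one variable's value as a numpy array (object-based checkpoints
    # store variables under keys like "<attr>/.ATTRIBUTES/VARIABLE_VALUE").
    value = tf.train.load_variable(checkpoint_dir, "var1/.ATTRIBUTES/VARIABLE_VALUE")
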
D | session_manager_test.py
      86  checkpoint_dir = os.path.join(self.get_temp_dir(), "prepare_session")
      89  gfile.DeleteRecursively(checkpoint_dir)
      93  gfile.MakeDirs(checkpoint_dir)
     104  checkpoint_dir=checkpoint_dir)
     106  checkpoint_filename = os.path.join(checkpoint_dir,
     112  os.rename(checkpoint_dir, checkpoint_dir2)
     113  gfile.MakeDirs(checkpoint_dir)
     127  checkpoint_dir=checkpoint_dir,
     131  gfile.DeleteRecursively(checkpoint_dir)
     132  os.rename(checkpoint_dir2, checkpoint_dir)
     [all …]

D | evaluation_test.py
      71  def _train_model(self, checkpoint_dir, num_steps):    argument
      94  checkpoint_dir=checkpoint_dir,
     104  checkpoint_dir = os.path.join(self.get_temp_dir(),
     108  self._train_model(checkpoint_dir, num_steps=300)
     118  checkpoint_path = saver.latest_checkpoint(checkpoint_dir)
     130  checkpoint_dir = os.path.join(self.get_temp_dir(),
     134  self._train_model(checkpoint_dir, num_steps=300)
     150  checkpoint_path = saver.latest_checkpoint(checkpoint_dir)
     169  checkpoint_dir = os.path.join(self.get_temp_dir(), 'eval_ops_and_final_ops')
     172  self._train_model(checkpoint_dir, num_steps=1)
     [all …]

D | session_manager.py
     182  checkpoint_dir=None,    argument
     219  if checkpoint_dir and checkpoint_filename_with_path:
     224  if not saver or not (checkpoint_dir or checkpoint_filename_with_path):
     234  ckpt = checkpoint_management.get_checkpoint_state(checkpoint_dir)
     240  ckpt = checkpoint_management.get_checkpoint_state(checkpoint_dir)
     254  checkpoint_dir=None,    argument
     312  checkpoint_dir=checkpoint_dir,
     345  checkpoint_dir=None,    argument
     377  checkpoint_dir=checkpoint_dir,
     390  restoring_file = checkpoint_dir or checkpoint_filename_with_path

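For context, a hedged sketch of how SessionManager's checkpoint_dir argument is used from the public TF1 API (tf.compat.v1): prepare_session() restores from the newest checkpoint under checkpoint_dir if one exists, and falls back to init_op otherwise. The graph, directory, and values here are illustrative.

    import tensorflow.compat.v1 as tf

    tf.disable_eager_execution()

    v = tf.get_variable("v", shape=[], initializer=tf.zeros_initializer())
    saver = tf.train.Saver()

    sm = tf.train.SessionManager()
    # Restores from /tmp/sm_demo if a checkpoint is found there,
    # otherwise runs init_op to initialize v from scratch.
    sess = sm.prepare_session(
        master="",                       # in-process session
        init_op=tf.global_variables_initializer(),
        saver=saver,
        checkpoint_dir="/tmp/sm_demo")   # assumed directory
    print(sess.run(v))
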
D | monitored_session.py
     318  checkpoint_dir=None,    argument
     366  summary_dir = summary_dir or checkpoint_dir
     397  checkpoint_dir):
     401  checkpoint_dir,
     409  os.path.join(checkpoint_dir, tmpdir),
     419  checkpoint_dir=checkpoint_dir,
     431  checkpoint_dir=None,    argument
     541  checkpoint_dir=checkpoint_dir,
     571  checkpoint_dir=checkpoint_dir,
     575  summary_dir = summary_dir or checkpoint_dir
     [all …]

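The checkpoint_dir argument above is the one exposed by the public tf.compat.v1.train.MonitoredTrainingSession API. A rough, self-contained sketch (directory and step counts are illustrative): checkpoints and, by default, summaries are written under checkpoint_dir, and a restarted job restores from it automatically.

    import tensorflow.compat.v1 as tf

    tf.disable_eager_execution()

    global_step = tf.train.get_or_create_global_step()
    train_op = tf.assign_add(global_step, 1)

    # Checkpoints (and summaries) land under checkpoint_dir; if the process is
    # restarted with the same directory, training resumes from the latest
    # checkpoint instead of step 0.
    with tf.train.MonitoredTrainingSession(
        is_chief=True,
        checkpoint_dir="/tmp/mts_demo",   # assumed directory
        save_checkpoint_steps=100) as session:
      while not session.should_stop():
        step = session.run(train_op)
        if step >= 500:
          break
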
D | checkpoint_utils.py
     122  def wait_for_new_checkpoint(checkpoint_dir,    argument
     140  logging.info("Waiting for new checkpoint at %s", checkpoint_dir)
     143  checkpoint_path = checkpoint_management.latest_checkpoint(checkpoint_dir)
     154  def checkpoints_iterator(checkpoint_dir,    argument
     199  checkpoint_dir, checkpoint_path, timeout=timeout)

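wait_for_new_checkpoint() and checkpoints_iterator() surface publicly as tf.train.checkpoints_iterator (see the v2 golden pbtxt entry further down). A small sketch of the usual evaluator loop, with an assumed directory and timeout:

    import tensorflow as tf

    checkpoint_dir = "/tmp/train_job"    # assumed: written to by a separate trainer

    # Yields each new checkpoint prefix as it appears; stops if nothing new
    # shows up within `timeout` seconds.
    for checkpoint_path in tf.train.checkpoints_iterator(
        checkpoint_dir, min_interval_secs=10, timeout=600):
      print("new checkpoint:", checkpoint_path)   # restore + evaluate here
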
D | monitored_session_test.py
     294  is_chief=True, checkpoint_dir=logdir) as session:
     300  is_chief=True, checkpoint_dir=logdir) as session:
     310  checkpoint_dir=logdir,
     317  is_chief=True, checkpoint_dir=logdir) as session:
     327  checkpoint_dir=logdir,
     336  is_chief=True, checkpoint_dir=logdir) as session:
     364  checkpoint_dir=test_dir) as session:
     373  checkpoint_dir=test_dir) as session:
     386  checkpoint_dir=logdir,
     404  checkpoint_dir=logdir,
     [all …]

/external/tensorflow/tensorflow/python/distribute/failure_handling/ |
D | gce_failure_handler_test.py
      63  def _make_checkpoint_manager(checkpoint, checkpoint_dir, cluster_resolver):    argument
      71  checkpoint, directory=checkpoint_dir, max_to_keep=1)
      76  checkpoint_dir, cluster_resolver.task_id),
     105  checkpoint_dir,    argument
     157  fh_ckpt, checkpoint_dir, strategy.cluster_resolver)
     161  checkpoint_dir, termination_config))
     183  logging.info(gfile.ListDirectory(checkpoint_dir))
     186  for a_file in gfile.ListDirectory(checkpoint_dir)
     252  checkpoint_dir = os.path.join(self.get_temp_dir(), 'fh_ckpt/')
     266  args=(checkpoint_dir, cluster_spec, input_arg, maintenance_event,
     [all …]

D | failure_handler_test.py
      63  def _make_checkpoint_manager(checkpoint, checkpoint_dir, cluster_resolver):    argument
      70  checkpoint, directory=checkpoint_dir, max_to_keep=1)
      75  checkpoint_dir, cluster_resolver.task_id),
     117  checkpoint_dir,    argument
     154  fh_ckpt, checkpoint_dir, strategy.cluster_resolver)
     158  checkpoint_dir, termination_config))
     190  for a_file in gfile.ListDirectory(checkpoint_dir)
     238  checkpoint_dir = os.path.join(self.get_temp_dir(), 'fh_ckpt')
     251  args=(checkpoint_dir, cluster_spec, input_arg,
     298  self.worker_fn(checkpoint_dir, cluster_spec, input_arg,
     [all …]

D | failure_handling.py
      55  def _non_chief_checkpoint_dir(checkpoint_dir, task_id):    argument
      57  dirpath = os.path.dirname(checkpoint_dir)
      58  base = os.path.basename(checkpoint_dir)
     394  checkpoint_dir=None,    argument
     442  checkpoint_lib.Checkpoint) and not checkpoint_dir:
     455  checkpoint_dir, cluster_resolver)
     533  checkpoint_dir, cluster_resolver):    argument
     546  directory=checkpoint_dir,
     555  _non_chief_checkpoint_dir(checkpoint_dir,

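failure_handling.py backs the public tf.distribute.experimental.PreemptionCheckpointHandler. A hedged sketch of how its checkpoint_dir is supplied when a plain tf.train.Checkpoint (rather than a CheckpointManager) is passed; single-machine defaults, illustrative paths, and a stand-in train step:

    import tensorflow as tf

    strategy = tf.distribute.MultiWorkerMirroredStrategy()
    with strategy.scope():
      step_counter = tf.Variable(0.0)
    checkpoint = tf.train.Checkpoint(step_counter=step_counter)

    # On a preemption signal, the handler writes a checkpoint into checkpoint_dir
    # before the worker exits; on restart it restores from the same directory.
    handler = tf.distribute.experimental.PreemptionCheckpointHandler(
        strategy.cluster_resolver, checkpoint, checkpoint_dir="/tmp/fh_ckpt")

    @tf.function
    def train_step():
      step_counter.assign_add(1.0)

    for _ in range(100):
      handler.run(train_step)
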
/external/tensorflow/tensorflow/python/distribute/ |
D | checkpoint_utils_test.py
      38  def _create_checkpoints(sess, checkpoint_dir):    argument
      39  checkpoint_prefix = os.path.join(checkpoint_dir, "model")
      58  checkpoint_dir = self.get_temp_dir()
      60  v1, v2 = _create_checkpoints(session, checkpoint_dir)
      61  return checkpoint_dir, v1, v2
      76  checkpoint_dir, v1_value, v2_value = self._get_test_object()
      84  checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
     112  checkpoint_dir, v1_value, _ = self._get_test_object()
     120  checkpoint_utils.init_from_checkpoint(checkpoint_dir, {

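This distribute test drives checkpoint_utils.init_from_checkpoint, which is public as tf.compat.v1.train.init_from_checkpoint. A rough sketch of the pattern, assuming a pre-existing checkpoint directory containing a float variable named "v1" of shape [10]:

    import tensorflow.compat.v1 as tf

    tf.disable_eager_execution()

    checkpoint_dir = "/tmp/pretrained"   # assumed to hold a variable "v1" of shape [10]

    new_v1 = tf.get_variable("new_v1", shape=[10])
    # Overrides new_v1's initializer so that, when initializers run, its value
    # is loaded from the checkpoint's "v1" instead of being randomly initialized.
    tf.train.init_from_checkpoint(checkpoint_dir, {"v1": new_v1})

    with tf.Session() as sess:
      sess.run(tf.global_variables_initializer())
      print(sess.run(new_v1))
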
D | tpu_strategy_model_parallelism_test.py
     151  checkpoint_dir = self.get_temp_dir()
     152  checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
     165  checkpoint_management.latest_checkpoint(checkpoint_dir))
     334  checkpoint_dir = self.get_temp_dir()
     335  checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
     356  checkpoint_management.latest_checkpoint(checkpoint_dir))

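These TPU tests build on the generic object-based save/restore round trip. A minimal CPU-only sketch of that round trip (no TPU, no model parallelism; the directory is illustrative):

    import os
    import tensorflow as tf

    checkpoint_dir = "/tmp/ckpt_roundtrip"            # illustrative
    checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")

    v = tf.Variable(1.0)
    checkpoint = tf.train.Checkpoint(v=v)
    checkpoint.save(checkpoint_prefix)                # writes ckpt-1.* plus a "checkpoint" state file

    v.assign(42.0)                                    # clobber the value ...
    latest = tf.train.latest_checkpoint(checkpoint_dir)
    checkpoint.restore(latest)                        # ... and restore it: v == 1.0 again
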
/external/tensorflow/tensorflow/core/kernels/ |
D | checkpoint_callback_manager.cc
      49  absl::string_view checkpoint_dir,    in TriggerSaveCallbackIfFileNotExist()    argument
      53  checkpoint_dir, absl::StrCat(checkpoint_id, ".", file_extension));    in TriggerSaveCallbackIfFileNotExist()
      84  absl::string_view checkpoint_dir,    in TriggerRestoreCallbackIfFileExists()    argument
      88  checkpoint_dir, absl::StrCat(checkpoint_id, ".", file_extension));    in TriggerRestoreCallbackIfFileExists()
     165  std::string checkpoint_dir;    in RegisterSaveCallback()    local
     178  checkpoint_dir = last_saved_checkpoint_id_and_dir_.second;    in RegisterSaveCallback()
     183  TriggerSaveCallbackIfFileNotExist(checkpoint_id, checkpoint_dir,    in RegisterSaveCallback()
     199  std::string checkpoint_dir;    in RegisterRestoreCallback()    local
     212  checkpoint_dir = last_restored_checkpoint_id_and_dir_.second;    in RegisterRestoreCallback()
     217  TriggerRestoreCallbackIfFileExists(checkpoint_id, checkpoint_dir,    in RegisterRestoreCallback()

/external/tensorflow/tensorflow/python/checkpoint/ |
D | checkpoint_management.py
     249  def get_checkpoint_state(checkpoint_dir, latest_filename=None):    argument
     267  if isinstance(checkpoint_dir, os.PathLike):
     268  checkpoint_dir = os.fspath(checkpoint_dir)
     270  coord_checkpoint_filename = _GetCheckpointFilename(checkpoint_dir,
     283  + checkpoint_dir)
     287  ckpt.model_checkpoint_path = os.path.join(checkpoint_dir,
     291  ckpt.all_model_checkpoint_paths[i] = os.path.join(checkpoint_dir, p)
     327  def latest_checkpoint(checkpoint_dir, latest_filename=None):    argument
     350  ckpt = get_checkpoint_state(checkpoint_dir, latest_filename)

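get_checkpoint_state() and latest_checkpoint() are public as tf.train.get_checkpoint_state and tf.train.latest_checkpoint. A small sketch, assuming checkpoint_dir already contains checkpoints (e.g. from the round trip sketched earlier):

    import tensorflow as tf

    checkpoint_dir = "/tmp/ckpt_roundtrip"            # illustrative

    # Parses the "checkpoint" bookkeeping file inside the directory.
    state = tf.train.get_checkpoint_state(checkpoint_dir)
    if state is not None:
      print("newest prefix:", state.model_checkpoint_path)
      print("all kept prefixes:", list(state.all_model_checkpoint_paths))

    # Shortcut when only the newest prefix is needed (returns None if empty).
    print(tf.train.latest_checkpoint(checkpoint_dir))
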
/external/tensorflow/tensorflow/python/keras/distribute/ |
D | worker_training_state.py
      44  def __init__(self, model, checkpoint_dir):    argument
      75  directory=os.path.join(checkpoint_dir, 'chief'),
      78  checkpoint_dir, self._model.distribute_strategy)

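WorkerTrainingState is a Keras-internal helper; the closest public surface for directory-based fault tolerance during fit() is the BackupAndRestore callback. The sketch below is generic and does not claim to reproduce this helper's exact behavior; model, data, and backup_dir are illustrative.

    import tensorflow as tf

    model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
    model.compile(optimizer="sgd", loss="mse")

    # Training state is periodically backed up under backup_dir; if the process
    # dies and fit() is rerun with the same callback, it resumes from the last
    # completed epoch instead of restarting.
    backup = tf.keras.callbacks.BackupAndRestore(backup_dir="/tmp/backup_demo")

    x = tf.random.normal((32, 4))
    y = tf.random.normal((32, 1))
    model.fit(x, y, epochs=3, callbacks=[backup])
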
/external/libopus/dnn/torch/lpcnet/ |
D | train_lpcnet.py
     126  checkpoint_dir = os.path.join(args.output, 'checkpoints')    variable
     127  os.makedirs(checkpoint_dir, exist_ok=True)
     253  torch.save(checkpoint, os.path.join(checkpoint_dir, checkpoint_prefix + f'_best.pth'))
     256  torch.save(checkpoint, os.path.join(checkpoint_dir, checkpoint_prefix + f'_epoch_{ep}.pth'))
     257  torch.save(checkpoint, os.path.join(checkpoint_dir, checkpoint_prefix + f'_last.pth'))

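The LPCNet, OSCE, PLC, and FARGAN training scripts listed in this and the following entries share one layout: a 'checkpoints' folder under the output directory, plus rolling best/per-epoch/last files. A condensed, self-contained sketch of that layout (the checkpoint contents and names are placeholders):

    import os
    import torch

    output_dir = "output"                  # stand-in for args.output
    checkpoint_prefix = "lpcnet"           # stand-in for the script's prefix
    checkpoint_dir = os.path.join(output_dir, "checkpoints")
    os.makedirs(checkpoint_dir, exist_ok=True)

    checkpoint = {"state_dict": {}, "epoch": 1}   # placeholder contents
    ep = checkpoint["epoch"]

    # Best model so far, one file per epoch, and a rolling "last" copy.
    torch.save(checkpoint, os.path.join(checkpoint_dir, checkpoint_prefix + "_best.pth"))
    torch.save(checkpoint, os.path.join(checkpoint_dir, checkpoint_prefix + f"_epoch_{ep}.pth"))
    torch.save(checkpoint, os.path.join(checkpoint_dir, checkpoint_prefix + "_last.pth"))
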
/external/libopus/dnn/torch/osce/ |
D | train_vocoder.py
     103  checkpoint_dir = os.path.join(args.output, 'checkpoints')    variable
     104  os.makedirs(checkpoint_dir, exist_ok=True)
     269  torch.save(checkpoint, os.path.join(checkpoint_dir, checkpoint_prefix + f'_best.pth'))
     281  torch.save(checkpoint, os.path.join(checkpoint_dir, checkpoint_prefix + f'_epoch_{ep}.pth'))
     282  torch.save(checkpoint, os.path.join(checkpoint_dir, checkpoint_prefix + f'_last.pth'))

D | train_model.py
     113  checkpoint_dir = os.path.join(args.output, 'checkpoints')    variable
     114  os.makedirs(checkpoint_dir, exist_ok=True)
     289  torch.save(checkpoint, os.path.join(checkpoint_dir, checkpoint_prefix + f'_best.pth'))
     301  torch.save(checkpoint, os.path.join(checkpoint_dir, checkpoint_prefix + f'_epoch_{ep}.pth'))
     302  torch.save(checkpoint, os.path.join(checkpoint_dir, checkpoint_prefix + f'_last.pth'))

/external/tensorflow/tensorflow/tools/api/golden/v2/ |
D | tensorflow.train.pbtxt
      89  …argspec: "args=[\'checkpoint_dir\', \'min_interval_secs\', \'timeout\', \'timeout_fn\'], varargs=N…
      93  …argspec: "args=[\'checkpoint_dir\', \'latest_filename\'], varargs=None, keywords=None, defaults=[\…
      97  …argspec: "args=[\'checkpoint_dir\', \'latest_filename\'], varargs=None, keywords=None, defaults=[\…

/external/libopus/dnn/torch/lossgen/ |
D | train_lossgen.py
      44  checkpoint_dir='checkpoint'    variable
      45  os.makedirs(checkpoint_dir, exist_ok=True)
      95  checkpoint_path = os.path.join(checkpoint_dir, f'lossgen_{epoch}.pth')

/external/tensorflow/tensorflow/tools/api/golden/v1/ |
D | tensorflow.train.-session-manager.pbtxt
      11  …argspec: "args=[\'self\', \'master\', \'init_op\', \'saver\', \'checkpoint_dir\', \'checkpoint_fil…
      15  …argspec: "args=[\'self\', \'master\', \'saver\', \'checkpoint_dir\', \'checkpoint_filename_with_pa…

/external/executorch/examples/models/llama/ |
D | export_llama_lib.py
     524  checkpoint_dir = (
     525  canonical_path(args.checkpoint_dir) if args.checkpoint_dir else None
     543  checkpoint_dir=checkpoint_dir,
     850  checkpoint_dir: Optional[str] = None,
     878  checkpoint or checkpoint_dir
     901  checkpoint_dir=checkpoint_dir,

/external/libopus/dnn/torch/plc/ |
D | train_plc.py
      42  checkpoint_dir = os.path.join(args.output, 'checkpoints')    variable
      44  os.makedirs(checkpoint_dir, exist_ok=True)
     141  checkpoint_path = os.path.join(checkpoint_dir, f'plc{args.suffix}_{epoch}.pth')

/external/libopus/dnn/torch/fargan/ |
D | train_fargan.py
      43  checkpoint_dir = os.path.join(args.output, 'checkpoints')    variable
      45  os.makedirs(checkpoint_dir, exist_ok=True)
     164  checkpoint_path = os.path.join(checkpoint_dir, f'fargan{args.suffix}_{epoch}.pth')

/external/tensorflow/tensorflow/python/tpu/ |
D | async_checkpoint.py | 43 checkpoint_dir: Text, 68 save_path = os.path.join(checkpoint_dir, checkpoint_basename) 78 self._checkpoint_dir = checkpoint_dir
|