/external/tensorflow/tensorflow/python/keras/benchmarks/ |
D | distribution_util.py |
  86   def get_distribution_strategy(distribution_strategy="mirrored",   argument
  108  distribution_strategy = distribution_strategy.lower()
  110  if distribution_strategy == "off":
  116  if distribution_strategy == "multi_worker_mirrored":
  120  if distribution_strategy == "one_device":
  128  if distribution_strategy == "mirrored":
  138  distribution_strategy)
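This helper lowercases the requested strategy name and dispatches on it. A minimal sketch of that dispatch, assuming the public tf.distribute strategy classes; the real helper also handles TPU, parameter-server, and multi-GPU plumbing that the truncated listing does not show:

    import tensorflow as tf

    def get_strategy_sketch(distribution_strategy="mirrored", num_gpus=1):
      """Map a strategy name to a tf.distribute.Strategy (illustrative only)."""
      distribution_strategy = distribution_strategy.lower()
      if distribution_strategy == "off":
        return None  # caller runs without a tf.distribute strategy
      if distribution_strategy == "multi_worker_mirrored":
        return tf.distribute.experimental.MultiWorkerMirroredStrategy()
      if distribution_strategy == "one_device":
        device = "/gpu:0" if num_gpus else "/cpu:0"
        return tf.distribute.OneDeviceStrategy(device)
      if distribution_strategy == "mirrored":
        return tf.distribute.MirroredStrategy()
      raise ValueError(
          "Unrecognized distribution strategy: %r" % distribution_strategy)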
|
D | benchmark_util.py |
  109  distribution_strategy='off'):   argument
  163  distribution_strategy=distribution_strategy, num_gpus=num_gpus)
  215  'distribution_strategy': distribution_strategy,
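benchmark_util accepts the strategy name (default 'off'), forwards it together with num_gpus to the strategy helper, and echoes it in the metadata it reports. A hedged usage sketch; the entry-point name measure_performance, its other arguments, and its return value are assumptions, since the truncated listing only shows the distribution_strategy plumbing:

    from tensorflow.python.keras.benchmarks import benchmark_util

    # Hypothetical call: only the distribution_strategy / num_gpus keywords are
    # taken from the listing above; everything else is illustrative.
    results = benchmark_util.measure_performance(
        build_model,                       # user-supplied model-building callable
        x=x_train, y=y_train,
        batch_size=256,
        distribution_strategy="mirrored",  # "off" disables tf.distribute
        num_gpus=2)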
|
/external/tensorflow/tensorflow/python/keras/distribute/ |
D | distributed_training_utils_v1.py |
  55   def set_weights(distribution_strategy, dist_model, weights):   argument
  76   assign_ops.append(distribution_strategy.unwrap(sw.assign(w)))
  83   def unwrap_values(distribution_strategy, grouped_inputs, grouped_outputs,   argument
  113  all_inputs = flatten_per_replica_values(distribution_strategy,
  115  all_outputs = unwrap_outputs(distribution_strategy, grouped_outputs,
  119  all_updates = flatten_per_replica_values(distribution_strategy,
  129  distribution_strategy, grouped_feed_dict)
  134  distribution_strategy, grouped_fetches)
  172  def unwrap_outputs(distribution_strategy, grouped_outputs,   argument
  194  return flatten_per_replica_values(distribution_strategy, [all …]
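These v1 utilities flatten per-replica values produced under a strategy back into plain per-device lists (the file uses the older unwrap spelling). A minimal sketch of the same idea with the public TF 2.x API; the replica function is purely illustrative:

    import tensorflow as tf

    strategy = tf.distribute.MirroredStrategy()  # one replica per visible GPU

    def replica_fn():
      # Each replica returns its own value: its replica id, shifted by one.
      ctx = tf.distribute.get_replica_context()
      return tf.cast(ctx.replica_id_in_sync_group, tf.float32) + 1.0

    per_replica = strategy.run(replica_fn)
    # Equivalent in spirit to flatten_per_replica_values / unwrap above:
    local_values = strategy.experimental_local_results(per_replica)
    print([float(t) for t in local_values])  # one entry per replica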
|
D | distributed_training_utils.py |
  28   def global_batch_size_supported(distribution_strategy):   argument
  29   return distribution_strategy.extended._global_batch_size  # pylint: disable=protected-access
|
/external/tensorflow/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/ |
D | mnist_conv_custom_training_benchmark_test.py |
  90   batch_size, distribution_strategy):   argument
  105  per_replica_losses = distribution_strategy.run(
  114  return distribution_strategy.reduce(
  123  distribution_strategy=None,   argument
  158  if distribution_strategy is not None:
  162  distribution_strategy)
  181  distribution_strategy=None):   argument
  209  if distribution_strategy is not None and \
  214  if distribution_strategy is None and \
  236  distribution_strategy, batch_size) [all …]
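The custom-training benchmark wraps a per-replica step in strategy.run and folds the per-replica losses together with strategy.reduce. A compact sketch of that pattern under an assumed MirroredStrategy and a toy Dense model (names are illustrative, not taken from the benchmark):

    import tensorflow as tf

    strategy = tf.distribute.MirroredStrategy()
    GLOBAL_BATCH_SIZE = 64

    with strategy.scope():
      model = tf.keras.Sequential([tf.keras.layers.Dense(10)])
      optimizer = tf.keras.optimizers.SGD()
      loss_obj = tf.keras.losses.SparseCategoricalCrossentropy(
          from_logits=True, reduction=tf.keras.losses.Reduction.NONE)

    def train_step(inputs):
      images, labels = inputs
      with tf.GradientTape() as tape:
        logits = model(images, training=True)
        # Scale by the global batch size so the cross-replica SUM is a true mean.
        loss = tf.nn.compute_average_loss(
            loss_obj(labels, logits), global_batch_size=GLOBAL_BATCH_SIZE)
      grads = tape.gradient(loss, model.trainable_variables)
      optimizer.apply_gradients(zip(grads, model.trainable_variables))
      return loss

    @tf.function
    def distributed_train_step(inputs):
      per_replica_losses = strategy.run(train_step, args=(inputs,))
      return strategy.reduce(
          tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)

Pairing the SUM reduction with tf.nn.compute_average_loss keeps the aggregated value equal to the per-example mean over the global batch, which is the usual pattern behind the run/reduce calls shown above.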
|
D | bidirectional_lstm_benchmark_test.py | 121 distribution_strategy='mirrored',
|
D | reuters_mlp_benchmark_test.py | 126 distribution_strategy='mirrored',
|
D | mnist_conv_benchmark_test.py | 126 distribution_strategy='mirrored',
|
D | mnist_hierarchical_rnn_benchmark_test.py | 127 distribution_strategy='mirrored',
|
D | mnist_irnn_benchmark_test.py | 124 distribution_strategy='mirrored',
|
D | antirectifier_benchmark_test.py | 119 distribution_strategy="mirrored",
|
D | cifar10_cnn_benchmark_test.py | 135 distribution_strategy='mirrored',
|
D | text_classification_transformer_benchmark_test.py | 131 distribution_strategy='mirrored',
|
D | README.md | 224 `distribution_strategy` and etc. You can check examples from
|
/external/tensorflow/tensorflow/python/training/ |
D | slot_creator.py |
  150  distribution_strategy = distribution_strategy_context.get_strategy()
  151  with distribution_strategy.extended.colocate_vars_with(primary):
  209  distribution_strategy = distribution_strategy_context.get_strategy()
  210  with distribution_strategy.extended.colocate_vars_with(primary):
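slot_creator (like the two optimizer files below) fetches the current strategy and creates each slot variable colocated with its primary variable. A minimal sketch of that colocation pattern in eager TF 2.x; distribution_strategy_context.get_strategy is the internal spelling of tf.distribute.get_strategy:

    import tensorflow as tf

    strategy = tf.distribute.get_strategy()  # default (no-op) strategy if none is active

    primary = tf.Variable(tf.zeros([3]), name="weights")

    # Place the slot on the same device(s) as the variable it shadows,
    # mirroring the slot_creator / optimizer pattern above.
    with strategy.extended.colocate_vars_with(primary):
      accumulator = tf.Variable(tf.zeros_like(primary), name="weights/momentum")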
|
D | sync_replicas_optimizer.py |
  261  distribution_strategy = distribution_strategy_context.get_strategy()
  262  with distribution_strategy.extended.colocate_vars_with(local_anchor):
|
D | optimizer.py |
  821  distribution_strategy = distribute_ctx.get_strategy()
  822  with distribution_strategy.extended.colocate_vars_with(colocate_with):
|
/external/tensorflow/tensorflow/python/keras/engine/ |
D | training_arrays_v1.py |
  536  def _get_iterator(inputs, distribution_strategy=None):   argument
  537  if distribution_strategy:
  539  inputs, distribution_strategy)
  543  def _reinitialize_iterator(iterator, distribution_strategy=None):   argument
  544  if distribution_strategy:
  546  iterator, distribution_strategy)
|
D | data_adapter.py | 1170 distribution_strategy=ds_context.get_strategy(),
|
/external/tensorflow/tensorflow/python/keras/ |
D | backend.py |
  6504  def configure_and_create_distributed_session(distribution_strategy):   argument
  6507  def _create_session(distribution_strategy):   argument
  6517  if is_tpu_strategy(distribution_strategy):
  6521  distribution_strategy.configure(session_config)
  6522  …master = distribution_strategy.extended._tpu_cluster_resolver.master()  # pylint: disable=protecte…
  6535  distribution_strategy.configure(session_config)
  6540  if distribution_strategy.extended._in_multi_worker_mode():
  6543  distribution_strategy,
  6546  _create_session(distribution_strategy)
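backend.py builds a TF1-style session whose ConfigProto (and, for TPU strategies, session target) comes from the strategy itself, via strategy.configure(session_config). A rough sketch of the non-TPU, single-worker path, assuming the compat.v1 APIs; the TPU and multi-worker branches from the listing are omitted:

    import tensorflow as tf

    def create_session_sketch(strategy):
      """Create and install a tf.compat.v1.Session for `strategy` (illustrative)."""
      session_config = tf.compat.v1.ConfigProto(allow_soft_placement=True)
      # Lets the strategy fill in device filters, collective-op settings, etc.
      strategy.configure(session_config)
      session = tf.compat.v1.Session(config=session_config)
      tf.compat.v1.keras.backend.set_session(session)
      return session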
|