/external/tensorflow/tensorflow/python/tpu/
tpu_context.py
     97   replicas_consumed = self._internal_ctx.num_replicas
    105   def num_replicas(self):  (member in TPUContext)
    114   return self._internal_ctx.num_replicas
    291   num_replicas=self.num_replicas)
    296   logging.info('num_replicas: %d', self.num_replicas)
    356   return self.num_replicas // self.num_hosts
    361   def num_replicas(self):  (member in _InternalTPUContext)
    478   return global_batch_size // self.num_replicas
    491   return global_batch_size // self.num_replicas
    597   num_replicas = self.num_replicas
    [all …]
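The `_InternalTPUContext` hits above encode two pieces of arithmetic: replicas are spread across hosts (line 356) and the global batch is split evenly across replicas (lines 478/491). A standalone restatement with hypothetical numbers:

    # Hypothetical values; the real context reads them from TPU system metadata.
    num_hosts = 2
    num_replicas = 8
    global_batch_size = 1024

    replicas_per_host = num_replicas // num_hosts           # 4   (cf. line 356)
    per_replica_batch = global_batch_size // num_replicas   # 128 (cf. lines 478/491)
    assert per_replica_batch * num_replicas == global_batch_size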
device_assignment.py
    114   def num_replicas(self):  (member in DeviceAssignment)
    172   num_replicas=1):  (argument)
    174   num_replicas)
    180   num_replicas=1):  (argument)
    265   if num_replicas > max_replicas:
    269   num_replicas, max_replicas, computation_shape, computation_stride,
    276   if num_replicas > 0:
    277   remaining_replicas = num_replicas
    298   replica_offsets = np.full([num_replicas, topology_rank], -1, dtype=np.int32)
    299   for replica in xrange(num_replicas):
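Judging from the argspec in the golden files below, the module-level builder takes `topology`, `computation_shape`, `computation_stride`, and `num_replicas`. A hedged sketch of a call, wrapped in a function because the `topology` value must come from an already-initialized TPU system (the shapes here are assumptions, not prescribed values):

    from tensorflow.python.tpu import device_assignment as device_assignment_lib

    def two_replica_assignment(topology):
      """Builds a DeviceAssignment spanning two single-core replicas."""
      return device_assignment_lib.device_assignment(
          topology,
          computation_shape=[1, 1, 1],   # cores used by one replica (assumed)
          computation_stride=[1, 1, 1],  # spacing between replicas (assumed)
          num_replicas=2)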
tpu.py
    139   def __init__(self, name, num_replicas, pivot):  (argument)
    153   self._num_replicas = num_replicas
    654   num_replicas = len(padded_inputs)
    655   for i in range(num_replicas):
    745   num_replicas = len(inputs)
    748   if num_replicas == 0:
    752   for i in xrange(1, num_replicas):
    766   for i in range(num_replicas):
    824   replicas = [flat_inputs[replica][i] for replica in xrange(num_replicas)]
    831   name=cluster_name, num_replicas=num_replicas, pivot=pivot)
    [all …]
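Line 745 (`num_replicas = len(inputs)`) shows how `tpu.replicate()` infers its replica count: `inputs` carries one list of tensors per replica. A minimal sketch of that calling convention (graph construction only; actually running it needs a TPU):

    import tensorflow as tf
    from tensorflow.python.tpu import tpu

    def computation(x):
      return x * 2.0

    # Two replicas, so num_replicas == len(inputs) == 2.
    outputs = tpu.replicate(
        computation,
        inputs=[[tf.constant(1.0)], [tf.constant(2.0)]])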
training_loop.py
    164   num_replicas = tpu_function.get_tpu_context().number_of_shards
    165   if num_replicas is None:
    166   num_replicas = 1
    170   num_replicas)
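The four hits are the whole defaulting pattern: outside a TPU shard context `number_of_shards` is `None`, so the training loop assumes a single replica. Restated as a standalone snippet:

    from tensorflow.python.tpu import tpu_function

    # Outside tpu_function.tpu_shard_context(...), number_of_shards is None.
    num_replicas = tpu_function.get_tpu_context().number_of_shards
    if num_replicas is None:
      num_replicas = 1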
/external/tensorflow/tensorflow/compiler/xla/service/

hlo_runner.cc
    285   argument_buffers.reserve(options.num_replicas * options.arguments.size());  (in ExecuteReplicated())
    290   options.num_replicas * options.arguments.size() + 1);  (in ExecuteReplicated())
    293   for (int64 i = 0; i < options.num_replicas; ++i) {  (in ExecuteReplicated())
    319   int64 num_threads = (options.infeed != nullptr) ? options.num_replicas : 0;  (in ExecuteReplicated())
    321   num_threads += options.num_replicas;  (in ExecuteReplicated())
    329   for (int64 i = 0; i < options.num_replicas; ++i) {  (in ExecuteReplicated())
    347   for (int64 i = 0; i < options.num_replicas; ++i) {  (in ExecuteReplicated())
    378   options.num_replicas);  (in ExecuteReplicated())
    380   LOG(INFO) << "Creating thread pool for " << options.num_replicas  (in ExecuteReplicated())
    383   "replicas", options.num_replicas);  (in ExecuteReplicated())
    [all …]

hlo_runner.h
     50   int64 num_replicas = 1;  (member)
/external/tensorflow/tensorflow/python/keras/utils/

losses_utils.py
    179   num_replicas = (  # Used to convert from local to global batch size.
    181   loss = _safe_mean(loss, num_replicas * _num_elements(weighted_losses))
    239   num_replicas = (
    241   if num_replicas > 1:
    242   loss_value *= (1. / num_replicas)
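Lines 239-242 are the cross-replica scaling rule: each replica multiplies its loss by `1/num_replicas` so that a subsequent SUM all-reduce produces the global average rather than `num_replicas` times it. A numeric check with made-up values:

    num_replicas = 4
    local_losses = [0.8, 1.2, 1.0, 1.0]        # one loss per replica

    scaled = [l * (1. / num_replicas) for l in local_losses]
    global_loss = sum(scaled)                  # stand-in for all-reduce(SUM)
    assert abs(global_loss - sum(local_losses) / num_replicas) < 1e-9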
/external/tensorflow/tensorflow/contrib/eager/python/examples/resnet50/

resnet50_test.py
     53   def compute_gradients(model, images, labels, num_replicas=1):  (argument)
     59   if num_replicas != 1:
     60   loss /= num_replicas
    201   num_replicas=1):  (argument)
    204   replica_str = '' if num_replicas == 1 else 'replicas_%d_' % num_replicas
    207   extras = {'examples_per_sec': (num_replicas * batch_size) / avg_time}
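Line 207 reports throughput as global examples per second: per-replica batch size times replica count, divided by the average step time. With hypothetical benchmark numbers:

    num_replicas = 8
    batch_size = 32            # per-replica batch
    avg_time = 0.25            # seconds per step

    extras = {'examples_per_sec': (num_replicas * batch_size) / avg_time}
    print(extras)              # {'examples_per_sec': 1024.0}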
/external/tensorflow/tensorflow/compiler/xla/python/

local_computation_builder.cc
    218   int num_replicas = device_assignment_.replica_count();  (in DeviceOrdinals(), local)
    220   device_ordinals.reserve(num_replicas);  (in DeviceOrdinals())
    221   for (int i = 0; i < num_replicas; ++i) {  (in DeviceOrdinals())
    229   if (num_replicas() != 1) {  (in Execute())
    232   num_replicas());  (in Execute())
    268   if (argument_handles.size() != num_replicas()) {  (in ExecutePerReplica())
    279   VLOG(1) << "Executing with " << num_replicas() << " replicas.";  (in ExecutePerReplica())
    281   std::vector<StatusOr<ScopedShapedBuffer>> results(num_replicas());  (in ExecutePerReplica())
    305   if (num_replicas() == 1) {  (in ExecutePerReplica())
    312   num_replicas() - 1);  (in ExecutePerReplica())
    [all …]

local_computation_builder.i
    245   int64 num_replicas;  (variable)
    246   if (!GetIntAttr($input, "num_replicas", &num_replicas)) {
    249   build_options.set_num_replicas(num_replicas);
/external/tensorflow/tensorflow/contrib/tpu/python/tpu/

keras_tpu_variables.py
    261   def replicated_scope(num_replicas):  (argument)
    273   for i in range(num_replicas):
    302   def replicated_variable_for_optimizer(num_replicas):  (argument)
    304   if num_replicas == 1:
    317   for i in range(num_replicas):
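Both matched functions loop `for i in range(num_replicas)` to create one copy of each variable per replica. A framework-free sketch of that fan-out; the naming scheme is invented for illustration and is not the module's actual one:

    def make_replicated(name, initial_value, num_replicas):
      """Returns one copy of `initial_value` per replica (illustrative only)."""
      return {'%s/replica_%d' % (name, i): initial_value
              for i in range(num_replicas)}

    copies = make_replicated('dense/kernel', 0.0, num_replicas=2)
    # {'dense/kernel/replica_0': 0.0, 'dense/kernel/replica_1': 0.0}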
/external/tensorflow/tensorflow/compiler/xla/client/

executable_build_options.h
     72   int num_replicas() const { return num_replicas_; }  (in num_replicas(), function)
     73   ExecutableBuildOptions& set_num_replicas(int num_replicas);

executable_build_options.cc
     62   int num_replicas) {  (in set_num_replicas(), argument)
     63   num_replicas_ = num_replicas;  (in set_num_replicas())
/external/tensorflow/tensorflow/compiler/xrt/kernels/

xrt_compile_ops.cc
     88   int num_replicas = config.num_replicas() ? config.num_replicas() : 1;  (in Compile(), local)
     89   TF_RET_CHECK(num_replicas == 1);  (in Compile())
/external/tensorflow/tensorflow/core/grappler/optimizers/

auto_parallel.h
     29   AutoParallel(int num_replicas) : num_replicas_(num_replicas) {  (in AutoParallel(), argument)
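`AutoParallel` gets its replica count from the `auto_parallel` block of grappler's `RewriterConfig`. A hedged Python-side sketch of wiring that up through the session config proto (field names as I understand rewriter_config.proto):

    from tensorflow.core.protobuf import config_pb2, rewriter_config_pb2

    rewriter = rewriter_config_pb2.RewriterConfig()
    rewriter.auto_parallel.enable = True       # turn the pass on
    rewriter.auto_parallel.num_replicas = 2    # feeds AutoParallel(int num_replicas)

    session_config = config_pb2.ConfigProto()
    session_config.graph_options.rewrite_options.CopyFrom(rewriter)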
/external/tensorflow/tensorflow/tools/api/golden/v2/

tensorflow.tpu.experimental.-device-assignment.pbtxt
     14   name: "num_replicas"
     27   argspec: "args=[\'topology\', \'computation_shape\', \'computation_stride\', \'num_replicas\'], va…

/external/tensorflow/tensorflow/tools/api/golden/v1/

tensorflow.tpu.experimental.-device-assignment.pbtxt
     14   name: "num_replicas"
     27   argspec: "args=[\'topology\', \'computation_shape\', \'computation_stride\', \'num_replicas\'], va…
/external/tensorflow/tensorflow/compiler/xla/tests/

hlo_test_base.cc
    210   int64 num_replicas, bool use_threads) {  (in ExecuteReplicated(), argument)
    212   options.num_replicas = num_replicas;  (in ExecuteReplicated())
    222   int64 num_replicas, DeviceAssignment* device_assignment,  (in ExecuteReplicated(), argument)
    225   options.num_replicas = num_replicas;  (in ExecuteReplicated())

hlo_test_base.h
    183   int64 num_replicas, bool use_threads);
    188   int64 num_replicas, DeviceAssignment* device_assignment,
/external/tensorflow/tensorflow/python/ops/losses/

losses_impl.py
    193   num_replicas = (  # Used to convert from local to global batch size.
    196   denom = (num_replicas *
    201   loss = _safe_mean(loss, num_replicas * _num_present(losses, weights))
    203   loss = _safe_mean(loss, num_replicas * _num_elements(losses))
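Where losses_utils.py scales by `1/num_replicas`, this file bakes the replica count into the mean's denominator: dividing a local sum by `num_replicas * local_count` turns a per-shard mean into a global-batch mean once losses are summed across replicas. Numerically, with made-up values:

    num_replicas = 2
    local_losses = [3.0, 1.0]                  # this replica's shard

    loss = sum(local_losses) / (num_replicas * len(local_losses))  # 1.0
    # Summing `loss` over both replicas recovers the global mean.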
/external/tensorflow/tensorflow/python/keras/engine/

distributed_training_utils.py
    872   num_replicas = model._distribution_strategy.num_replicas_in_sync
    873   nested_outs = batch_outs[i * num_replicas:i * num_replicas + num_replicas]
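Line 873 regroups a flat list that holds `num_replicas` values per logical output into per-output chunks. The slicing, run on made-up data:

    num_replicas = 2
    batch_outs = ['loss_r0', 'loss_r1', 'acc_r0', 'acc_r1']   # 2 outputs x 2 replicas

    for i in range(len(batch_outs) // num_replicas):
      nested_outs = batch_outs[i * num_replicas:i * num_replicas + num_replicas]
      print(i, nested_outs)   # 0 ['loss_r0', 'loss_r1']  then  1 ['acc_r0', 'acc_r1']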
/external/tensorflow/tensorflow/python/distribute/

tpu_strategy.py
    218   if self._device_assignment.num_replicas != 1:
    532   for r in range(self._device_assignment.num_replicas)]))
    546   models_per_host = min(self._device_assignment.num_replicas,
    554   return (self._device_assignment.num_replicas *
/external/tensorflow/tensorflow/core/api_def/base_api/

api_def_TPUReplicateMetadata.pbtxt
      5   name: "num_replicas"

api_def_TPUReplicate.pbtxt
     39   name: "num_replicas"
/external/tensorflow/tensorflow/contrib/distribute/python/

minimize_loss_test.py
    213   num_replicas = distribution.num_replicas_in_sync
    216   batch_per_epoch=num_replicas,
    248   return 60. + i + (num_replicas - 1.) / 2. * 100.