/external/tensorflow/tensorflow/core/grappler/optimizers/
auto_parallel.h
    29  AutoParallel(int num_replicas) : num_replicas_(num_replicas) {  [in AutoParallel()]
    30  CHECK(num_replicas_ >= 2);  [in AutoParallel()]
    51  int num_replicas_;  [variable]
auto_parallel.cc
    46  tensor->add_float_val(static_cast<float>(num_replicas_));  [in AddNodeDivConst()]
   243  for (int i = 0; i < num_replicas_; i++) {  [in BuildGraph()]
   248  for (int j = 0; j < num_replicas_; j++) {  [in BuildGraph()]
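Taken together, the auto_parallel.h and auto_parallel.cc hits outline the pattern: the constructor stores the replica count and CHECKs that it is at least 2, AddNodeDivConst materializes it as a float constant (used to divide, i.e. average, per-replica values), and BuildGraph loops over all replicas. The sketch below is a minimal standalone illustration of that pattern, not the TensorFlow AutoParallel optimizer itself; ReplicaPlanner and its methods are invented names.

#include <cassert>
#include <iostream>
#include <vector>

// Illustrative sketch of the pattern visible in the auto_parallel hits:
// store a replica count, require at least two replicas, and use the count
// both as a loop bound and as a float divisor for averaging.
class ReplicaPlanner {
 public:
  explicit ReplicaPlanner(int num_replicas) : num_replicas_(num_replicas) {
    assert(num_replicas_ >= 2 && "auto-parallelism needs at least 2 replicas");
  }

  // Mirrors AddNodeDivConst: the replica count becomes a float constant
  // so per-replica gradients can be divided (averaged) by it.
  float DivConstant() const { return static_cast<float>(num_replicas_); }

  // Mirrors the BuildGraph loops: emit one copy of the work per replica.
  std::vector<int> ReplicaIds() const {
    std::vector<int> ids;
    for (int i = 0; i < num_replicas_; i++) ids.push_back(i);
    return ids;
  }

 private:
  int num_replicas_;
};

int main() {
  ReplicaPlanner planner(4);
  std::cout << "divide gradients by " << planner.DivConstant() << "\n";
  for (int id : planner.ReplicaIds()) std::cout << "replica " << id << "\n";
}
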
/external/tensorflow/tensorflow/core/kernels/data/experimental/
rebatch_dataset_op.cc
    62  num_replicas_(num_replicas),  [in Dataset()]
    90  params.set_args(num_replicas_);  [in DebugString()]
   105  TF_RETURN_IF_ERROR(b->AddScalar(num_replicas_, &num_replicas));  [in AsGraphDefInternal()]
   129  if (slice_number_ % dataset()->num_replicas_ == 0) {  [in GetNextInternal()]
   150  CeilDiv(original_batch_dim, dataset()->num_replicas_);  [in GetNextInternal()]
   178  slice_number_ = (slice_number_ + 1) % dataset()->num_replicas_;  [in GetNextInternal()]
   195  if (slice_number_ % dataset()->num_replicas_ != 0) {  [in SaveInternal()]
   219  if (slice_number_ % dataset()->num_replicas_ != 0) {  [in RestoreInternal()]
   228  dataset()->num_replicas_);  [in RestoreInternal()]
   260  const int64 num_replicas_;  [member in tensorflow::data::experimental::__anon714a254d0111::RebatchDatasetOp::Dataset]
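The rebatch_dataset_op.cc hits suggest how a batch is re-split across replicas: a fresh input batch is fetched whenever slice_number_ % num_replicas_ == 0, each slice gets CeilDiv(original_batch_dim, num_replicas_) elements, and slice_number_ advances modulo num_replicas_. Below is a hedged sketch of just that slicing arithmetic in plain C++; CeilDiv and SliceBounds here are illustrative helpers, not the dataset kernel's actual code, and the real kernel also checkpoints the slice position (SaveInternal/RestoreInternal).

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <utility>

// Sketch of the slicing arithmetic suggested by the rebatch hits: a batch of
// `original_batch_dim` elements is split into `num_replicas` slices of
// CeilDiv(original_batch_dim, num_replicas) elements, so the last slice may be
// smaller (or empty).
int64_t CeilDiv(int64_t a, int64_t b) { return (a + b - 1) / b; }

// Returns the half-open range [start, end) covered by the given slice number.
std::pair<int64_t, int64_t> SliceBounds(int64_t original_batch_dim,
                                        int64_t num_replicas,
                                        int64_t slice_number) {
  const int64_t slice_size = CeilDiv(original_batch_dim, num_replicas);
  const int64_t start = std::min(slice_number * slice_size, original_batch_dim);
  const int64_t end = std::min(start + slice_size, original_batch_dim);
  return {start, end};
}

int main() {
  const int64_t batch = 10, replicas = 3;
  for (int64_t slice = 0; slice < replicas; ++slice) {
    auto [start, end] = SliceBounds(batch, replicas, slice);
    std::cout << "slice " << slice << ": [" << start << ", " << end << ")\n";
  }
  // slice_number_ would then advance as (slice_number_ + 1) % num_replicas_.
}
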
auto_shard_dataset_op_test.cc
    35  num_replicas_(num_replicas),  [in AutoShardDatasetParams()]
    60  attr_vector->emplace_back(AutoShardDatasetOp::kNumReplicas, num_replicas_);  [in GetAttributes()]
    73  int64 num_replicas_;  [member in tensorflow::data::experimental::__anon8ac557bd0111::AutoShardDatasetParams]
auto_shard_dataset_op.cc
    41  OP_REQUIRES_OK(ctx, ctx->GetAttr(kNumReplicas, &num_replicas_));  [in AutoShardDatasetOp()]
    58  num_replicas = num_replicas_;  [in MakeDataset()]
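The auto_shard_dataset_op.cc hits show the kernel reading the num_replicas attribute once in its constructor and copying it into each dataset it makes. A rough standalone sketch of that read-once, forward-later pattern, using a plain map in place of the OpKernelConstruction API (AutoShardOpSketch and its members are invented names):

#include <cstdint>
#include <map>
#include <stdexcept>
#include <string>

// Standalone sketch (not the TensorFlow OpKernel API): the op reads a
// "num_replicas" attribute once at construction and copies it into each
// dataset it creates.
class AutoShardOpSketch {
 public:
  explicit AutoShardOpSketch(const std::map<std::string, int64_t>& attrs) {
    auto it = attrs.find("num_replicas");  // stands in for ctx->GetAttr(kNumReplicas, ...)
    if (it == attrs.end()) throw std::runtime_error("missing num_replicas attr");
    num_replicas_ = it->second;
  }

  struct DatasetParams {
    int64_t num_replicas = 0;
  };

  DatasetParams MakeDataset() const {
    DatasetParams params;
    params.num_replicas = num_replicas_;  // forward the stored attribute
    return params;
  }

 private:
  int64_t num_replicas_ = 0;
};

int main() {
  AutoShardOpSketch op({{"num_replicas", 2}});
  return op.MakeDataset().num_replicas == 2 ? 0 : 1;
}
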
auto_shard_dataset_op.h
    46  int64 num_replicas_;  [variable]

/external/tensorflow/tensorflow/core/tpu/graph_rewrite/
distributed_tpu_rewrite_pass.h
   151  : num_replicas_(num_replicas),  [in ParameterInfo()]
   159  int64 NumReplicas() const { return num_replicas_; }  [in NumReplicas()]
   204  return num_replicas_ * num_per_replica_args_ + num_distributed_args_ +  [in NumInputsFromHost()]
   217  return num_replicas_ * num_retvals_per_replica_;  [in NumOutputsToHost()]
   224  return num_replicas_ * num_per_replica_args_ + num_distributed_args_;  [in FirstBroadcastArgFromHost()]
   237  int64 num_replicas_ = 1;
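The ParameterInfo hits expose the index arithmetic used when laying out host transfers: per-replica arguments are counted once per replica, distributed arguments once in total, each replica returns its own retvals, and broadcast arguments start right after the per-replica and distributed ones. Line 204 of the listing is truncated mid-expression, so the sketch below only reproduces the visible terms of NumInputsFromHost(); the field names are simplified and the struct is an illustration, not the real ParameterInfo class.

#include <cstdint>
#include <iostream>

// Sketch of the index arithmetic visible in the ParameterInfo hits. The real
// class has more fields; the extra term(s) of NumInputsFromHost() are omitted
// because the listing truncates that line.
struct ParameterInfoSketch {
  int64_t num_replicas = 1;
  int64_t num_per_replica_args = 0;
  int64_t num_distributed_args = 0;
  int64_t num_retvals_per_replica = 0;

  // Per-replica args are sent once per replica; distributed args once in total.
  int64_t NumInputsFromHost() const {
    return num_replicas * num_per_replica_args + num_distributed_args /* + ... */;
  }
  // Every replica returns its own copy of each retval.
  int64_t NumOutputsToHost() const {
    return num_replicas * num_retvals_per_replica;
  }
  // Broadcast args are laid out after all per-replica and distributed args.
  int64_t FirstBroadcastArgFromHost() const {
    return num_replicas * num_per_replica_args + num_distributed_args;
  }
};

int main() {
  ParameterInfoSketch info{/*num_replicas=*/2, /*num_per_replica_args=*/3,
                           /*num_distributed_args=*/1,
                           /*num_retvals_per_replica=*/2};
  std::cout << info.NumInputsFromHost() << " "          // 7 (plus any omitted terms)
            << info.NumOutputsToHost() << " "           // 4
            << info.FirstBroadcastArgFromHost() << "\n";  // 7
}
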
/external/tensorflow/tensorflow/compiler/xla/client/

executable_build_options.h
    80  int num_replicas() const { return num_replicas_; }  [in num_replicas()]
   147  int num_replicas_ = 1;  [variable]
executable_build_options.cc
    64  num_replicas_ = num_replicas;  [in set_num_replicas()]
   106  device_ordinal_, result_layout, num_replicas_);  [in ToString()]
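Across executable_build_options.h and .cc the pattern is a plain options object: num_replicas_ defaults to 1, a setter overwrites it, the getter returns it, and ToString() reports it. A minimal sketch of that shape (BuildOptionsSketch is an invented stand-in, not xla::ExecutableBuildOptions):

#include <iostream>
#include <sstream>
#include <string>

// Minimal sketch of the options pattern the executable_build_options hits show:
// num_replicas defaults to 1, a setter overwrites it, and ToString() reports it.
class BuildOptionsSketch {
 public:
  int num_replicas() const { return num_replicas_; }
  BuildOptionsSketch& set_num_replicas(int num_replicas) {
    num_replicas_ = num_replicas;
    return *this;  // chainable, builder style
  }
  std::string ToString() const {
    std::ostringstream out;
    out << "BuildOptionsSketch{num_replicas=" << num_replicas_ << "}";
    return out.str();
  }

 private:
  int num_replicas_ = 1;  // single-replica execution unless asked otherwise
};

int main() {
  BuildOptionsSketch options;
  options.set_num_replicas(8);
  std::cout << options.ToString() << "\n";
}
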
/external/tensorflow/tensorflow/compiler/xla/service/

ar_crs_combiner.h
    78  num_replicas_(num_replicas),  [in ArCrsCombiner()]
   172  int num_replicas_;  [variable]
ar_crs_combiner.cc
   168  HasCombinableReplicaGroup(instruction, num_replicas_,  [in MatchesArCrsPattern()]
   615  if (num_replicas_ > 1 && spmd_partition_) {  [in Run()]
   617  ReplaceReplicatedAllReduce(module, num_replicas_,  [in Run()]
/external/tensorflow/tensorflow/core/grappler/optimizers/data/

auto_shard.h
    51  int64 num_replicas_;  [variable]
auto_shard.cc
   687  num_replicas_ = config->parameter_map().at(kNumReplicasAttrName).i();  [in Init()]
   706  if (num_replicas_ < 0) {  [in Init()]
   719  auto_shard_policy_, num_replicas_, output));  [in OptimizeAndCollectStats()]
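The auto_shard.cc hits show the grappler-side flow: Init() pulls num_replicas out of the optimizer's parameter map, rejects negative values, and OptimizeAndCollectStats() later hands the value to the sharding rewrite together with the policy. A simplified sketch of that read-and-validate step using a plain std::map instead of the RewriterConfig parameter map (AutoShardSketch is an invented name):

#include <cstdint>
#include <map>
#include <stdexcept>
#include <string>

// Sketch of the Init() pattern in the auto_shard hits: read num_replicas from
// a parameter map and reject negative values before it is handed to the
// sharding rewrite.
class AutoShardSketch {
 public:
  void Init(const std::map<std::string, int64_t>& parameter_map) {
    num_replicas_ = parameter_map.at("num_replicas");
    if (num_replicas_ < 0) {
      throw std::invalid_argument("num_replicas must be non-negative");
    }
  }
  int64_t num_replicas() const { return num_replicas_; }

 private:
  int64_t num_replicas_ = 0;
};

int main() {
  AutoShardSketch shard;
  shard.Init({{"num_replicas", 4}});
  return shard.num_replicas() == 4 ? 0 : 1;
}
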
/external/tensorflow/tensorflow/compiler/xla/service/spmd/

spmd_partitioner.h
   190  num_replicas_(num_replicas),  [in SpmdPartitioner()]
   247  const int64 num_replicas_;  [variable]
   486  state.num_replicas = num_replicas_;  [in MakePartitioningState()]
   525  int64 num_replicas_;  [variable]
spmd_partitioner.cc
  1359  num_replicas_(num_replicas),  [in SpmdPartitioningVisitor()]
  3385  << num_replicas_ << " replicas and " << num_partitions_  [in DoPartition()]
  3636  CreateVisitor(computation, num_partitions_, num_replicas_,  [in PartitionComputation()]
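In the SPMD partitioner, num_replicas_ is stored once on the partitioner, copied into the partitioning state handed to lowering code (state.num_replicas = num_replicas_), passed to the per-computation visitor alongside num_partitions_, and logged during DoPartition. The sketch below mirrors that plumbing with heavily simplified types; PartitionerSketch, VisitorSketch, and PartitioningStateSketch are invented names, not the real SpmdPartitioner classes:

#include <cstdint>
#include <iostream>
#include <string>

// Per-computation state that the partitioner hands to lowering code.
struct PartitioningStateSketch {
  int64_t num_replicas = 1;
  int64_t num_partitions = 1;
};

// Per-computation visitor that receives the counts from the partitioner.
class VisitorSketch {
 public:
  VisitorSketch(const std::string& computation, int64_t num_partitions,
                int64_t num_replicas)
      : computation_(computation),
        num_partitions_(num_partitions),
        num_replicas_(num_replicas) {}

  void DoPartition() const {
    std::cout << "Partitioning " << computation_ << " over " << num_replicas_
              << " replicas and " << num_partitions_ << " partitions\n";
  }

 private:
  std::string computation_;
  int64_t num_partitions_;
  int64_t num_replicas_;
};

// The partitioner stores the counts once and threads them everywhere else.
class PartitionerSketch {
 public:
  PartitionerSketch(int64_t num_partitions, int64_t num_replicas)
      : num_partitions_(num_partitions), num_replicas_(num_replicas) {}

  PartitioningStateSketch MakePartitioningState() const {
    PartitioningStateSketch state;
    state.num_replicas = num_replicas_;      // mirrors state.num_replicas = num_replicas_;
    state.num_partitions = num_partitions_;
    return state;
  }

  void PartitionComputation(const std::string& name) const {
    VisitorSketch(name, num_partitions_, num_replicas_).DoPartition();
  }

 private:
  const int64_t num_partitions_;
  const int64_t num_replicas_;
};

int main() {
  PartitionerSketch partitioner(/*num_partitions=*/8, /*num_replicas=*/2);
  partitioner.MakePartitioningState();
  partitioner.PartitionComputation("main");
}
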