/external/tensorflow/tensorflow/core/kernels/ |
D | transpose_functor.h |
     96  int dim_idx = 0;                                      in ReduceTransposeDimensions() local
    102  combined_dims[dim_idx] *= shape.dim_size(cur_head);   in ReduceTransposeDimensions()
    106  dim_idx++;                                            in ReduceTransposeDimensions()
    107  new_dim_position[cur_head] = dim_idx;                 in ReduceTransposeDimensions()
    108  combined_dims[dim_idx] = shape.dim_size(cur_head);    in ReduceTransposeDimensions()
    112  new_perm->resize(dim_idx + 1);                        in ReduceTransposeDimensions()
    113  new_dims->resize(dim_idx + 1);                        in ReduceTransposeDimensions()
    114  dim_idx = 0;                                          in ReduceTransposeDimensions()
    118  (*new_perm)[dim_idx] = new_perm_idx;                  in ReduceTransposeDimensions()
    119  (*new_dims)[dim_idx] = combined_dims[new_perm_idx];   in ReduceTransposeDimensions()
    [all …]
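
The hits above come from the dimension-merging step that shrinks a transpose: runs of source dimensions that stay adjacent, in the same order, under the permutation are collapsed into one larger dimension. A minimal standalone sketch of that idea (hypothetical helper MergeTransposeDims; not the TensorFlow routine, which also records new_dim_position for each original dimension):

    // Sketch: merge runs of dimensions that remain adjacent (in the same order)
    // under a transpose permutation, producing an equivalent smaller transpose.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    void MergeTransposeDims(const std::vector<int64_t>& dims,
                            const std::vector<int>& perm,
                            std::vector<int64_t>* new_dims,
                            std::vector<int>* new_perm) {
      const int rank = static_cast<int>(dims.size());
      // Source dim d can join the group of d-1 iff d directly follows d-1 in perm.
      std::vector<bool> starts_group(rank, true);
      for (int i = 0; i + 1 < rank; ++i) {
        if (perm[i + 1] == perm[i] + 1) starts_group[perm[i + 1]] = false;
      }
      // Assign group ids in source order and multiply sizes within a group.
      std::vector<int> group_of(rank, 0);
      int gid = -1;
      for (int d = 0; d < rank; ++d) {
        if (starts_group[d]) ++gid;
        group_of[d] = gid;
      }
      new_dims->assign(gid + 1, 1);
      for (int d = 0; d < rank; ++d) (*new_dims)[group_of[d]] *= dims[d];
      // The merged permutation lists the groups in the order they appear in perm.
      new_perm->clear();
      for (int i = 0; i < rank; ++i) {
        if (starts_group[perm[i]]) new_perm->push_back(group_of[perm[i]]);
      }
    }

    int main() {
      std::vector<int64_t> new_dims;
      std::vector<int> new_perm;
      // [2,3,4,5] transposed by (0,2,3,1): dims 2 and 3 stay adjacent, so they merge.
      MergeTransposeDims({2, 3, 4, 5}, {0, 2, 3, 1}, &new_dims, &new_perm);
      for (int64_t d : new_dims) std::printf("%lld ", static_cast<long long>(d));  // 2 3 20
      std::printf("| ");
      for (int p : new_perm) std::printf("%d ", p);  // 0 2 1
      std::printf("\n");
    }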
|
D | debug_ops.h |
    636  int dim_idx = 4;
    640  output_tensor->flat<Tout>()(dim_idx++) =
    643  output_tensor->flat<Tout>()(dim_idx++) = 0.0;
    855  int dim_idx = 4;
    857  static_output[dim_idx++] = static_cast<Tout>(tensor.dim_size(i));
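
These hits write per-dimension sizes into a flat debug-summary output starting at index 4 and pad the remaining slots with 0.0. A rough, hedged sketch of that layout idea follows; the header fields before index 4 are not shown in the snippet, so the ones below are placeholders:

    // Sketch: serialize a shape into a fixed-width summary buffer:
    // a 4-slot header (assumed), then one slot per dimension, zero-padded.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    std::vector<double> SummarizeShape(const std::vector<int64_t>& shape,
                                       int max_rank) {
      std::vector<double> out(4 + max_rank, 0.0);
      out[0] = 1.0;                                // placeholder header fields
      out[1] = 0.0;
      out[2] = static_cast<double>(shape.size());  // rank stored by this sketch
      out[3] = 0.0;
      int dim_idx = 4;                             // dims start right after the header
      for (size_t i = 0;
           i < shape.size() && dim_idx < static_cast<int>(out.size()); ++i) {
        out[dim_idx++] = static_cast<double>(shape[i]);
      }
      // Remaining slots stay 0.0, mirroring the "= 0.0" padding in the snippet.
      return out;
    }

    int main() {
      for (double v : SummarizeShape({2, 3, 5}, 6)) std::printf("%g ", v);
      std::printf("\n");  // 1 0 3 0 2 3 5 0 0 0
    }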
|
/external/tensorflow/tensorflow/dtensor/mlir/ |
D | collectives_common.cc |
    58  for (int64 dim_idx = 0; dim_idx < device_loc.size(); ++dim_idx) {       in GetAllReducePartitionsFromReducedDims() local
    59  if (!reduced_dims.contains(output_layout.mesh().dim_name(dim_idx))) {   in GetAllReducePartitionsFromReducedDims()
    60  kept_dims.push_back(device_loc[dim_idx]);                               in GetAllReducePartitionsFromReducedDims()
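
The loop keeps only the device-location coordinates along mesh dimensions that are not reduced, and devices that agree on those kept coordinates fall into the same all-reduce partition. A simplified standalone analogue of that grouping (hypothetical PartitionForAllReduce; plain std types rather than the DTensor mesh classes):

    // Sketch: group devices into all-reduce partitions. Devices that share the
    // coordinates along all *non-reduced* mesh dimensions belong to one group.
    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <set>
    #include <string>
    #include <vector>

    using DeviceLoc = std::vector<int64_t>;  // one coordinate per mesh dimension

    std::map<DeviceLoc, std::vector<int>> PartitionForAllReduce(
        const std::vector<DeviceLoc>& device_locs,
        const std::vector<std::string>& mesh_dim_names,
        const std::set<std::string>& reduced_dims) {
      std::map<DeviceLoc, std::vector<int>> partitions;
      for (int device_id = 0; device_id < static_cast<int>(device_locs.size());
           ++device_id) {
        DeviceLoc kept_dims;
        for (size_t dim_idx = 0; dim_idx < device_locs[device_id].size(); ++dim_idx) {
          // Drop coordinates along reduced mesh dimensions: devices that differ
          // only there must exchange data, so they share a partition key.
          if (reduced_dims.count(mesh_dim_names[dim_idx]) == 0) {
            kept_dims.push_back(device_locs[device_id][dim_idx]);
          }
        }
        partitions[kept_dims].push_back(device_id);
      }
      return partitions;
    }

    int main() {
      // 2x2 mesh ("x", "y"); reducing over "y" groups devices by their x coordinate.
      auto parts = PartitionForAllReduce({{0, 0}, {0, 1}, {1, 0}, {1, 1}},
                                         {"x", "y"}, {"y"});
      // Prints the two partitions: "0 1 | 2 3 |"
      for (const auto& kv : parts) {
        for (int id : kv.second) std::printf("%d ", id);
        std::printf("| ");
      }
      std::printf("\n");
    }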
|
/external/tensorflow/tensorflow/core/kernels/mkl/ |
D | mkl_batch_matmul_helper.h |
    47  for (int dim_idx = 0; dim_idx < ndims_input; ++dim_idx)          in ExpandInputDimsToOutputShape() local
    48  reshaped_dims->at(dim_idx + dim_offset) = input_dims[dim_idx];   in ExpandInputDimsToOutputShape()
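
Here a lower-rank input's dimensions are right-aligned inside the output rank, so the missing leading batch dimensions behave as size 1 for broadcasting. A small hedged sketch of that expansion (standalone ExpandToOutputRank, not the MKL helper itself):

    // Sketch: expand an input shape to the output rank by right-aligning its
    // dimensions and filling the missing leading (batch) dimensions with 1.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    std::vector<int64_t> ExpandToOutputRank(const std::vector<int64_t>& input_dims,
                                            int ndims_output) {
      const int ndims_input = static_cast<int>(input_dims.size());
      const int dim_offset = ndims_output - ndims_input;  // leading dims to pad
      std::vector<int64_t> reshaped_dims(ndims_output, 1);
      for (int dim_idx = 0; dim_idx < ndims_input; ++dim_idx) {
        reshaped_dims[dim_idx + dim_offset] = input_dims[dim_idx];
      }
      return reshaped_dims;
    }

    int main() {
      // A rank-3 [4, 8, 16] input expanded to rank 5 becomes [1, 1, 4, 8, 16].
      for (int64_t d : ExpandToOutputRank({4, 8, 16}, 5)) {
        std::printf("%lld ", static_cast<long long>(d));
      }
      std::printf("\n");
    }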
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | tree_reduction_rewriter.cc |
    71  for (int64_t dim_idx = 0; dim_idx < input_shape.rank(); dim_idx++) {   in HandleReduce() local
    72  if (!absl::c_linear_search(hlo->dimensions(), dim_idx)) {              in HandleReduce()
    79  std::min(input_shape.dimensions(dim_idx), reduce_window_size_);        in HandleReduce()
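
The rewriter picks a per-dimension window for turning one big reduce into a reduce-window pass: dimensions that are not reduced keep window 1, and reduced dimensions are capped at reduce_window_size_. A hedged sketch of just that window-size computation (the padding and the follow-up reduce in the real pass are omitted):

    // Sketch: choose reduce-window sizes for a tree reduction. Non-reduced
    // dimensions keep window 1; reduced dimensions use min(dim, max_window).
    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    std::vector<int64_t> WindowSizes(const std::vector<int64_t>& input_dims,
                                     const std::vector<int64_t>& reduced_dims,
                                     int64_t max_window) {
      std::vector<int64_t> windows(input_dims.size(), 1);
      for (int64_t dim_idx = 0;
           dim_idx < static_cast<int64_t>(input_dims.size()); ++dim_idx) {
        const bool is_reduced =
            std::find(reduced_dims.begin(), reduced_dims.end(), dim_idx) !=
            reduced_dims.end();
        if (!is_reduced) continue;  // untouched dimension: window stays 1
        windows[dim_idx] = std::min(input_dims[dim_idx], max_window);
      }
      return windows;
    }

    int main() {
      // Reducing dim 1 of [8, 100000, 4] with a cap of 512 -> windows [1, 512, 1].
      for (int64_t w : WindowSizes({8, 100000, 4}, {1}, 512)) {
        std::printf("%lld ", static_cast<long long>(w));
      }
      std::printf("\n");
    }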
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | tree_reduction_rewriter.cc |
    161  for (int64_t dim_idx = 0; dim_idx < padded[0]->shape().dimensions_size();   in RewriteReduction() local
    162  dim_idx++) {                                                                in RewriteReduction()
    163  if (dim_idx == reduced_input_dimension) {                                   in RewriteReduction()
    172  reshaped_dimensions.push_back(padded[0]->shape().dimensions(dim_idx));      in RewriteReduction()
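
On the GPU side the pass reshapes the already-padded operand so that the single reduced dimension splits into two factors, which are then reduced in two steps; every other dimension is copied through. A simplified sketch of building that reshaped dimension list (the split factor is passed in here as an assumption; the real pass derives it from the padded size):

    // Sketch: build the reshaped dimensions for a two-step (tree) reduction by
    // splitting the reduced dimension into [dim / split, split]; all other
    // dimensions pass through. Assumes the dimension was padded to a multiple
    // of `split` beforehand.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    std::vector<int64_t> ReshapeForTreeReduction(
        const std::vector<int64_t>& padded_dims,
        int64_t reduced_input_dimension, int64_t split) {
      std::vector<int64_t> reshaped_dimensions;
      for (int64_t dim_idx = 0;
           dim_idx < static_cast<int64_t>(padded_dims.size()); ++dim_idx) {
        if (dim_idx == reduced_input_dimension) {
          // The reduced dimension becomes two dimensions that are reduced in turn.
          reshaped_dimensions.push_back(padded_dims[dim_idx] / split);
          reshaped_dimensions.push_back(split);
        } else {
          reshaped_dimensions.push_back(padded_dims[dim_idx]);
        }
      }
      return reshaped_dimensions;
    }

    int main() {
      // Splitting the reduced dim 1 of [32, 1048576] by 1024 -> [32, 1024, 1024].
      for (int64_t d : ReshapeForTreeReduction({32, 1048576}, 1, 1024)) {
        std::printf("%lld ", static_cast<long long>(d));
      }
      std::printf("\n");
    }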
|
/external/eigen/unsupported/Eigen/CXX11/src/Tensor/ |
D | TensorContraction.h |
    504  int dim_idx = 0;
    518  m_dimensions[dim_idx] = eval_left_dims[i];
    520  if (dim_idx != i) {
    529  dim_idx++;
    545  m_dimensions[dim_idx] = eval_right_dims[i];
    553  dim_idx++;
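
In the contraction evaluator, dim_idx walks the output shape while the non-contracted dimensions of the left operand and then of the right operand are copied in. A hedged standalone sketch of that output-shape computation (contractions given as (left dim, right dim) pairs; plain std types rather than Eigen's):

    // Sketch: the result shape of a tensor contraction is the left operand's
    // non-contracted dimensions followed by the right operand's, in order.
    #include <cstdint>
    #include <cstdio>
    #include <utility>
    #include <vector>

    std::vector<int64_t> ContractionResultDims(
        const std::vector<int64_t>& eval_left_dims,
        const std::vector<int64_t>& eval_right_dims,
        const std::vector<std::pair<int, int>>& contracted_pairs) {
      std::vector<bool> left_contracted(eval_left_dims.size(), false);
      std::vector<bool> right_contracted(eval_right_dims.size(), false);
      for (const auto& p : contracted_pairs) {
        left_contracted[p.first] = true;
        right_contracted[p.second] = true;
      }
      std::vector<int64_t> m_dimensions;
      for (size_t i = 0; i < eval_left_dims.size(); ++i) {
        if (!left_contracted[i]) m_dimensions.push_back(eval_left_dims[i]);
      }
      for (size_t i = 0; i < eval_right_dims.size(); ++i) {
        if (!right_contracted[i]) m_dimensions.push_back(eval_right_dims[i]);
      }
      return m_dimensions;
    }

    int main() {
      // Contracting dim 1 of a [2,3] tensor with dim 0 of a [3,5] tensor -> [2,5].
      for (int64_t d : ContractionResultDims({2, 3}, {3, 5}, {{1, 0}})) {
        std::printf("%lld ", static_cast<long long>(d));
      }
      std::printf("\n");
    }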
|
/external/tensorflow/tensorflow/tools/graph_transforms/ |
D | sparsify_gather.cc |
    400  Tensor dim_idx(DT_INT32, TensorShape({}));      in SparsifyGatherInternal() local
    401  dim_idx.flat<int32>()(0) = -1;                  in SparsifyGatherInternal()
    406  SetNodeAttr("value", dim_idx, &dim_idx_node);   in SparsifyGatherInternal()
|
/external/tensorflow/tensorflow/compiler/xla/ |
D | shape_util.cc |
    473  for (int dim_idx = 0; dim_idx < shape->layout().minor_to_major_size();   in AppendMinorDimension() local
    474  dim_idx++) {                                                             in AppendMinorDimension()
    475  int layout_idx = shape->layout().minor_to_major(dim_idx);                in AppendMinorDimension()
    476  shape->mutable_layout()->set_minor_to_major(dim_idx, layout_idx + 1);    in AppendMinorDimension()
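
AppendMinorDimension has to touch every minor_to_major entry because that list is a permutation of dimension numbers ordered from fastest- to slowest-varying in memory. As a hedged illustration of what that permutation encodes (a standalone stride computation, not the XLA routine above):

    // Sketch: minor_to_major lists dimension numbers from minor-most (fastest
    // varying in memory) to major-most. Strides follow directly from it.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    std::vector<int64_t> StridesFromLayout(const std::vector<int64_t>& dims,
                                           const std::vector<int>& minor_to_major) {
      std::vector<int64_t> strides(dims.size(), 0);
      int64_t stride = 1;
      // Walk from the minor-most dimension outward, growing the stride.
      for (int layout_pos = 0;
           layout_pos < static_cast<int>(minor_to_major.size()); ++layout_pos) {
        const int dim = minor_to_major[layout_pos];
        strides[dim] = stride;
        stride *= dims[dim];
      }
      return strides;
    }

    int main() {
      // A [2, 3, 4] shape with layout {2, 1, 0} (row-major) -> strides [12, 4, 1].
      for (int64_t s : StridesFromLayout({2, 3, 4}, {2, 1, 0})) {
        std::printf("%lld ", static_cast<long long>(s));
      }
      std::printf("\n");
    }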
|
/external/tensorflow/tensorflow/compiler/mlir/xla/transforms/ |
D | legalize_tf.cc |
    4753  for (int64_t dim_idx = 0; dim_idx < input_rank; ++dim_idx) {     in matchAndRewrite() local
    4755  loc, rewriter.getIndexAttr(dim_idx));                            in matchAndRewrite()
    4761  out_dim_size.push_back(input_shape_values[dim_idx]);             in matchAndRewrite()
    4765  for (int64_t dim_idx = 0; dim_idx < input_rank; ++dim_idx) {     in matchAndRewrite() local
    4766  broadcast_dimensions.push_back(1 + 2 * dim_idx);                 in matchAndRewrite()
    5574  for (int64_t dim_idx = 0; dim_idx < value_rank; ++dim_idx) {     in matchAndRewrite() local
    5575  int64_t dim_size = value_type.getDimSize(dim_idx);               in matchAndRewrite()
    5579  rewriter.create<tensor::DimOp>(loc, op.getOperand(), dim_idx));  in matchAndRewrite()
    5581  if (dim_idx != axis) {                                           in matchAndRewrite()
    5589  if (dim_idx != axis) {                                           in matchAndRewrite()
    [all …]
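
The first two loops look like Tile-style lowering bookkeeping: each input dimension i is broadcast into output position 2*i + 1 of an interleaved shape whose even positions hold the replication factors, and collapsing each pair afterwards yields the tiled shape. A hedged sketch of just that index and shape bookkeeping (plain integers instead of MLIR values; the struct and function names are hypothetical):

    // Sketch: tile-style lowering bookkeeping. Each input dim i maps to output
    // dim 2*i + 1 of an interleaved [multiple_0, size_0, multiple_1, size_1, ...]
    // shape; collapsing each (multiple, size) pair gives the tiled result shape.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct InterleavedTilePlan {
      std::vector<int64_t> broadcast_shape;       // rank 2 * input_rank
      std::vector<int64_t> broadcast_dimensions;  // where each input dim lands
      std::vector<int64_t> result_shape;          // after collapsing pairs
    };

    InterleavedTilePlan PlanTile(const std::vector<int64_t>& input_shape,
                                 const std::vector<int64_t>& multiples) {
      InterleavedTilePlan plan;
      const int64_t input_rank = static_cast<int64_t>(input_shape.size());
      for (int64_t dim_idx = 0; dim_idx < input_rank; ++dim_idx) {
        plan.broadcast_shape.push_back(multiples[dim_idx]);    // even slot: factor
        plan.broadcast_shape.push_back(input_shape[dim_idx]);  // odd slot: size
        plan.broadcast_dimensions.push_back(1 + 2 * dim_idx);  // input dim -> odd slot
        plan.result_shape.push_back(multiples[dim_idx] * input_shape[dim_idx]);
      }
      return plan;
    }

    int main() {
      // Tiling a [2, 3] tensor by multiples [4, 1]:
      // broadcast shape [4, 2, 1, 3], input dims land at {1, 3}, result [8, 3].
      InterleavedTilePlan plan = PlanTile({2, 3}, {4, 1});
      for (int64_t d : plan.broadcast_dimensions) {
        std::printf("%lld ", static_cast<long long>(d));
      }
      std::printf("\n");
    }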
|
/external/tensorflow/tensorflow/core/tpu/graph_rewrite/ |
D | distributed_tpu_rewrite_pass.cc |
    2990  for (int dim_idx = 0; dim_idx < sharded_rank; ++dim_idx) {                  in ComputeShardedArgShapes() local
    2992  shape->dim_size(dim_idx), sharding.tile_assignment_dimensions(dim_idx));    in ComputeShardedArgShapes()
    2993  shape->set_dim(dim_idx, sharded_dim);                                       in ComputeShardedArgShapes()
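
ComputeShardedArgShapes shrinks each dimension by the number of tiles the sharding assigns along it; the call truncated on line 2992 is presumably a ceiling-style division so uneven dimensions still fit on every core. A hedged sketch of that per-core shape computation (the rounding mode is an assumption):

    // Sketch: per-core (sharded) argument shape. Each dimension is divided by
    // the number of tiles the sharding places along it, rounding up so uneven
    // dimensions still fit on every core.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    std::vector<int64_t> ComputeShardedShape(
        const std::vector<int64_t>& full_shape,
        const std::vector<int64_t>& tile_assignment_dimensions) {
      std::vector<int64_t> sharded_shape(full_shape.size());
      for (size_t dim_idx = 0; dim_idx < full_shape.size(); ++dim_idx) {
        const int64_t tiles = tile_assignment_dimensions[dim_idx];
        // Ceiling division: the last shard may be padded.
        sharded_shape[dim_idx] = (full_shape[dim_idx] + tiles - 1) / tiles;
      }
      return sharded_shape;
    }

    int main() {
      // A [10, 128] argument sharded 4 ways on dim 0 -> per-core shape [3, 128].
      for (int64_t d : ComputeShardedShape({10, 128}, {4, 1})) {
        std::printf("%lld ", static_cast<long long>(d));
      }
      std::printf("\n");
    }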
|