/external/tensorflow/tensorflow/stream_executor/tpu/

c_api_conversions.cc (all matches in ToC()):
    212  void ToC(const xla::Shape& xla_shape, XLA_Shape* c_shape) {
    213    c_shape->element_type = xla_shape.element_type();
    215    CopyVector(xla_shape.dimensions(), &c_shape->dimensions);
    216    CopyVector(xla_shape.dynamic_dimensions(), &c_shape->dynamic_dimensions);
    218    c_shape->ntuple_shapes = xla_shape.tuple_shapes_size();
    219    if (c_shape->ntuple_shapes > 0) {
    220      c_shape->tuple_shapes = new XLA_Shape[c_shape->ntuple_shapes];
    221      for (int i = 0; i < c_shape->ntuple_shapes; ++i) {
    222        ToC(xla_shape.tuple_shapes(i), &c_shape->tuple_shapes[i]);
    227    ToC(xla_shape.layout(), &c_shape->layout);
    [all …]
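
ToC() deep-copies an xla::Shape into the C struct, recursing into tuple element shapes and allocating tuple_shapes with new[]. The matching Free() (declared in c_api_conversions.h below) must therefore release recursively. A minimal sketch of such a deallocator, assuming it mirrors ToC()'s allocations; FreeShape is a hypothetical name, and the real Free() must also release the CopyVector buffers and the layout copy, elided here:

    #include "tensorflow/stream_executor/tpu/c_api_conversions.h"

    // Sketch only: a deallocator mirroring ToC()'s recursive allocation.
    // `FreeShape` is a hypothetical name, to avoid clashing with the real
    // ApiConverter::Free; the CopyVector buffers and layout copy would
    // also need releasing in a complete implementation.
    void FreeShape(XLA_Shape* c_shape) {
      for (int i = 0; i < c_shape->ntuple_shapes; ++i) {
        FreeShape(&c_shape->tuple_shapes[i]);  // children before the array
      }
      if (c_shape->ntuple_shapes > 0) {
        delete[] c_shape->tuple_shapes;  // allocated with new[] in ToC()
      }
    }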

tpu_transfer_manager.cc:
  in TransferLiteralFromOutfeed():
    133  XLA_Shape c_shape;
    137  ApiConverter::ToC(literal.shape(), &c_shape);
    141  manager_, tpu_executor->se_executor(), &c_shape, &c_literal,
    144  ApiConverter::Free(&c_shape);
  in GetByteSizeRequirement():
    213  XLA_Shape c_shape;
    214  ApiConverter::ToC(shape, &c_shape);
    218  manager_, &c_shape);
    220  ApiConverter::Free(&c_shape);
  in WriteSingleTupleIndexTable():
    279  XLA_Shape c_shape;
    280  ApiConverter::ToC(shape, &c_shape);
    [all …]

c_api_conversions.h:
    46  xla::Shape FromC(const XLA_Shape* c_shape);
    47  void ToC(const xla::Shape& xla_shape, XLA_Shape* c_shape);
    48  void Free(XLA_Shape* c_shape);
    62  xla::ShapeIndex FromC(XLA_ShapeIndex* c_shape);
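
These declarations are the whole conversion surface the TPU call sites below rely on: ToC() copies into the C struct, FromC() reconstructs the C++ object, Free() releases the C-side copy. A minimal round-trip sketch, assuming the internal headers are on the include path:

    #include "tensorflow/compiler/xla/shape_util.h"
    #include "tensorflow/stream_executor/tpu/c_api_conversions.h"

    // Round-trip an xla::Shape through the C API representation.
    xla::Shape RoundTrip(const xla::Shape& shape) {
      XLA_Shape c_shape;
      ApiConverter::ToC(shape, &c_shape);               // C++ -> C (deep copy)
      xla::Shape back = ApiConverter::FromC(&c_shape);  // C -> C++
      ApiConverter::Free(&c_shape);                     // release the C copy
      return back;  // expected to be structurally equal to `shape`
    }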

tpu_executable.cc (in ShapeSize()):
    119  XLA_Shape c_shape;
    120  ApiConverter::ToC(shape, &c_shape);
    122  tensorflow::tpu::OpsApiFn()->HardwareLayout_ShapeSizeFn(&c_shape);
    123  ApiConverter::Free(&c_shape);
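
ShapeSize() shows the idiom that repeats through the rest of these results: ToC(), one C-API query, Free(). Since every early return must remember the Free(), a scope guard is a natural refactoring. ScopedXlaShape below is a hypothetical illustration, not a class in the TensorFlow tree:

    #include "tensorflow/stream_executor/tpu/c_api_conversions.h"

    // Hypothetical RAII guard pairing ToC() with Free().
    class ScopedXlaShape {
     public:
      explicit ScopedXlaShape(const xla::Shape& shape) {
        ApiConverter::ToC(shape, &c_shape_);
      }
      ~ScopedXlaShape() { ApiConverter::Free(&c_shape_); }
      ScopedXlaShape(const ScopedXlaShape&) = delete;
      ScopedXlaShape& operator=(const ScopedXlaShape&) = delete;
      XLA_Shape* get() { return &c_shape_; }

     private:
      XLA_Shape c_shape_;
    };

    // Usage, mirroring ShapeSize() above:
    //   ScopedXlaShape c_shape(shape);
    //   ... OpsApiFn()->HardwareLayout_ShapeSizeFn(c_shape.get());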

tpu_executor_c_api.h:
    305  XLA_Shape* c_shape);

/external/tensorflow/tensorflow/core/tpu/kernels/xla/

infeed_op.cc (in GetTPUInfeedLayout()):
    35  XLA_Shape c_shape;
    38  ApiConverter::ToC(shape, &c_shape);
    40  tpu::ExecutorApiFn()->TpuTransferManager_GetInfeedLayoutFn(&c_shape,
    43  ApiConverter::Free(&c_shape);

/external/tensorflow/tensorflow/core/tpu/

tpu_execute.cc:
  in ShapeSizeCompact():
    119  XLA_Shape c_shape;
    120  ApiConverter::ToC(shape, &c_shape);
    122  tensorflow::tpu::OpsApiFn()->HardwareLayout_ShapeSizeCompactFn(&c_shape);
    123  ApiConverter::Free(&c_shape);
  in ShapeSizeCompactRaw():
    128  XLA_Shape c_shape;
    129  ApiConverter::ToC(shape, &c_shape);
    132  &c_shape);
    133  ApiConverter::Free(&c_shape);

tpu_on_demand_compiler.cc (in ShapeSizeBytesFunction()):
    360  XLA_Shape c_shape;
    361  ApiConverter::ToC(shape, &c_shape);
    363  ExecutorApiFn()->TpuCompiler_ShapeSizeFn(compiler_, &c_shape);
    364  ApiConverter::Free(&c_shape);

/external/tensorflow/tensorflow/compiler/xla/pjrt/

tracked_device_buffer_test.cc (in TEST()):
    56  Shape c_shape = ShapeUtil::MakeShape(S64, {});
    59  TF_ASSERT_OK_AND_ASSIGN(auto c_buffer, MakeArray(c_shape, client));
    72  client->backend().transfer_manager()->HostShapeToDeviceShape(c_shape));
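
Note that this c_shape is a plain xla::Shape in a PJRT test, not the XLA_Shape C struct from the entries above; the name collision is coincidental. ShapeUtil::MakeShape(S64, {}) builds a scalar shape; for context, the same utility builds arrays and tuples:

    #include "tensorflow/compiler/xla/shape_util.h"

    // Standard xla::ShapeUtil constructors (scalar, array, tuple).
    xla::Shape scalar = xla::ShapeUtil::MakeShape(xla::S64, {});      // as in the test
    xla::Shape matrix = xla::ShapeUtil::MakeShape(xla::F32, {2, 3});  // 2x3 f32 array
    xla::Shape tuple  = xla::ShapeUtil::MakeTupleShape({scalar, matrix});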

/external/tensorflow/tensorflow/python/ops/linalg/sparse/

sparse_csr_matrix_ops.py:
    140  c_shape = tensor_shape.TensorShape(a_shape)
    141  c_shape = tensor_shape.TensorShape(c_shape[:rank - 2] + [c_rows, c_cols])
    142  c_handle = _create_handle_data_proto(c_shape.as_proto(),

/external/tensorflow/tensorflow/python/ops/

bincount_ops.py:
    425  c_ind, c_val, c_shape = gen_count_ops.sparse_count_sparse_output(
    435  c_ind, c_val, c_shape = gen_count_ops.ragged_count_sparse_output(
    444  c_ind, c_val, c_shape = gen_count_ops.dense_count_sparse_output(
    451  return sparse_tensor.SparseTensor(c_ind, c_val, c_shape)

/external/tensorflow/tensorflow/core/kernels/sparse/

mat_mul_op.cc (in Compute()):
    524  TensorShape c_shape;
    525  if (rank == 3) c_shape.AddDim(batch_size);
    527  c_shape.AddDim(b_outer_dim);
    528  c_shape.AddDim(a_outer_dim);
    530  c_shape.AddDim(a_outer_dim);
    531  c_shape.AddDim(b_outer_dim);
    534  const int64 c_matrix_lhs = c_shape.dim_size(row_dim);
    535  const int64 c_matrix_rhs = c_shape.dim_size(row_dim + 1);
    538  OP_REQUIRES_OK(ctx, ctx->allocate_output(0, c_shape, &c_t));
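
The search elides the branch heads at lines 526 and 529, which choose between the two dimension orders. A sketch of the assembly as a standalone helper, where transpose_output is an assumed name standing in for the elided condition:

    #include "tensorflow/core/framework/tensor_shape.h"

    // Sketch of the output-shape assembly in Compute(); `transpose_output`
    // is a hypothetical name for the condition the search results elide.
    tensorflow::TensorShape MakeOutputShape(int rank, int64_t batch_size,
                                            int64_t a_outer_dim,
                                            int64_t b_outer_dim,
                                            bool transpose_output) {
      tensorflow::TensorShape c_shape;
      if (rank == 3) c_shape.AddDim(batch_size);  // leading batch dim for rank 3
      if (transpose_output) {
        c_shape.AddDim(b_outer_dim);  // output laid out as [.., b_outer, a_outer]
        c_shape.AddDim(a_outer_dim);
      } else {
        c_shape.AddDim(a_outer_dim);  // output laid out as [.., a_outer, b_outer]
        c_shape.AddDim(b_outer_dim);
      }
      return c_shape;
    }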

/external/tensorflow/tensorflow/core/tpu/kernels/

infeed_ops.cc (in GetTPUInfeedLayout()):
    58  XLA_Shape c_shape;
    61  ApiConverter::ToC(shape, &c_shape);
    63  tpu::ExecutorApiFn()->TpuTransferManager_GetInfeedLayoutFn(&c_shape,
    66  ApiConverter::Free(&c_shape);

/external/tensorflow/tensorflow/core/grappler/optimizers/

constant_folding.cc (in ConstantPushDown()):
    3349  const PartialTensorShape c_shape(
    3354  if (c_shape.IsFullyDefined() && x_shape.IsFullyDefined() &&
    3355      c_shape.num_elements() > x_shape.num_elements()) {
    3357  } else if (!c_shape.unknown_rank() && !x_shape.unknown_rank() &&
    3358             c_shape.dims() > 0) {
    3359    for (int idx = 0; idx < std::min(x_shape.dims(), c_shape.dims()); ++idx) {
    3361      c_shape.dim_size(idx) > x_shape.dim_size(idx)) {
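
Here c_shape is the constant input's possibly-partial shape, and the guard stops ConstantPushDown() when pushing the constant past the op could broadcast the output to something larger than x. A condensed sketch of that check as a predicate; the return-value framing is an assumption, since the real code folds this into larger control flow:

    #include <algorithm>

    #include "tensorflow/core/framework/partial_tensor_shape.h"

    // True when broadcasting against the constant could enlarge the output
    // beyond x's shape, i.e. when the push-down rewrite should be skipped.
    bool ConstantCouldGrowOutput(const tensorflow::PartialTensorShape& c_shape,
                                 const tensorflow::PartialTensorShape& x_shape) {
      if (c_shape.IsFullyDefined() && x_shape.IsFullyDefined() &&
          c_shape.num_elements() > x_shape.num_elements()) {
        return true;  // strictly more elements than x
      }
      if (!c_shape.unknown_rank() && !x_shape.unknown_rank() &&
          c_shape.dims() > 0) {
        for (int idx = 0; idx < std::min(x_shape.dims(), c_shape.dims()); ++idx) {
          if (c_shape.dim_size(idx) > x_shape.dim_size(idx)) {
            return true;  // a known leading dim exceeds x's corresponding dim
          }
        }
      }
      return false;
    }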