/external/tensorflow/tensorflow/compiler/xla/service/

hlo_schedule_test.cc
   59  return ShapeUtil::ByteSizeOf(buffer.shape());  [in TEST_F()]
   93  return ShapeUtil::ByteSizeOf(buffer.shape());  [in TEST_F()]
  142  return ShapeUtil::ByteSizeOf(buffer.shape());  [in TEST_F()]
  186  return ShapeUtil::ByteSizeOf(buffer.shape());  [in TEST_F()]
  247  return ShapeUtil::ByteSizeOf(buffer.shape(),  [in TEST_F()]
  316  return ShapeUtil::ByteSizeOf(buffer.shape(),  [in TEST_F()]

hlo_memory_scheduler_test.cc
   91  return ShapeUtil::ByteSizeOf(buffer.shape());  [in TEST_F()]
  142  return ShapeUtil::ByteSizeOf(buffer.shape(), /*pointer_size=*/8);  [in TEST_F()]
  195  return ShapeUtil::ByteSizeOf(buffer.shape(), /*pointer_size=*/8);  [in TEST_F()]
  249  return ShapeUtil::ByteSizeOf(buffer.shape(), TUPLE_SIZE);  [in TEST_F()]
  300  return ShapeUtil::ByteSizeOf(buffer.shape(), 2);  [in TEST_F()]

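The schedule and memory-scheduler tests above all hand the scheduling entry points the same kind of callback: a function from a BufferValue to its size in bytes, with a pointer size supplied in the tuple-heavy tests so each tuple element is charged one pointer. Below is a minimal sketch of that callback, not copied from any test; the MakeSizeFunction helper name is hypothetical, the header paths are assumed from this tree, and sizeof(void*) stands in where the tests use 8 or TUPLE_SIZE.

    #include <functional>

    #include "tensorflow/compiler/xla/service/buffer_value.h"
    #include "tensorflow/compiler/xla/shape_util.h"
    #include "tensorflow/compiler/xla/types.h"

    // Size callback in the style of the tests above: array shapes cost their raw
    // element bytes; tuple shapes are charged one pointer per element.
    std::function<xla::int64(const xla::BufferValue&)> MakeSizeFunction() {
      return [](const xla::BufferValue& buffer) -> xla::int64 {
        return xla::ShapeUtil::ByteSizeOf(buffer.shape(),
                                          /*pointer_size=*/sizeof(void*));
      };
    }
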
hlo_rematerialization_test_utils.h
  139  static int64 ByteSizeOf(const Shape& shape) {  [in ByteSizeOf() function]
  140  return ShapeUtil::ByteSizeOf(shape, sizeof(void*));  [in ByteSizeOf()]

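Read together, those two matched lines are the entire helper used by the rematerialization tests, with the host pointer width standing in for the per-element pointer size of tuples:

    static int64 ByteSizeOf(const Shape& shape) {
      return ShapeUtil::ByteSizeOf(shape, sizeof(void*));
    }
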
hlo_input_output_alias_config_test.cc
  169  return ShapeUtil::ByteSizeOf(shape);  [in TEST_F()]
  195  return ShapeUtil::ByteSizeOf(shape);  [in TEST_F()]

hlo_execution_profile_test.cc
   50  return ShapeUtil::ByteSizeOf(shape, pointer_size);  [in TEST_F()]

hlo_rematerialization_test.cc
   47  [](const BufferValue& buffer) { return ByteSizeOf(buffer.shape()); },  [in RunHloRematerialization()]
   51  ByteSizeOf, memory_limit_bytes,  [in RunHloRematerialization()]
  291  /*memory_limit_bytes=*/4 * ByteSizeOf(vec1024_shape_), module.get()));  [in TEST_F()]

/external/tensorflow/tensorflow/compiler/xla/service/gpu/

thunk_emitter.h
   37  virtual int64 ByteSizeOf(const Shape& shape) const = 0;
   63  int64 ByteSizeOf(const Shape& shape) { return context_->ByteSizeOf(shape); }  [in ByteSizeOf() function]

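A sketch of the delegation those two declarations imply: the thunk emitter owns no size logic of its own and forwards to a context interface that the concrete IR emitter implements (see ir_emitter_unnested.h below). Only the two ByteSizeOf signatures are taken from the matches; the class names and members around them are illustrative assumptions.

    #include "tensorflow/compiler/xla/shape.h"
    #include "tensorflow/compiler/xla/types.h"

    // Illustrative context interface; the pure-virtual ByteSizeOf is line 37 above.
    class EmissionContext {
     public:
      virtual ~EmissionContext() = default;
      virtual int64 ByteSizeOf(const Shape& shape) const = 0;
    };

    // Illustrative emitter wrapper; the forwarding call is line 63 above.
    class ThunkEmitter {
     public:
      explicit ThunkEmitter(EmissionContext* context) : context_(context) {}
      int64 ByteSizeOf(const Shape& shape) { return context_->ByteSizeOf(shape); }

     private:
      EmissionContext* context_;
    };
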
fusion_merger.cc
   77  bytes += ShapeUtil::ByteSizeOf(user->shape());  [in CalculateBytesReadByFusionParameter()]
   81  bytes += ShapeUtil::ByteSizeOf(instruction->shape());  [in CalculateBytesReadByFusionParameter()]
  107  bytes_written += ShapeUtil::ByteSizeOf(operand->shape());  [in GetCurrentBytesTransferred()]
  111  ShapeUtil::ByteSizeOf(fusion->fused_expression_root()->shape());  [in GetCurrentBytesTransferred()]

cudnn_pad_for_convolutions.cc
  246  int64 old_bytes = ShapeUtil::ByteSizeOf(old_shape);  [in TryResolvePaddedShapesForTensorCore()]
  247  int64 new_bytes = ShapeUtil::ByteSizeOf(new_shape);  [in TryResolvePaddedShapesForTensorCore()]
  355  int64 old_bytes = ShapeUtil::ByteSizeOf(old_shape);  [in TryResolvePaddedShapesForIntegerConvolution()]
  356  int64 new_bytes = ShapeUtil::ByteSizeOf(new_shape);  [in TryResolvePaddedShapesForIntegerConvolution()]

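The old_bytes/new_bytes pairs above feed a profitability guard: padding channels for tensor-core or int8 layouts is only worth it if the padded tensor does not grow too much. The sketch below is hedged; only the two ByteSizeOf calls come from the matches, and the function name, control flow, and threshold constant are assumptions, not the pass's exact code.

    #include "tensorflow/compiler/xla/shape_util.h"

    // Assumed guard shape around the matched size computations.
    bool PaddedShapeIsAcceptable(const Shape& old_shape, const Shape& new_shape) {
      constexpr double kMaxBytesTouchedIncrease = 1.35;  // assumed threshold
      int64 old_bytes = ShapeUtil::ByteSizeOf(old_shape);
      int64 new_bytes = ShapeUtil::ByteSizeOf(new_shape);
      return new_bytes <= kMaxBytesTouchedIncrease * old_bytes;
    }
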
ir_emitter_nested.cc
   57  llvm_ir::ByteSizeOf(param_shape, module_->getDataLayout());  [in CodegenNestedComputation()]
   66  int64 root_size = llvm_ir::ByteSizeOf(  [in CodegenNestedComputation()]

thunk_emitter.cc
   88  /*mem_size=*/ShapeUtil::ByteSizeOf(inst->shape()), nullptr));  [in BuildGemmThunk()]
  265  /*mem_size=*/ShapeUtil::ByteSizeOf(shape), custom_call));  [in HandleCustomCall()]
  347  /*mem_size=*/ShapeUtil::ByteSizeOf(hlo->operand(1)->shape()), hlo));  [in HandleTriangularSolve()]

gpu_compiler.h
   98  return ShapeUtil::ByteSizeOf(shape, pointer_size);  [in ShapeSizeBytesFunction()]

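The gpu_compiler.h and mlir_compiler.h matches are the body of a ShapeSizeBytesFunction(): the compiler exposes its byte-size rule as a standalone callback that bakes in the device pointer width. A sketch of that shape follows; only the ShapeUtil::ByteSizeOf call is taken from the matched line, and the wrapper class and capture detail are assumptions.

    #include "tensorflow/compiler/xla/service/hlo_cost_analysis.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    namespace xla {

    // Illustrative owner of a pointer width; a real compiler holds this as a field.
    class SizeFunctionProvider {
     public:
      explicit SizeFunctionProvider(int64 pointer_size)
          : pointer_size_(pointer_size) {}

      HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const {
        int64 pointer_size = pointer_size_;  // capture the width, not `this`
        return [pointer_size](const Shape& shape) {
          return ShapeUtil::ByteSizeOf(shape, pointer_size);
        };
      }

     private:
      int64 pointer_size_;
    };

    }  // namespace xla
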
gpu_conv_algorithm_picker.cc
  396  ShapeUtil::ByteSizeOf(operand->shape())));  [in PickBestAlgorithmNoCacheCuda()]
  402  ShapeUtil::ByteSizeOf(result_shape)));  [in PickBestAlgorithmNoCacheCuda()]
  663  ShapeUtil::ByteSizeOf(operand->shape())));  [in PickBestAlgorithmNoCacheRocm()]
  671  ShapeUtil::ByteSizeOf(instr->shape().tuple_shapes(0))));  [in PickBestAlgorithmNoCacheRocm()]

ir_emitter_unnested.h
  148  int64 ByteSizeOf(const Shape& shape) const override {  [in ByteSizeOf() function]
  149  return llvm_ir::ByteSizeOf(  [in ByteSizeOf()]

gpu_hlo_schedule.cc
  206  return ShapeUtil::ByteSizeOf(buffer.shape(), pointer_size);  [in Build()]

/external/tensorflow/tensorflow/compiler/xla/service/cpu/tests/

cpu_bytesizeof_test.cc
   27  EXPECT_EQ(xla::llvm_ir::ByteSizeOf(tuple_shape, data_layout),  [in TEST_F()]
   35  EXPECT_EQ(xla::llvm_ir::ByteSizeOf(tuple_shape, data_layout),  [in TEST_F()]

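What that CPU test exercises, as a sketch: llvm_ir::ByteSizeOf takes the tuple pointer width from LLVM's DataLayout, so for a tuple shape it should agree with ShapeUtil::ByteSizeOf given the same pointer size. The data-layout string, helper name, and equality below are the assumed relationship, not lines copied from the test.

    #include "llvm/IR/DataLayout.h"
    #include "tensorflow/compiler/xla/service/llvm_ir/llvm_util.h"
    #include "tensorflow/compiler/xla/shape_util.h"
    #include "tensorflow/core/platform/logging.h"

    void CheckTupleByteSize() {
      llvm::DataLayout data_layout("p:64:64");  // example 64-bit pointer layout
      const xla::Shape tuple_shape =
          xla::ShapeUtil::MakeTupleShape({xla::ShapeUtil::MakeShape(xla::F32, {})});
      // Both sides should size the tuple using the same pointer width.
      CHECK_EQ(xla::llvm_ir::ByteSizeOf(tuple_shape, data_layout),
               xla::ShapeUtil::ByteSizeOf(tuple_shape, data_layout.getPointerSize()));
    }
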
/external/tensorflow/tensorflow/lite/delegates/gpu/gl/

object.h
   85  inline size_t ByteSizeOf(const Object& object);
  120  inline size_t ByteSizeOf(const Object& object) {  [in ByteSizeOf() function]

api.cc
  208  object_sizes_[GetRef(object)] = ByteSizeOf(object);  [in Add()]
  287  object_sizes_[GetRef(object)] = ByteSizeOf(object);  [in OnProgram()]

/external/tensorflow/tensorflow/compiler/xla/

shape_util_test.cc
  311  EXPECT_EQ(4, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(F32, {})));  [in TEST()]
  312  EXPECT_EQ(800, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(F32, {10, 20})));  [in TEST()]
  315  EXPECT_EQ(8, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(F64, {})));  [in TEST()]
  316  EXPECT_EQ(1600, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(F64, {10, 20})));  [in TEST()]
  319  EXPECT_EQ(8, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(C64, {})));  [in TEST()]
  320  EXPECT_EQ(1600, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(C64, {10, 20})));  [in TEST()]
  323  EXPECT_EQ(0, ShapeUtil::ByteSizeOf(ShapeUtil::MakeTokenShape()));  [in TEST()]

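The expected values in those assertions follow directly from bytes = element count * primitive size: a scalar F32 is 4 bytes; a 10x20 F32 array is 200 * 4 = 800; F64 and C64 are 8 bytes per element (C64 packs two 32-bit floats), giving 1600 for the 10x20 case; and a token shape carries no data, hence 0.
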
/external/tensorflow/tensorflow/compiler/xla/service/mlir_gpu/

mlir_compiler.h
   60  return ShapeUtil::ByteSizeOf(shape, pointer_size);  [in ShapeSizeBytesFunction()]

lhlo_dialect_emitter.h
   84  int64 ByteSizeOf(const Shape& shape) const override;

/external/tensorflow/tensorflow/compiler/xla/client/lib/

testing.cc
   38  return ShapeUtil::ByteSizeOf(shape);  [in DataSizeOfShape()]

/external/tensorflow/tensorflow/compiler/xla/python/

types.cc
  322  if (buffer_info.size * buffer_info.itemsize != ShapeUtil::ByteSizeOf(shape)) {  [in CastToArray()]
  325  " vs. ", ShapeUtil::ByteSizeOf(shape)));  [in CastToArray()]

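A hedged, fragment-level sketch of the consistency check those two matches belong to (the error construction and surrounding function are paraphrased, not copied): when a Python buffer is cast to an XLA array, the byte count implied by the buffer protocol has to equal ShapeUtil::ByteSizeOf for the target shape.

    // `shape` is the target xla::Shape; `buffer_info` is a pybind11::buffer_info
    // describing the Python-side memory.
    if (buffer_info.size * buffer_info.itemsize != ShapeUtil::ByteSizeOf(shape)) {
      return InvalidArgument("Size mismatch for buffer: %d vs. %d",
                             buffer_info.size * buffer_info.itemsize,
                             ShapeUtil::ByteSizeOf(shape));
    }
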
/external/tensorflow/tensorflow/compiler/xla/service/interpreter/

executable.cc
  154  return ShapeUtil::ByteSizeOf(shape, sizeof(void*));  [in ShapeSizeBytes()]

/external/tensorflow/tensorflow/compiler/xla/service/cpu/

cpu_runtime.cc
  434  se::DeviceMemoryBase(input_buffer, xla::ShapeUtil::ByteSizeOf(shape));  [in __xla_cpu_runtime_AllReduce()]
  436  se::DeviceMemoryBase(output_buffer, xla::ShapeUtil::ByteSizeOf(shape));  [in __xla_cpu_runtime_AllReduce()]

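The final pair of matches shows the CPU runtime side of the same idea, sketched below with only the matched constructor calls taken as given and everything around them assumed: raw host pointers handed to the AllReduce entry point are wrapped in se::DeviceMemoryBase spans whose length is the shape's byte size before being passed to the collective implementation.

    // input_buffer / output_buffer are the raw pointers the generated code passes in;
    // the variable names holding the wrapped spans are illustrative.
    se::DeviceMemoryBase input_buffer_data(
        input_buffer, xla::ShapeUtil::ByteSizeOf(shape));
    se::DeviceMemoryBase output_buffer_data(
        output_buffer, xla::ShapeUtil::ByteSizeOf(shape));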