/external/tensorflow/tensorflow/compiler/xla/service/

executable.h
    148: class ExecutionOutput {
    150: explicit ExecutionOutput(ScopedShapedBuffer result)  [in ExecutionOutput() function]
    152: ExecutionOutput(ScopedShapedBuffer result,  [in ExecutionOutput() function]
    157: ExecutionOutput(Shape on_host_shape, Shape on_device_shape,  [in ExecutionOutput() function]
    160: ExecutionOutput(Shape on_device_shape, se::DeviceMemoryAllocator* allocator,  [in ExecutionOutput() function]
    163: ExecutionOutput(ExecutionOutput&&) = default;
    164: ExecutionOutput& operator=(ExecutionOutput&&) = default;
    166: ~ExecutionOutput() {  [in ~ExecutionOutput()]
    185: ExecutionOutput& Commit() {  [in Commit()]
    288: StatusOr<ExecutionOutput> ExecuteOnStream(
    [all …]

executable.cc
    112: TF_ASSIGN_OR_RETURN(ExecutionOutput out,  [in ExecuteAsyncOnStream()]
    118: StatusOr<ExecutionOutput> Executable::ExecuteOnStream(  [in ExecuteOnStream()]
    122: StatusOr<ExecutionOutput> result = ExecuteAsyncOnStream(  [in ExecuteOnStream()]
    173: StatusOr<ExecutionOutput> Executable::ExecuteOnStreamWrapper(  [in ExecuteOnStreamWrapper()]
    176: StatusOr<ExecutionOutput> result =  [in ExecuteOnStreamWrapper()]
    290: StatusOr<ExecutionOutput> Executable::ExecuteAsyncOnStreamWrapper(  [in ExecuteAsyncOnStreamWrapper()]
    294: StatusOr<ExecutionOutput> return_value = ExecuteAsyncOnStream(  [in ExecuteAsyncOnStreamWrapper()]
    304: ExecutionOutput& result) {  [in MarkToBeReleasedArguments()]

hlo_runner.h
    88: StatusOr<ExecutionOutput> ExecuteWithDeviceBuffers(
    93: StatusOr<ExecutionOutput> ExecuteWithDeviceBuffers(

hlo_runner.cc
    97: TF_ASSIGN_OR_RETURN(ExecutionOutput result,  [in Execute()]
    111: TF_ASSIGN_OR_RETURN(ExecutionOutput result,  [in ExecuteWithExecutable()]
    150: StatusOr<ExecutionOutput> HloRunner::ExecuteWithDeviceBuffers(  [in ExecuteWithDeviceBuffers()]
    159: StatusOr<ExecutionOutput> HloRunner::ExecuteWithDeviceBuffers(  [in ExecuteWithDeviceBuffers()]
    176: ExecutionOutput retval,  [in ExecuteWithDeviceBuffers()]
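The entries above cover the core API: Executable::ExecuteOnStream (executable.h:288, executable.cc:118) is the synchronous wrapper over ExecuteAsyncOnStream (executable.cc:122), and ExecutionOutput is a move-only holder for the result ScopedShapedBuffer (constructors at 150-160, move operations at 163-164). A minimal caller-side sketch, not taken from the indexed sources; the ConsumeResult() accessor and the exact ExecuteOnStream parameter list are assumptions based on XLA headers of the same period:

    #include <utility>
    #include <vector>

    #include "tensorflow/compiler/xla/service/executable.h"

    // Hypothetical helper (not in the indexed files): run a compiled
    // executable synchronously and take ownership of the device result.
    xla::StatusOr<xla::ScopedShapedBuffer> RunAndTakeResult(
        xla::Executable* executable,
        const xla::ServiceExecutableRunOptions* run_options,
        std::vector<xla::ExecutionInput> arguments) {
      // ExecuteOnStream (executable.h:288) is the blocking entry point over
      // ExecuteAsyncOnStream (executable.cc:122) and yields a
      // StatusOr<ExecutionOutput>.
      xla::StatusOr<xla::ExecutionOutput> out = executable->ExecuteOnStream(
          run_options, std::move(arguments), /*hlo_execution_profile=*/nullptr);
      if (!out.ok()) {
        return out.status();
      }
      xla::ExecutionOutput output = out.ConsumeValueOrDie();
      // Commit() (executable.h:185) accepts result buffers that alias donated
      // inputs; ConsumeResult() (assumed name) then moves the
      // ScopedShapedBuffer out of the ExecutionOutput.
      return output.Commit().ConsumeResult();
    }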
/external/tensorflow/tensorflow/stream_executor/tpu/

tpu_executable_interface.cc
    53: StatusOr<ExecutionOutput>
    93: ExecutionOutput result(std::move(device_shape), allocator, device_ordinal);  [in AllocateOutputMemoryWithInputReuse()]
    168: StatusOr<ExecutionOutput> TpuExecutableInterface::ExecuteAsyncOnStream(  [in ExecuteAsyncOnStream()]
    186: ExecutionOutput result,  [in ExecuteAsyncOnStream()]

tpu_executable_interface.h
    47: StatusOr<ExecutionOutput> ExecuteAsyncOnStream(
    66: StatusOr<ExecutionOutput> AllocateOutputMemoryWithInputReuse(
/external/tensorflow/tensorflow/compiler/xla/client/

local_client.h
    56: StatusOr<ExecutionOutput> Run(std::vector<ExecutionInput> arguments,
    67: StatusOr<ExecutionOutput> RunAsync(std::vector<ExecutionInput> arguments,
    77: StatusOr<ExecutionOutput> RunAsync(

local_client.cc
    186: StatusOr<ExecutionOutput> LocalExecutable::Run(  [in Run()]
    193: return AsyncCallAndBlockHostUntilDone<ExecutionOutput>(  [in Run()]
    280: StatusOr<ExecutionOutput> LocalExecutable::RunAsync(  [in RunAsync()]
    309: TF_ASSIGN_OR_RETURN(ExecutionOutput outputs,  [in RunAsync()]
    322: StatusOr<ExecutionOutput> LocalExecutable::RunAsync(  [in RunAsync()]
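On the client side, LocalExecutable::Run (local_client.h:56) is the blocking wrapper: local_client.cc:193 shows it funnelling RunAsync through AsyncCallAndBlockHostUntilDone. A hedged caller sketch, reusing the same ConsumeValueOrDie()/ConsumeResult() pattern as above; the run-option fields set here are illustrative and not taken from the listing:

    #include <utility>
    #include <vector>

    #include "tensorflow/compiler/xla/client/local_client.h"

    namespace se = ::stream_executor;

    // Hypothetical client-side helper: execute a compiled LocalExecutable and
    // return the result buffer. `stream` and `allocator` are assumed to have
    // been set up by the caller.
    xla::StatusOr<xla::ScopedShapedBuffer> RunLocal(
        xla::LocalExecutable* executable,
        std::vector<xla::ExecutionInput> arguments,
        se::Stream* stream, se::DeviceMemoryAllocator* allocator) {
      xla::ExecutableRunOptions run_options;
      run_options.set_stream(stream);
      run_options.set_allocator(allocator);
      // Run() (local_client.h:56) blocks the host until the stream finishes.
      xla::StatusOr<xla::ExecutionOutput> result =
          executable->Run(std::move(arguments), run_options);
      if (!result.ok()) {
        return result.status();
      }
      // ConsumeResult() is an assumed accessor from the ExecutionOutput API of
      // the same period; it moves the ScopedShapedBuffer out.
      return result.ConsumeValueOrDie().ConsumeResult();
    }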
/external/tensorflow/tensorflow/compiler/xla/service/cpu/

cpu_executable.h
    58: StatusOr<ExecutionOutput> ExecuteAsyncOnStream(
    119: StatusOr<ExecutionOutput> CreateResultShapedBuffer(

cpu_executable.cc
    225: StatusOr<ExecutionOutput> CpuExecutable::CreateResultShapedBuffer(  [in CreateResultShapedBuffer()]
    230: ExecutionOutput result(/*on_device_shape=*/result_shape(),  [in CreateResultShapedBuffer()]
    325: StatusOr<ExecutionOutput> CpuExecutable::ExecuteAsyncOnStream(  [in ExecuteAsyncOnStream()]
    359: ExecutionOutput result,  [in ExecuteAsyncOnStream()]
/external/tensorflow/tensorflow/compiler/xrt/kernels/

tpu_execute_op.cc
    187: xla::StatusOr<xla::ExecutionOutput> RunExecutable(  [in RunExecutable()]
    215: xla::ExecutionOutput output,  [in RunExecutable()]
    229: xla::StatusOr<xla::ExecutionOutput> ExecuteTPUProgram(  [in ExecuteTPUProgram()]
    236: auto runfn = [&]() -> xla::StatusOr<xla::ExecutionOutput> {  [in ExecuteTPUProgram()]
    243: return memory_manager->Run<xla::ExecutionOutput>(  [in ExecuteTPUProgram()]
    355: xla::ExecutionOutput output,  [in DoWork()]
    456: xla::ExecutionOutput output,  [in DoWork()]

xrt_execute_op.cc
    268: se::Stream* stream, xla::ExecutionOutput run_result, xla::Backend* backend,  [in CreateOutputTuple()]
    356: xla::ExecutionOutput run_result,  [in RunExecutable()]
/external/tensorflow/tensorflow/compiler/xla/service/interpreter/

executable_base.cc
    41: StatusOr<ExecutionOutput> InterpreterExecutableBase::ExecuteAsyncOnStream(  [in ExecuteAsyncOnStream()]
    135: ExecutionOutput result(std::move(result_buffers));  [in ExecuteAsyncOnStream()]

executable_base.h
    40: StatusOr<ExecutionOutput> ExecuteAsyncOnStream(
/external/tensorflow/tensorflow/compiler/xla/service/gpu/

gpu_executable.h
    114: StatusOr<ExecutionOutput> ExecuteAsyncOnStream(
    126: StatusOr<ExecutionOutput> ExecuteAsyncOnStreamImpl(

gpu_executable.cc
    398: StatusOr<ExecutionOutput> GpuExecutable::ExecuteAsyncOnStream(  [in ExecuteAsyncOnStream()]
    411: ExecutionOutput out,  [in ExecuteAsyncOnStream()]
    416: StatusOr<ExecutionOutput> GpuExecutable::ExecuteAsyncOnStreamImpl(  [in ExecuteAsyncOnStreamImpl()]
    438: ExecutionOutput result(/*on_device_shape=*/output_shape_, memory_allocator,  [in ExecuteAsyncOnStreamImpl()]
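The backend entries show the two ways an ExecuteAsyncOnStream implementation builds its result: the interpreter wraps an already populated ScopedShapedBuffer (executable_base.cc:135, via the constructor at executable.h:150), while the CPU and GPU backends construct an ExecutionOutput from the output shape plus an allocator and device ordinal (cpu_executable.cc:230, gpu_executable.cc:438) and fill the buffers afterwards. A minimal sketch of the first, wrap-a-buffer form; the surrounding helper is hypothetical:

    #include <utility>

    #include "tensorflow/compiler/xla/service/executable.h"

    // Hypothetical backend-side helper: once the backend has written the
    // computation's outputs into a ScopedShapedBuffer it owns, moving that
    // buffer into an ExecutionOutput (executable.h:150) transfers ownership
    // of the device memory to the caller of ExecuteAsyncOnStream.
    xla::ExecutionOutput WrapResult(xla::ScopedShapedBuffer result_buffers) {
      return xla::ExecutionOutput(std::move(result_buffers));
    }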
/external/tensorflow/tensorflow/core/tpu/

tpu_execute.h
    40: xla::StatusOr<xla::ExecutionOutput> TPUExecute(

tpu_execute.cc
    412: xla::StatusOr<xla::ExecutionOutput> TPUExecute(  [in TPUExecute()]
    498: xla::StatusOr<xla::ExecutionOutput> output =  [in TPUExecute()]

tpu_on_demand_compiler.cc
    99: StatusOr<ExecutionOutput> ExecuteAsyncOnStream(  [in ExecuteAsyncOnStream()]
    160: ExecutionOutput output(std::move(result));  [in ExecuteAsyncOnStream()]
/external/tensorflow/tensorflow/compiler/jit/

xla_compile_on_demand_op.cc
    87: xla::StatusOr<xla::ExecutionOutput> run_result =  [in Run()]
    90: xla::ExecutionOutput execution_output = run_result.ConsumeValueOrDie();  [in Run()]
/external/tensorflow/tensorflow/compiler/xla/service/gpu/tests/

mlir_gpu_test_base.cc
    81: StatusOr<ExecutionOutput> MlirGpuTestBase::RunMlirModule(  [in RunMlirModule()]
    130: TF_ASSIGN_OR_RETURN(ExecutionOutput output,  [in RunMlirModuleWithHostBuffers()]

mlir_gpu_test_base.h
    55: StatusOr<ExecutionOutput> RunMlirModule(
/external/tensorflow/tensorflow/compiler/xla/tests/

buffer_donation_test.cc
    125: StatusOr<ExecutionOutput> output_status =  [in RunAndCheck()]
    137: ExecutionOutput output = output_status.ConsumeValueOrDie();  [in RunAndCheck()]
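buffer_donation_test.cc exercises the donated-input path that ExecutionOutput's Commit() and destructor guard (executable.h:185, 166) exist for: when an input buffer is donated, the output may alias it, and the output tracks whether the caller accepted ownership. A hedged sketch of how a client typically requests that aliasing when building the computation; SetUpAlias and the builder calls are assumed from the XlaBuilder API of the same period and are not part of the listing above:

    #include "tensorflow/compiler/xla/client/xla_builder.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    // Hypothetical example: build f(p0) = p0 + 1 and ask XLA to reuse the
    // buffer of parameter 0 for the root output, so the executable can later
    // be run with a donated ExecutionInput.
    xla::XlaComputation BuildDonatingComputation() {
      xla::XlaBuilder builder("donate_p0");
      xla::Shape shape = xla::ShapeUtil::MakeShape(xla::F32, {16});
      xla::XlaOp p0 = xla::Parameter(&builder, 0, shape, "p0");
      xla::XlaOp sum = xla::Add(p0, xla::ConstantR0<float>(&builder, 1.0f));
      // Declare that the root output (index {}) may alias parameter 0.
      builder.SetUpAlias(/*output_index=*/{}, /*param_number=*/0,
                         /*param_index=*/{});
      return builder.Build(sum).ConsumeValueOrDie();
    }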
/external/tensorflow/tensorflow/compiler/jit/kernels/

xla_ops.cc
    279: xla::StatusOr<xla::ExecutionOutput> execution_output;  [in Compute()]
    515: xla::StatusOr<xla::ExecutionOutput> execution_output;  [in Compute()]
/external/tensorflow/tensorflow/core/tpu/kernels/

tpu_execute_op.cc
    755: xla::ExecutionOutput output,  [in DoWork()]