
Searched refs:ExecutionOutput (Results 1 – 25 of 26) sorted by relevance


/external/tensorflow/tensorflow/compiler/xla/service/
executable.h
148 class ExecutionOutput {
150 explicit ExecutionOutput(ScopedShapedBuffer result) in ExecutionOutput() function
152 ExecutionOutput(ScopedShapedBuffer result, in ExecutionOutput() function
157 ExecutionOutput(Shape on_host_shape, Shape on_device_shape, in ExecutionOutput() function
160 ExecutionOutput(Shape on_device_shape, se::DeviceMemoryAllocator* allocator, in ExecutionOutput() function
163 ExecutionOutput(ExecutionOutput&&) = default;
164 ExecutionOutput& operator=(ExecutionOutput&&) = default;
166 ~ExecutionOutput() { in ~ExecutionOutput()
185 ExecutionOutput& Commit() { in Commit()
288 StatusOr<ExecutionOutput> ExecuteOnStream(
[all …]
executable.cc
112 TF_ASSIGN_OR_RETURN(ExecutionOutput out, in ExecuteAsyncOnStream()
118 StatusOr<ExecutionOutput> Executable::ExecuteOnStream( in ExecuteOnStream()
122 StatusOr<ExecutionOutput> result = ExecuteAsyncOnStream( in ExecuteOnStream()
173 StatusOr<ExecutionOutput> Executable::ExecuteOnStreamWrapper( in ExecuteOnStreamWrapper()
176 StatusOr<ExecutionOutput> result = in ExecuteOnStreamWrapper()
290 StatusOr<ExecutionOutput> Executable::ExecuteAsyncOnStreamWrapper( in ExecuteAsyncOnStreamWrapper()
294 StatusOr<ExecutionOutput> return_value = ExecuteAsyncOnStream( in ExecuteAsyncOnStreamWrapper()
304 ExecutionOutput& result) { in MarkToBeReleasedArguments()
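
The executable.h / executable.cc hits above declare the type and its main producer: ExecuteOnStream returns a StatusOr<ExecutionOutput> wrapping the result buffers. For orientation, a minimal sketch of that synchronous path, assuming a hypothetical helper RunAndTakeResult, an ExecuteOnStream parameter list (run options, arguments, profile), and a ConsumeResult() accessor that are not themselves visible in the hits:

    #include <utility>
    #include <vector>

    #include "tensorflow/compiler/xla/service/executable.h"

    // Hypothetical helper; parameter list of ExecuteOnStream is assumed from
    // context and may differ between TensorFlow versions.
    xla::StatusOr<xla::ScopedShapedBuffer> RunAndTakeResult(
        xla::Executable* executable,
        const xla::ServiceExecutableRunOptions* run_options,
        std::vector<xla::ExecutionInput> arguments) {
      xla::StatusOr<xla::ExecutionOutput> out = executable->ExecuteOnStream(
          run_options, std::move(arguments), /*hlo_execution_profile=*/nullptr);
      if (!out.ok()) return out.status();
      xla::ExecutionOutput output = out.ConsumeValueOrDie();
      // ConsumeResult() (assumed accessor) hands back the ScopedShapedBuffer
      // that owns the output device memory.
      return std::move(output).ConsumeResult();
    }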
hlo_runner.h
88 StatusOr<ExecutionOutput> ExecuteWithDeviceBuffers(
93 StatusOr<ExecutionOutput> ExecuteWithDeviceBuffers(
hlo_runner.cc
97 TF_ASSIGN_OR_RETURN(ExecutionOutput result, in Execute()
111 TF_ASSIGN_OR_RETURN(ExecutionOutput result, in ExecuteWithExecutable()
150 StatusOr<ExecutionOutput> HloRunner::ExecuteWithDeviceBuffers( in ExecuteWithDeviceBuffers()
159 StatusOr<ExecutionOutput> HloRunner::ExecuteWithDeviceBuffers( in ExecuteWithDeviceBuffers()
176 ExecutionOutput retval, in ExecuteWithDeviceBuffers()
/external/tensorflow/tensorflow/stream_executor/tpu/
tpu_executable_interface.cc
53 StatusOr<ExecutionOutput>
93 ExecutionOutput result(std::move(device_shape), allocator, device_ordinal); in AllocateOutputMemoryWithInputReuse()
168 StatusOr<ExecutionOutput> TpuExecutableInterface::ExecuteAsyncOnStream( in ExecuteAsyncOnStream()
186 ExecutionOutput result, in ExecuteAsyncOnStream()
tpu_executable_interface.h
47 StatusOr<ExecutionOutput> ExecuteAsyncOnStream(
66 StatusOr<ExecutionOutput> AllocateOutputMemoryWithInputReuse(
/external/tensorflow/tensorflow/compiler/xla/client/
local_client.h
56 StatusOr<ExecutionOutput> Run(std::vector<ExecutionInput> arguments,
67 StatusOr<ExecutionOutput> RunAsync(std::vector<ExecutionInput> arguments,
77 StatusOr<ExecutionOutput> RunAsync(
local_client.cc
186 StatusOr<ExecutionOutput> LocalExecutable::Run( in Run()
193 return AsyncCallAndBlockHostUntilDone<ExecutionOutput>( in Run()
280 StatusOr<ExecutionOutput> LocalExecutable::RunAsync( in RunAsync()
309 TF_ASSIGN_OR_RETURN(ExecutionOutput outputs, in RunAsync()
322 StatusOr<ExecutionOutput> LocalExecutable::RunAsync( in RunAsync()
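
The local_client.h:56 overload above takes ExecutionInput arguments and returns StatusOr<ExecutionOutput>. A hypothetical caller, following the same ConsumeValueOrDie() pattern that appears in the xla_compile_on_demand_op.cc and buffer_donation_test.cc hits below; the run-options parameter is assumed, since only the first argument is visible in the hit:

    #include <utility>
    #include <vector>

    #include "tensorflow/compiler/xla/client/local_client.h"

    // Hypothetical helper; CHECK-fails on a non-OK status via ConsumeValueOrDie().
    xla::ExecutionOutput RunLocalOrDie(
        xla::LocalExecutable* executable,
        std::vector<xla::ExecutionInput> arguments,
        xla::ExecutableRunOptions run_options) {
      xla::StatusOr<xla::ExecutionOutput> run_result =
          executable->Run(std::move(arguments), run_options);
      return run_result.ConsumeValueOrDie();
    }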
/external/tensorflow/tensorflow/compiler/xla/service/cpu/
cpu_executable.h
58 StatusOr<ExecutionOutput> ExecuteAsyncOnStream(
119 StatusOr<ExecutionOutput> CreateResultShapedBuffer(
cpu_executable.cc
225 StatusOr<ExecutionOutput> CpuExecutable::CreateResultShapedBuffer( in CreateResultShapedBuffer()
230 ExecutionOutput result(/*on_device_shape=*/result_shape(), in CreateResultShapedBuffer()
325 StatusOr<ExecutionOutput> CpuExecutable::ExecuteAsyncOnStream( in ExecuteAsyncOnStream()
359 ExecutionOutput result, in ExecuteAsyncOnStream()
/external/tensorflow/tensorflow/compiler/xrt/kernels/
tpu_execute_op.cc
187 xla::StatusOr<xla::ExecutionOutput> RunExecutable( in RunExecutable()
215 xla::ExecutionOutput output, in RunExecutable()
229 xla::StatusOr<xla::ExecutionOutput> ExecuteTPUProgram( in ExecuteTPUProgram()
236 auto runfn = [&]() -> xla::StatusOr<xla::ExecutionOutput> { in ExecuteTPUProgram()
243 return memory_manager->Run<xla::ExecutionOutput>( in ExecuteTPUProgram()
355 xla::ExecutionOutput output, in DoWork()
456 xla::ExecutionOutput output, in DoWork()
xrt_execute_op.cc
268 se::Stream* stream, xla::ExecutionOutput run_result, xla::Backend* backend, in CreateOutputTuple()
356 xla::ExecutionOutput run_result, in RunExecutable()
/external/tensorflow/tensorflow/compiler/xla/service/interpreter/
executable_base.cc
41 StatusOr<ExecutionOutput> InterpreterExecutableBase::ExecuteAsyncOnStream( in ExecuteAsyncOnStream()
135 ExecutionOutput result(std::move(result_buffers)); in ExecuteAsyncOnStream()
executable_base.h
40 StatusOr<ExecutionOutput> ExecuteAsyncOnStream(
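
The interpreter hit at executable_base.cc:135 above builds its return value with the single-argument ExecutionOutput(ScopedShapedBuffer) constructor declared at executable.h:150. A minimal sketch of that ownership transfer; the wrapper function name here is illustrative only:

    #include <utility>

    #include "tensorflow/compiler/xla/service/executable.h"

    // The ScopedShapedBuffer owning the output device memory is moved into the
    // ExecutionOutput, which then owns (and will eventually deallocate) it.
    xla::ExecutionOutput WrapResultBuffers(xla::ScopedShapedBuffer result_buffers) {
      return xla::ExecutionOutput(std::move(result_buffers));
    }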
/external/tensorflow/tensorflow/compiler/xla/service/gpu/
gpu_executable.h
114 StatusOr<ExecutionOutput> ExecuteAsyncOnStream(
126 StatusOr<ExecutionOutput> ExecuteAsyncOnStreamImpl(
gpu_executable.cc
398 StatusOr<ExecutionOutput> GpuExecutable::ExecuteAsyncOnStream( in ExecuteAsyncOnStream()
411 ExecutionOutput out, in ExecuteAsyncOnStream()
416 StatusOr<ExecutionOutput> GpuExecutable::ExecuteAsyncOnStreamImpl( in ExecuteAsyncOnStreamImpl()
438 ExecutionOutput result(/*on_device_shape=*/output_shape_, memory_allocator, in ExecuteAsyncOnStreamImpl()
/external/tensorflow/tensorflow/core/tpu/
tpu_execute.h
40 xla::StatusOr<xla::ExecutionOutput> TPUExecute(
tpu_execute.cc
412 xla::StatusOr<xla::ExecutionOutput> TPUExecute( in TPUExecute()
498 xla::StatusOr<xla::ExecutionOutput> output = in TPUExecute()
tpu_on_demand_compiler.cc
99 StatusOr<ExecutionOutput> ExecuteAsyncOnStream( in ExecuteAsyncOnStream()
160 ExecutionOutput output(std::move(result)); in ExecuteAsyncOnStream()
/external/tensorflow/tensorflow/compiler/jit/
xla_compile_on_demand_op.cc
87 xla::StatusOr<xla::ExecutionOutput> run_result = in Run()
90 xla::ExecutionOutput execution_output = run_result.ConsumeValueOrDie(); in Run()
/external/tensorflow/tensorflow/compiler/xla/service/gpu/tests/
mlir_gpu_test_base.cc
81 StatusOr<ExecutionOutput> MlirGpuTestBase::RunMlirModule( in RunMlirModule()
130 TF_ASSIGN_OR_RETURN(ExecutionOutput output, in RunMlirModuleWithHostBuffers()
mlir_gpu_test_base.h
55 StatusOr<ExecutionOutput> RunMlirModule(
/external/tensorflow/tensorflow/compiler/xla/tests/
buffer_donation_test.cc
125 StatusOr<ExecutionOutput> output_status = in RunAndCheck()
137 ExecutionOutput output = output_status.ConsumeValueOrDie(); in RunAndCheck()
/external/tensorflow/tensorflow/compiler/jit/kernels/
xla_ops.cc
279 xla::StatusOr<xla::ExecutionOutput> execution_output; in Compute()
515 xla::StatusOr<xla::ExecutionOutput> execution_output; in Compute()
/external/tensorflow/tensorflow/core/tpu/kernels/
tpu_execute_op.cc
755 xla::ExecutionOutput output, in DoWork()
