/external/tensorflow/tensorflow/core/kernels/ |
D | queue_op.h |
      70   void ComputeAsync(OpKernelContext* ctx, DoneCallback callback) final;
      73   virtual void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
      99   void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
      124  void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
      145  void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
      170  void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
      213  void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
      231  void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
      250  void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
      262  void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
|
D | queue_op.cc |
      53   void QueueOpKernel::ComputeAsync(OpKernelContext* ctx, DoneCallback callback) {  in ComputeAsync() function in tensorflow::QueueOpKernel
      62   ComputeAsync(ctx, queue, [callback, queue]() {  in ComputeAsync()
      88   void EnqueueOp::ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,  in ComputeAsync() function in tensorflow::EnqueueOp
      129  void EnqueueManyOp::ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,  in ComputeAsync() function in tensorflow::EnqueueManyOp
      166  void DequeueOp::ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,  in ComputeAsync() function in tensorflow::DequeueOp
      209  void DequeueManyOp::ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,  in ComputeAsync() function in tensorflow::DequeueManyOp
      282  void DequeueUpToOp::ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,  in ComputeAsync() function in tensorflow::DequeueUpToOp
      333  void QueueCloseOp::ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,  in ComputeAsync() function in tensorflow::QueueCloseOp
      347  void QueueSizeOp::ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,  in ComputeAsync() function in tensorflow::QueueSizeOp
      358  void QueueIsClosedOp::ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,  in ComputeAsync() function in tensorflow::QueueIsClosedOp
|
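The queue_op.cc hits above (file lines 53 and 62) outline a dispatch shape that the conditional_accumulator_base_op.h and barrier_ops.cc entries further down repeat: the AsyncOpKernel-level ComputeAsync resolves the resource, then forwards to a resource-aware virtual overload whose completion lambda unrefs the resource before running the original callback. Below is a minimal sketch of that pattern, not the actual source; the GetResourceFromContext helper and the "handle" input name are assumptions of the sketch, while AsyncOpKernel, DoneCallback and QueueInterface come from the listing.

// Sketch of the two-level dispatch suggested by queue_op.h / queue_op.cc.
// GetResourceFromContext and the "handle" input name are assumptions here.
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/queue_interface.h"
#include "tensorflow/core/framework/resource_mgr.h"

namespace tensorflow {

class SketchQueueOpKernel : public AsyncOpKernel {
 public:
  explicit SketchQueueOpKernel(OpKernelConstruction* ctx)
      : AsyncOpKernel(ctx) {}

  // Entry point used by the executor (cf. queue_op.h line 70, queue_op.cc 53).
  void ComputeAsync(OpKernelContext* ctx, DoneCallback callback) final {
    QueueInterface* queue = nullptr;
    OP_REQUIRES_OK_ASYNC(
        ctx, GetResourceFromContext(ctx, "handle", &queue), callback);
    // Completion lambda unrefs the queue before signalling done, mirroring
    // the [callback, queue]() capture visible at queue_op.cc line 62.
    ComputeAsync(ctx, queue, [callback, queue]() {
      queue->Unref();
      callback();
    });
  }

 protected:
  // Queue-specific work (enqueue, dequeue, close, size, ...) lives in the
  // subclasses listed above.
  virtual void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
                            DoneCallback callback) = 0;
};

}  // namespace tensorflow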
D | nccl_ops.cc |
      97   void ComputeAsync(OpKernelContext* c, DoneCallback done) override {  in ComputeAsync() function in tensorflow::__anon6c22edc00111::NcclAllReduceOpKernel
      133  void ComputeAsync(OpKernelContext* c, DoneCallback done) override {  in ComputeAsync() function in tensorflow::__anon6c22edc00111::NcclReduceSendKernel
      164  void ComputeAsync(OpKernelContext* c, DoneCallback done) override {  in ComputeAsync() function in tensorflow::__anon6c22edc00111::NcclReduceRecvKernel
      203  void ComputeAsync(OpKernelContext* c, DoneCallback done) override {  in ComputeAsync() function in tensorflow::__anon6c22edc00111::NcclBroadcastSendKernel
      232  void ComputeAsync(OpKernelContext* c, DoneCallback done) override {  in ComputeAsync() function in tensorflow::__anon6c22edc00111::NcclBroadcastRecvKernel
      265  void ComputeAsync(OpKernelContext* c, DoneCallback done) override {  in ComputeAsync() function in tensorflow::__anon6c22edc00111::NcclStubKernel
|
D | conditional_accumulator_base_op.h |
      198  void ComputeAsync(OpKernelContext* ctx, DoneCallback callback) final {  in ComputeAsync() function
      202  ComputeAsync(ctx, accumulator, [callback, accumulator]() {  in ComputeAsync()
      209  virtual void ComputeAsync(OpKernelContext* ctx,
      239  void ComputeAsync(OpKernelContext* ctx,  in ComputeAsync() function
|
D | barrier_ops.cc |
      507  void ComputeAsync(OpKernelContext* ctx, DoneCallback callback) final {  in ComputeAsync() function in tensorflow::barrier::BarrierOpKernel
      511  ComputeAsync(ctx, barrier, [callback, barrier]() {  in ComputeAsync()
      518  virtual void ComputeAsync(OpKernelContext* ctx, Barrier* barrier,
      532  void ComputeAsync(OpKernelContext* ctx, Barrier* barrier,  in ComputeAsync() function in tensorflow::barrier::InsertManyOp
      581  void ComputeAsync(OpKernelContext* ctx, Barrier* barrier,  in ComputeAsync() function in tensorflow::barrier::TakeManyOp
      640  void ComputeAsync(OpKernelContext* ctx, Barrier* barrier,  in ComputeAsync() function in tensorflow::barrier::BarrierCloseOp
      659  void ComputeAsync(OpKernelContext* ctx, Barrier* barrier,  in ComputeAsync() function in tensorflow::barrier::BarrierIncompleteSizeOp
      678  void ComputeAsync(OpKernelContext* ctx, Barrier* barrier,  in ComputeAsync() function in tensorflow::barrier::BarrierReadySizeOp
|
D | stack.h |
      44   void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override;
      63   void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override;
|
D | collective_ops.cc |
      55   ComputeAsync(c, done);  in CanProceedWithCompute()
      93   void ComputeAsync(OpKernelContext* c, DoneCallback done) override {  in ComputeAsync() function in tensorflow::__anond4bee1ea0111::CollectiveGatherOpKernel
      217  void ComputeAsync(OpKernelContext* c, DoneCallback done) override {  in ComputeAsync() function in tensorflow::__anond4bee1ea0111::CollectiveReduceOpKernel
      289  void ComputeAsync(OpKernelContext* c, DoneCallback done) override {  in ComputeAsync() function in tensorflow::__anond4bee1ea0111::CollectiveBcastSendOpKernel
      365  void ComputeAsync(OpKernelContext* c, DoneCallback done) override {  in ComputeAsync() function in tensorflow::__anond4bee1ea0111::CollectiveBcastRecvOpKernel
|
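The collective_ops.cc hit at file line 55 is the one call site in this index that is not an override: ComputeAsync(c, done) is invoked from inside CanProceedWithCompute(), i.e. the kernel re-enters itself. A hedged sketch of that re-entry pattern follows; only the idea of re-running ComputeAsync from a readiness callback comes from the listing, and the ResolveGroupAsync helper and group_ready_ flag are illustrative stand-ins, not TensorFlow APIs.

// Hedged sketch of the re-entry pattern implied by collective_ops.cc line 55.
#include <functional>
#include "tensorflow/core/framework/op_kernel.h"

namespace tensorflow {

class SketchCollectiveKernel : public AsyncOpKernel {
 public:
  explicit SketchCollectiveKernel(OpKernelConstruction* ctx)
      : AsyncOpKernel(ctx) {}

  void ComputeAsync(OpKernelContext* c, DoneCallback done) override {
    if (!group_ready_) {
      // First invocation: resolve the collective group asynchronously, then
      // run ComputeAsync again from the readiness callback.
      ResolveGroupAsync([this, c, done]() {
        group_ready_ = true;
        ComputeAsync(c, done);
      });
      return;
    }
    // Group is ready: launch the collective, then signal completion.
    done();
  }

 private:
  // Placeholder for asynchronous group/parameter resolution.
  void ResolveGroupAsync(std::function<void()> ready) { ready(); }
  bool group_ready_ = false;
};

}  // namespace tensorflow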
D | sendrecv_ops.h | 40 void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override;
|
D | partitioned_function_ops.h | 41 void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override;
|
D | function_ops.h | 65 void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override;
|
D | functional_ops.cc |
      124  void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override {  in ComputeAsync() function in tensorflow::__anon351ad7530111::IfOp
      217  void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override {  in ComputeAsync() function in tensorflow::__anon351ad7530111::CaseOp
      330  void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override {  in ComputeAsync() function in tensorflow::__anon351ad7530111::WhileOp
      522  void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override {  in ComputeAsync() function in tensorflow::__anon351ad7530111::ForOp
|
D | stack.cc |
      220  void StackPushOp::ComputeAsync(OpKernelContext* ctx, DoneCallback done) {  in ComputeAsync() function in tensorflow::StackPushOp
      289  void StackPopOp::ComputeAsync(OpKernelContext* ctx, DoneCallback done) {  in ComputeAsync() function in tensorflow::StackPopOp
|
D | rpc_op.cc | 60 void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override { in ComputeAsync() function in tensorflow::RpcOp
|
D | determinant_op.cc |
      133  void ComputeAsync(OpKernelContext* context, DoneCallback done) final {  in ComputeAsync() function in tensorflow::DeterminantOpGpu
      273  void ComputeAsync(OpKernelContext* context, DoneCallback done) final {  in ComputeAsync() function in tensorflow::LogDeterminantOpGpu
|
D | self_adjoint_eig_v2_op_gpu.cc | 48 void ComputeAsync(OpKernelContext* context, DoneCallback done) final { in ComputeAsync() function in tensorflow::SelfAdjointEigV2OpGpu
|
D | function_ops.cc |
      242  void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override {  in ComputeAsync() function in tensorflow::SymbolicGradientOp
      310  void RemoteCallOp::ComputeAsync(OpKernelContext* ctx, DoneCallback done) {  in ComputeAsync() function in tensorflow::RemoteCallOp
|
D | cholesky_op.cc | 100 void ComputeAsync(OpKernelContext* context, DoneCallback done) final { in ComputeAsync() function in tensorflow::CholeskyOpGpu
|
/external/tensorflow/tensorflow/core/grappler/optimizers/ |
D | gpu_swapping_kernels.cc |
      29   void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override {  in ComputeAsync() function in tensorflow::__anon2b740a240111::CopyFromGpuToHostKernel
      62   void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override {  in ComputeAsync() function in tensorflow::__anon2b740a240111::CopyFromHostToGpuKernel
|
/external/tensorflow/tensorflow/core/common_runtime/ |
D | device.h |
      90   virtual void ComputeAsync(AsyncOpKernel* op_kernel, OpKernelContext* context,  in ComputeAsync() function
      92   op_kernel->ComputeAsync(context, std::move(done));  in ComputeAsync()
|
D | renamed_device.h |
      136  void ComputeAsync(AsyncOpKernel* op_kernel, OpKernelContext* context,  in ComputeAsync() function
      138  underlying_device_->ComputeAsync(op_kernel, context, std::move(done));  in ComputeAsync()
|
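The device.h and renamed_device.h hits above show the runtime side of the same contract: Device::ComputeAsync has a default body that forwards straight to the kernel, and RenamedDevice delegates to its underlying_device_. A condensed sketch of that forwarding, assuming the truncated third parameter is an AsyncOpKernel::DoneCallback; the class names below are illustrative stand-ins, not the real Device hierarchy.

// Condensed sketch of the forwarding visible at device.h:90-92 and
// renamed_device.h:136-138.
#include <utility>
#include "tensorflow/core/framework/op_kernel.h"

namespace tensorflow {

class SketchDevice {
 public:
  virtual ~SketchDevice() = default;
  // Default behaviour: hand the context and completion callback straight to
  // the async kernel.
  virtual void ComputeAsync(AsyncOpKernel* op_kernel, OpKernelContext* context,
                            AsyncOpKernel::DoneCallback done) {
    op_kernel->ComputeAsync(context, std::move(done));
  }
};

class SketchRenamedDevice : public SketchDevice {
 public:
  explicit SketchRenamedDevice(SketchDevice* underlying)
      : underlying_device_(underlying) {}
  // RenamedDevice-style delegation to the wrapped device.
  void ComputeAsync(AsyncOpKernel* op_kernel, OpKernelContext* context,
                    AsyncOpKernel::DoneCallback done) override {
    underlying_device_->ComputeAsync(op_kernel, context, std::move(done));
  }

 private:
  SketchDevice* underlying_device_;
};

}  // namespace tensorflow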
D | testlib_ops.cc | 92 void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override { in ComputeAsync() function in tensorflow::test::DelayOp
|
/external/tensorflow/tensorflow/compiler/xrt/kernels/ |
D | xrt_execute_op.cc |
      234  void ComputeAsync(OpKernelContext* context, DoneCallback done) override;
      243  void XRTExecuteOp::ComputeAsync(OpKernelContext* context, DoneCallback done) {  in ComputeAsync() function in tensorflow::__anonbcd96c1c0111::XRTExecuteOp
      319  void ComputeAsync(OpKernelContext* context, DoneCallback done) override;
      328  void XRTExecuteChainedOp::ComputeAsync(OpKernelContext* context,  in ComputeAsync() function in tensorflow::__anonbcd96c1c0111::XRTExecuteChainedOp
|
/external/tensorflow/tensorflow/core/kernels/data/ |
D | map_defun_op.h | 51 void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override;
|
/external/tensorflow/tensorflow/core/kernels/data/experimental/ |
D | to_tf_record_op.cc | 48 void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override { in ComputeAsync() function in tensorflow::data::experimental::__anon13282d8d0111::ToTFRecordOp
|
/external/tensorflow/tensorflow/compiler/jit/ |
D | xla_device.h | 142 void ComputeAsync(AsyncOpKernel* op_kernel, OpKernelContext* context,
|
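Taken together, every kernel entry in this index implements one contract: override AsyncOpKernel::ComputeAsync(OpKernelContext*, DoneCallback), produce outputs or an error status on the context, and run the callback exactly once on every path. A minimal, self-contained sketch of that contract follows; the op name ExampleAsyncOp and its trivial body are illustrative and not taken from any file above.

// Minimal AsyncOpKernel override; everything here is illustrative except the
// base-class API (ComputeAsync, DoneCallback, OP_REQUIRES_OK_ASYNC).
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_shape.h"

namespace tensorflow {

class ExampleAsyncOp : public AsyncOpKernel {
 public:
  explicit ExampleAsyncOp(OpKernelConstruction* ctx) : AsyncOpKernel(ctx) {}

  void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override {
    // Real kernels defer the work to a queue, barrier, collective, GPU
    // stream, or function library; the one hard requirement is that `done`
    // runs exactly once, after outputs or an error status are set on ctx.
    Tensor* out = nullptr;
    OP_REQUIRES_OK_ASYNC(ctx, ctx->allocate_output(0, TensorShape({}), &out),
                         done);
    out->scalar<int32>()() = 0;
    done();
  }
};

// Registration would follow the usual pattern, e.g.:
// REGISTER_KERNEL_BUILDER(Name("ExampleAsyncOp").Device(DEVICE_CPU),
//                         ExampleAsyncOp);

}  // namespace tensorflow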