/external/tensorflow/tensorflow/core/kernels/ |
D | relu_op.h |
     38  void Operate(OpKernelContext* context, const Tensor& input, Tensor* output) {  in Operate()
     49  static void ValidateSameSizeHelper(OpKernelContext* context, const Tensor& g,  in ValidateSameSizeHelper()
     54  static bool ValidateSameSize(OpKernelContext* context, const Tensor& g,  in ValidateSameSize()
     66  void OperateNoTemplate(OpKernelContext* context, const Tensor& g,
     76  void Operate(OpKernelContext* context, const Tensor& g, const Tensor& a,  in Operate()
     83  void ReluGradOp<Device, T>::OperateNoTemplate(OpKernelContext* context,  in OperateNoTemplate()
     97  void Operate(OpKernelContext* context, const Tensor& input, Tensor* output) {  in Operate()
    109  void OperateNoTemplate(OpKernelContext* context, const Tensor& g,
    118  void Operate(OpKernelContext* context, const Tensor& g, const Tensor& a,  in Operate()
    125  void Relu6GradOp<Device, T>::OperateNoTemplate(OpKernelContext* context,  in OperateNoTemplate()
    [all …]
|
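For orientation, a minimal sketch of the pattern these Operate()/OperateNoTemplate() helpers plug into: a kernel reads its input from the OpKernelContext, allocates (or forwards) an output buffer through the same context, and fills it. The op name "MyRelu" and the float-only loop are hypothetical simplifications; the real ReluOp dispatches to a functor templated on Device and T.

#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"

namespace tensorflow {

class MyReluOp : public OpKernel {
 public:
  explicit MyReluOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}

  void Compute(OpKernelContext* context) override {
    const Tensor& input = context->input(0);
    Tensor* output = nullptr;
    // Reuse the input buffer in place when the runtime allows it.
    OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
                                {0}, 0, input.shape(), &output));
    const auto in = input.flat<float>();
    auto out = output->flat<float>();
    for (int64 i = 0; i < in.size(); ++i) {
      out(i) = in(i) > 0.0f ? in(i) : 0.0f;
    }
  }
};

REGISTER_KERNEL_BUILDER(Name("MyRelu").Device(DEVICE_CPU), MyReluOp);

}  // namespace tensorflow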
D | queue_op.h |
     38  void Compute(OpKernelContext* context) override;
     70  void ComputeAsync(OpKernelContext* ctx, DoneCallback callback) final;
     73  virtual void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
     99  void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
    124  void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
    145  void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
    170  void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
    213  void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
    231  void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
    250  void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
    [all …]
|
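The repeated ComputeAsync(ctx, queue, callback) overloads follow a two-step pattern: the base class resolves the queue resource from the context, then hands both to the subclass, which completes asynchronously via a callback. A hedged sketch of that flow, assuming a hypothetical DequeueOneOp and an input named "handle" (the production versions live in queue_op.h and the queue kernels):

#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/queue_interface.h"
#include "tensorflow/core/framework/resource_mgr.h"

namespace tensorflow {

class DequeueOneOp : public AsyncOpKernel {
 public:
  explicit DequeueOneOp(OpKernelConstruction* ctx) : AsyncOpKernel(ctx) {}

  void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override {
    QueueInterface* queue = nullptr;
    // Resolve the queue resource named by the "handle" input.
    OP_REQUIRES_OK_ASYNC(ctx, GetResourceFromContext(ctx, "handle", &queue),
                         done);
    // The callback fires once an element is available (or the op is
    // cancelled); only then may `done` run.
    queue->TryDequeue(ctx, [ctx, queue, done](const QueueInterface::Tuple& tuple) {
      if (ctx->status().ok()) {
        for (size_t i = 0; i < tuple.size(); ++i) {
          ctx->set_output(static_cast<int>(i), tuple[i]);
        }
      }
      queue->Unref();  // matches the ref taken by GetResourceFromContext
      done();
    });
  }
};

}  // namespace tensorflow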
D | linalg_ops_common.h |
     44  void Compute(OpKernelContext* context) override;
     51  virtual int NumMatrixInputs(const OpKernelContext* context) const {  in NumMatrixInputs()
     59  OpKernelContext* context, const TensorShapes& input_matrix_shapes) const {  in ValidateInputMatrixShapes()
     66  static void ValidateSingleMatrix(OpKernelContext* context,
     70  OpKernelContext* context, const TensorShapes& input_matrix_shapes);
     72  static void ValidateSolver(OpKernelContext* context,
     76  static void ValidateSquareSolver(OpKernelContext* context,
    126  virtual void ComputeMatrix(OpKernelContext* context,
    150  void ComputeTensorSlice(OpKernelContext* context, int64 matrix_index,
    156  void AnalyzeInputs(OpKernelContext* context, TensorInputs* inputs,
    [all …]
|
D | transpose_op.h |
     28  void Compute(OpKernelContext* ctx) override;
     31  virtual Status DoTranspose(OpKernelContext* ctx, const Tensor& in,
     41  Status DoTranspose(OpKernelContext* ctx, const Tensor& in,
     51  Status DoTranspose(OpKernelContext* ctx, const Tensor& in,
     61  Status DoTranspose(OpKernelContext* ctx, const Tensor& in,
     71  Status DoTranspose(OpKernelContext* ctx, const Tensor& in,
     83  Status DoTranspose(OpKernelContext* ctx, const Tensor& in,
     95  Status DoTranspose(OpKernelContext* ctx, const Tensor& in,
    107  Status DoTranspose(OpKernelContext* ctx, const Tensor& in,
    119  Status DoTranspose(OpKernelContext* ctx, const Tensor& in,
|
D | meta_support.h |
     29  class OpKernelContext;  variable
     70  void QuantizedGemm(OpKernelContext* context, bool transpose_a, bool transpose_b,
     79  void Requantize(OpKernelContext* context, const qint32* input, int count,
     85  void Dequantize(OpKernelContext* context, const quint8* input, int count,
     90  void Quantize(OpKernelContext*, const float* input, int count, float range_min,
     98  void QuantizedBiasAdd(OpKernelContext* context, const quint8* input,
    106  void Clamp(OpKernelContext* context, const quint8* input, int input_count,
|
D | control_flow_ops.h |
     31  void Compute(OpKernelContext* context) override {}  in Compute()
     42  void Compute(OpKernelContext* context) override;
     55  void Compute(OpKernelContext* context) override;
     68  void Compute(OpKernelContext* context) override;
     81  void Compute(OpKernelContext* context) override;
     93  void Compute(OpKernelContext* context) override;
    109  void Compute(OpKernelContext* context) override;
|
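These Compute overrides belong to Switch/Merge/Enter/Exit/NextIteration-style kernels, which mostly forward tensors through the OpKernelContext rather than compute on them. A minimal sketch of the Switch idea (a hypothetical simplification; the real SwitchOp also handles ref-typed inputs):

#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"

namespace tensorflow {

class MySwitchOp : public OpKernel {
 public:
  explicit MySwitchOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}

  void Compute(OpKernelContext* context) override {
    const Tensor& pred_tensor = context->input(1);
    OP_REQUIRES(context, TensorShapeUtils::IsScalar(pred_tensor.shape()),
                errors::InvalidArgument("pred must be a scalar"));
    // Forward the data tensor to exactly one of the two outputs; the other
    // output is never produced, which is what makes dead branches cheap.
    const bool pred = pred_tensor.scalar<bool>()();
    context->set_output(pred ? 1 : 0, context->input(0));
  }
};

}  // namespace tensorflow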
D | conditional_accumulator_base_op.h |
     58  void Compute(OpKernelContext* ctx) override {  in Compute()
     89  Status SetAccumulatorHandle(OpKernelContext* ctx)  in SetAccumulatorHandle()
    129  void ComputeAsync(OpKernelContext* ctx, DoneCallback callback) final {  in ComputeAsync()
    140  virtual void ComputeAsync(OpKernelContext* ctx,
    153  void Compute(OpKernelContext* ctx) final {  in Compute()
    161  virtual void Compute(OpKernelContext* ctx,
    177  virtual void CheckSignature(OpKernelContext* ctx,
    180  void Compute(OpKernelContext* ctx,  in Compute()
    211  virtual void CheckSignature(OpKernelContext* ctx,
    215  void ComputeAsync(OpKernelContext* ctx,  in ComputeAsync()
|
D | reader_ops.cc |
     32  void Compute(OpKernelContext* context) override {  in Compute()
     41  virtual void ComputeWithReader(OpKernelContext* context,
     56  void ComputeAsync(OpKernelContext* context, DoneCallback done) override {  in ComputeAsync()
     69  virtual void ComputeWithReader(OpKernelContext* context,
     80  void ComputeWithReader(OpKernelContext* context,  in ComputeWithReader()
    106  void ComputeWithReader(OpKernelContext* context,  in ComputeWithReader()
    160  void ComputeWithReader(OpKernelContext* context,  in ComputeWithReader()
    178  void ComputeWithReader(OpKernelContext* context,  in ComputeWithReader()
    197  void ComputeWithReader(OpKernelContext* context,  in ComputeWithReader()
    216  void ComputeWithReader(OpKernelContext* context,  in ComputeWithReader()
    [all …]
|
D | fifo_queue.h |
     42  void TryEnqueue(const Tuple& tuple, OpKernelContext* ctx,
     44  void TryEnqueueMany(const Tuple& tuple, OpKernelContext* ctx,
     46  void TryDequeue(OpKernelContext* ctx, CallbackWithTuple callback) override;
     47  void TryDequeueMany(int num_elements, OpKernelContext* ctx,
     61  void DequeueLocked(OpKernelContext* ctx, Tuple* tuple)
     66  OpKernelContext* ctx,
|
D | priority_queue.h |
     59  void TryEnqueue(const Tuple& tuple, OpKernelContext* ctx,
     61  void TryEnqueueMany(const Tuple& tuple, OpKernelContext* ctx,
     63  void TryDequeue(OpKernelContext* ctx, CallbackWithTuple callback) override;
     64  void TryDequeueMany(int num_elements, OpKernelContext* ctx,
     80  void DequeueLocked(OpKernelContext* ctx, Tuple* tuple)
     85  OpKernelContext* ctx,
|
D | conditional_accumulator_base.h |
     59  virtual void TryApplyGrad(int64 local_step, OpKernelContext* ctx) = 0;
     60  void TryTakeGrad(int num_required, OpKernelContext* ctx,
     83  virtual void DivideAccumGradByCounter(OpKernelContext* ctx)
     85  virtual bool SetOutput(OpKernelContext* ctx) = 0;
     95  OpKernelContext* context;
    102  OpKernelContext* context, CancellationManager* cancellation_manager,  in Attempt()
    156  bool TakeGradLockedHelper(OpKernelContext* ctx, DoneCallback callback)
|
D | save_restore_tensor.h |
     24  class OpKernelContext;  variable
     36  OpKernelContext* context,
     52  void RestoreTensor(OpKernelContext* context,
     66  Status RestoreTensorsV2(OpKernelContext* context, const Tensor& prefix,
|
/external/tensorflow/tensorflow/core/framework/ |
D | op_kernel.cc |
    180  void AsyncOpKernel::Compute(OpKernelContext* context) {  in Compute()
    194  Tensor* PersistentTensor::AccessTensor(OpKernelContext* context) {  in AccessTensor()
    274  const int OpKernelContext::Params::kNeverForward;
    275  const int OpKernelContext::Params::kNoReservation;
    277  OpKernelContext::OpKernelContext(Params* params)  in OpKernelContext()  function in tensorflow::OpKernelContext
    278  : OpKernelContext(  in OpKernelContext()
    281  OpKernelContext::OpKernelContext(Params* params, int num_outputs)  in OpKernelContext()  function in tensorflow::OpKernelContext
    301  OpKernelContext::~OpKernelContext() {  in ~OpKernelContext()
    317  Allocator* OpKernelContext::get_allocator(AllocatorAttributes attr) {  in get_allocator()
    341  void OpKernelContext::SetStatus(const Status& status) {  in SetStatus()
    [all …]
|
D | resource_mgr.h |
    236  ResourceHandle MakeResourceHandle(OpKernelContext* ctx, const string& container,
    241  ResourceHandle MakeResourceHandle(OpKernelContext* ctx, const string& container,  in MakeResourceHandle()
    246  Status MakeResourceHandleToOutput(OpKernelContext* context, int output_index,
    251  ResourceHandle MakePerStepResourceHandle(OpKernelContext* ctx,
    255  const ResourceHandle& HandleFromInput(OpKernelContext* ctx, int input);
    256  Status HandleFromInput(OpKernelContext* ctx, StringPiece input,
    264  Status CreateResource(OpKernelContext* ctx, const ResourceHandle& p, T* value);
    271  Status LookupResource(OpKernelContext* ctx, const ResourceHandle& p, T** value);
    277  OpKernelContext* ctx, absl::Span<ResourceHandle const> p,
    287  Status LookupOrCreateResource(OpKernelContext* ctx, const ResourceHandle& p,
    [all …]
|
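These helpers are the standard way a kernel turns a resource-handle input back into a live object. A hedged sketch, assuming a hypothetical MyCounter resource class (exact ResourceBase details, such as the const-ness of DebugString, vary between TF versions):

#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"

namespace tensorflow {

// Hypothetical refcounted resource.
class MyCounter : public ResourceBase {
 public:
  string DebugString() override { return "MyCounter"; }
  mutex mu;
  int64 value GUARDED_BY(mu) = 0;
};

class IncrementCounterOp : public OpKernel {
 public:
  explicit IncrementCounterOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}

  void Compute(OpKernelContext* ctx) override {
    MyCounter* counter = nullptr;
    // HandleFromInput reads the ResourceHandle from input 0; LookupResource
    // resolves it in the ResourceMgr and takes a reference.
    OP_REQUIRES_OK(ctx, LookupResource(ctx, HandleFromInput(ctx, 0), &counter));
    core::ScopedUnref unref(counter);  // drop the reference on scope exit
    mutex_lock l(counter->mu);
    ++counter->value;
  }
};

}  // namespace tensorflow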
D | lookup_interface.h |
     25  class OpKernelContext;  variable
     50  virtual Status Find(OpKernelContext* ctx, const Tensor& keys, Tensor* values,
     64  virtual Status Insert(OpKernelContext* ctx, const Tensor& keys,
     76  virtual Status Remove(OpKernelContext* ctx, const Tensor& keys) = 0;
     85  virtual Status ExportValues(OpKernelContext* ctx) = 0;
     91  virtual Status ImportValues(OpKernelContext* ctx, const Tensor& keys,
|
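On the consumer side, a table kernel resolves the LookupInterface resource and calls Find with a default value for missing keys. A rough sketch built only on the signatures listed above (the FindWithDefault wrapper itself is hypothetical):

#include "tensorflow/core/framework/lookup_interface.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"

namespace tensorflow {

// Given an already-resolved table, look up `keys`, writing `default_value`
// for any key that is absent. Mirrors the Find() contract listed above.
Status FindWithDefault(OpKernelContext* ctx, lookup::LookupInterface* table,
                       const Tensor& keys, const Tensor& default_value,
                       Tensor** values_out) {
  TensorShape out_shape = keys.shape();
  // The output appends the table's value shape to the key shape.
  out_shape.AppendShape(table->value_shape());
  TF_RETURN_IF_ERROR(ctx->allocate_output(0, out_shape, values_out));
  return table->Find(ctx, keys, *values_out, default_value);
}

}  // namespace tensorflow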
D | queue_interface.h |
     43  virtual void TryEnqueue(const Tuple& tuple, OpKernelContext* ctx,
     48  virtual void TryEnqueueMany(const Tuple& tuple, OpKernelContext* ctx,
     54  virtual void TryDequeue(OpKernelContext* ctx, CallbackWithTuple callback) = 0;
     62  virtual void TryDequeueMany(int num_elements, OpKernelContext* ctx,
     77  virtual void Close(OpKernelContext* ctx, bool cancel_pending_enqueues,
|
D | op_kernel.h |
     71  class OpKernelContext;  // declared below,  variable
    115  virtual void Compute(OpKernelContext* context) = 0;
    228  virtual void ComputeAsync(OpKernelContext* context, DoneCallback done) = 0;
    233  void Compute(OpKernelContext* context) final;
    250  Tensor* AccessTensor(OpKernelContext* context);
    451  OpInputList(OpKernelContext* ctx, int start, int stop)  in OpInputList()
    460  OpKernelContext* ctx_;  // not owned
    470  OpMutableInputList(OpKernelContext* ctx, int start, int stop)  in OpMutableInputList()
    481  OpKernelContext* ctx_;  // not owned
    492  OpOutputList(OpKernelContext* ctx, int start, int stop)  in OpOutputList()
    [all …]
|
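OpInputList and OpOutputList (lines 451 and 492 above) are thin views over a contiguous slice of the context's inputs or outputs, used by ops with variadic arguments; kernels usually obtain them by argument name rather than by index range. A brief sketch, assuming a hypothetical op with a variadic "values" input list and an "outputs" output list:

#include "tensorflow/core/framework/op_kernel.h"

namespace tensorflow {

class PassThroughListOp : public OpKernel {
 public:
  explicit PassThroughListOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}

  void Compute(OpKernelContext* ctx) override {
    OpInputList values;
    OP_REQUIRES_OK(ctx, ctx->input_list("values", &values));
    OpOutputList outputs;
    OP_REQUIRES_OK(ctx, ctx->output_list("outputs", &outputs));
    OP_REQUIRES(ctx, values.size() == outputs.size(),
                errors::InvalidArgument("list sizes must match"));
    for (int i = 0; i < values.size(); ++i) {
      // set() forwards the tensor; allocate() would create a fresh buffer.
      outputs.set(i, values[i]);
    }
  }
};

}  // namespace tensorflow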
/external/tensorflow/tensorflow/core/common_runtime/ |
D | collective_util.h |
     39  OpKernelContext::Params sub_params_;
     46  std::unique_ptr<OpKernelContext> sub_ctx_;
     47  SubContext(OpKernelContext* ctx, OpKernelContext::Params* params,
     52  Status ComputeBinOp(OpKernelContext* op_ctx, OpKernelContext::Params* params,
|
/external/tensorflow/tensorflow/core/kernels/data/ |
D | iterator_ops.h |
     40  void Compute(OpKernelContext* context) override LOCKS_EXCLUDED(mu_);
     65  OpKernelContext* ctx, std::unique_ptr<DeviceMgr>* device_mgr,
     86  void Compute(OpKernelContext* context) override;
    105  void Compute(OpKernelContext* ctx) override;
    114  void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override;
    130  void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override;
    142  void Compute(OpKernelContext* ctx) override;
    150  void Compute(OpKernelContext* ctx) override;
    157  void Compute(OpKernelContext* ctx) override;
|
D | optional_ops.h |
     31  Status WriteOptionalWithValueToOutput(OpKernelContext* ctx, int output_index,
     36  Status WriteOptionalNoneToOutput(OpKernelContext* ctx, int output_index);
    108  Status OptionalZerosLike(OpKernelContext* ctx, const OptionalVariant& x,  in OptionalZerosLike()
    125  Status OptionalBinaryAdd(OpKernelContext* ctx, const OptionalVariant& a,  in OptionalBinaryAdd()
    159  void Compute(OpKernelContext* ctx) override;
    166  void Compute(OpKernelContext* ctx) override;
    173  void Compute(OpKernelContext* ctx) override;
    189  void Compute(OpKernelContext* ctx) override;
|
/external/tensorflow/tensorflow/contrib/mpi_collectives/ |
D | ring.cc |
     36  template Status RingAllreduce<CPUDevice, int>(OpKernelContext*, const Tensor*,
     38  template Status RingAllreduce<CPUDevice, long long>(OpKernelContext*,
     41  template Status RingAllreduce<CPUDevice, float>(OpKernelContext*, const Tensor*,
     45  template Status RingAllgather<CPUDevice, int>(OpKernelContext*, const Tensor*,
     48  template Status RingAllgather<CPUDevice, long long>(OpKernelContext*,
     52  template Status RingAllgather<CPUDevice, float>(OpKernelContext*, const Tensor*,
|
D | ring.cu.cc |
     57  template Status RingAllreduce<GPUDevice, int>(OpKernelContext*, const Tensor*,
     59  template Status RingAllreduce<GPUDevice, long long>(OpKernelContext*,
     62  template Status RingAllreduce<GPUDevice, float>(OpKernelContext*, const Tensor*,
     66  template Status RingAllgather<GPUDevice, int>(OpKernelContext*, const Tensor*,
     69  template Status RingAllgather<GPUDevice, long long>(OpKernelContext*,
     73  template Status RingAllgather<GPUDevice, float>(OpKernelContext*, const Tensor*,
|
/external/tensorflow/tensorflow/contrib/mpi_collectives/kernels/ |
D | ring.cc |
     36  template Status RingAllreduce<CPUDevice, int>(OpKernelContext*, const Tensor*,
     38  template Status RingAllreduce<CPUDevice, long long>(OpKernelContext*,
     41  template Status RingAllreduce<CPUDevice, float>(OpKernelContext*, const Tensor*,
     45  template Status RingAllgather<CPUDevice, int>(OpKernelContext*, const Tensor*,
     48  template Status RingAllgather<CPUDevice, long long>(OpKernelContext*,
     52  template Status RingAllgather<CPUDevice, float>(OpKernelContext*, const Tensor*,
|
D | ring.cu.cc |
     57  template Status RingAllreduce<GPUDevice, int>(OpKernelContext*, const Tensor*,
     59  template Status RingAllreduce<GPUDevice, long long>(OpKernelContext*,
     62  template Status RingAllreduce<GPUDevice, float>(OpKernelContext*, const Tensor*,
     66  template Status RingAllgather<GPUDevice, int>(OpKernelContext*, const Tensor*,
     69  template Status RingAllgather<GPUDevice, long long>(OpKernelContext*,
     73  template Status RingAllgather<GPUDevice, float>(OpKernelContext*, const Tensor*,
|
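Both ring.cc copies consist of explicit template instantiations: RingAllreduce/RingAllgather are defined in a .cc file, so every (Device, T) combination that other translation units link against must be spelled out. A generic sketch of the same pattern, with a hypothetical Accumulate helper and a stand-in CPUDevice tag:

#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"

namespace tensorflow {

struct CPUDevice {};  // stand-in for the usual Eigen::ThreadPoolDevice alias

// The definition lives in this translation unit only...
template <typename Device, typename T>
Status Accumulate(OpKernelContext* ctx, const Tensor* in, Tensor* out) {
  out->flat<T>() = in->flat<T>();  // placeholder body
  return Status::OK();
}

// ...so each combination used elsewhere is instantiated explicitly,
// mirroring the RingAllreduce/RingAllgather lists above.
template Status Accumulate<CPUDevice, int>(OpKernelContext*, const Tensor*,
                                           Tensor*);
template Status Accumulate<CPUDevice, float>(OpKernelContext*, const Tensor*,
                                             Tensor*);

}  // namespace tensorflow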
/external/tensorflow/tensorflow/c/ |
D | kernels.cc |
     76  void Compute(OpKernelContext* ctx) override {  in Compute()
    124  auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);  in TF_NumInputs()
    129  auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);  in TF_NumOutputs()
    135  auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);  in TF_GetInput()
    149  auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);  in TF_SetOutput()
    171  auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);  in TF_OpKernelContext_Failure()
    193  auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);  in DEFINE_TF_GETATTR()
    198  return reinterpret_cast<::tensorflow::OpKernelContext*>(ctx)->step_id();  in TF_StepId()
|
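kernels.cc implements the C kernel API by casting TF_OpKernelContext* back to tensorflow::OpKernelContext*, as the `in TF_*()` annotations show. A hedged sketch of the caller's side, using the TF_* entry points listed above plus the builder functions from tensorflow/c/kernels.h (the op name "CIdentity" and the registration helper are hypothetical):

#include "tensorflow/c/c_api.h"
#include "tensorflow/c/kernels.h"

// Identity-style compute: fetch input 0 and forward it to output 0.
static void IdentityCompute(void* kernel, TF_OpKernelContext* ctx) {
  TF_Status* status = TF_NewStatus();
  TF_Tensor* input = nullptr;
  TF_GetInput(ctx, 0, &input, status);        // wraps OpKernelContext::input()
  if (TF_GetCode(status) == TF_OK) {
    TF_SetOutput(ctx, 0, input, status);      // wraps set_output()
  }
  if (TF_GetCode(status) != TF_OK) {
    TF_OpKernelContext_Failure(ctx, status);  // wraps SetStatus()
  }
  if (input != nullptr) TF_DeleteTensor(input);
  TF_DeleteStatus(status);
}

void RegisterCIdentityKernel() {
  TF_Status* status = TF_NewStatus();
  TF_KernelBuilder* builder = TF_NewKernelBuilder(
      "CIdentity", "CPU", /*create_func=*/nullptr, &IdentityCompute,
      /*delete_func=*/nullptr);
  TF_RegisterKernelBuilder("CIdentityKernel", builder, status);
  TF_DeleteStatus(status);
}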