Searched refs:OpKernelContext (Results 1 – 25 of 1023) sorted by relevance

/external/tensorflow/tensorflow/core/kernels/
relu_op.h
38 void Operate(OpKernelContext* context, const Tensor& input, Tensor* output) { in Operate()
49 static void ValidateSameSizeHelper(OpKernelContext* context, const Tensor& g, in ValidateSameSizeHelper()
54 static bool ValidateSameSize(OpKernelContext* context, const Tensor& g, in ValidateSameSize()
66 void OperateNoTemplate(OpKernelContext* context, const Tensor& g,
76 void Operate(OpKernelContext* context, const Tensor& g, const Tensor& a, in Operate()
83 void ReluGradOp<Device, T>::OperateNoTemplate(OpKernelContext* context, in OperateNoTemplate()
97 void Operate(OpKernelContext* context, const Tensor& input, Tensor* output) { in Operate()
109 void OperateNoTemplate(OpKernelContext* context, const Tensor& g,
118 void Operate(OpKernelContext* context, const Tensor& g, const Tensor& a, in Operate()
125 void Relu6GradOp<Device, T>::OperateNoTemplate(OpKernelContext* context, in OperateNoTemplate()
[all …]
queue_op.h
38 void Compute(OpKernelContext* context) override;
70 void ComputeAsync(OpKernelContext* ctx, DoneCallback callback) final;
73 virtual void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
99 void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
124 void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
145 void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
170 void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
213 void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
231 void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
250 void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
[all …]
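
The queue_op.h hits above are all ComputeAsync overloads: queue operations can block waiting on queue state, so they implement the asynchronous kernel interface rather than plain Compute. For reference, a minimal sketch of that interface follows; the op name WaitSketch and its body are hypothetical, while AsyncOpKernel, DoneCallback, OP_REQUIRES_OK_ASYNC, and REGISTER_KERNEL_BUILDER are the real framework API.

  #include "tensorflow/core/framework/op_kernel.h"

  namespace tensorflow {

  class WaitSketchOp : public AsyncOpKernel {
   public:
    explicit WaitSketchOp(OpKernelConstruction* ctx) : AsyncOpKernel(ctx) {}

    // ComputeAsync must not block; it hands `done` to whatever finishes later.
    void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override {
      ctx->env()->SchedClosure([ctx, done]() {
        Tensor* out = nullptr;
        OP_REQUIRES_OK_ASYNC(
            ctx, ctx->allocate_output(0, TensorShape({}), &out), done);
        out->scalar<int64_t>()() = 0;  // placeholder result
        done();  // tell the runtime this kernel has finished
      });
    }
  };

  REGISTER_KERNEL_BUILDER(Name("WaitSketch").Device(DEVICE_CPU), WaitSketchOp);

  }  // namespace tensorflow
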
meta_support.h
29 class OpKernelContext; variable
70 void QuantizedGemm(OpKernelContext* context, bool transpose_a, bool transpose_b,
79 void Requantize(OpKernelContext* context, const qint32* input, int count,
85 void Dequantize(OpKernelContext* context, const quint8* input, int count,
90 void Quantize(OpKernelContext*, const float* input, int count, float range_min,
98 void QuantizedBiasAdd(OpKernelContext* context, const quint8* input,
106 void Clamp(OpKernelContext* context, const quint8* input, int input_count,
control_flow_ops.h
31 void Compute(OpKernelContext* context) override {} in Compute()
42 void Compute(OpKernelContext* context) override;
57 void Compute(OpKernelContext* context) override;
70 void Compute(OpKernelContext* context) override;
83 void Compute(OpKernelContext* context) override;
96 void Compute(OpKernelContext* context) override;
108 void Compute(OpKernelContext* context) override;
124 void Compute(OpKernelContext* context) override;
conditional_accumulator_base_op.h
57 void Compute(OpKernelContext* ctx) override { in Compute()
76 virtual void SetHandleToOutput(OpKernelContext* ctx)
79 virtual Status CheckSignature(OpKernelContext* ctx) = 0;
97 Status SetAccumulatorHandle(OpKernelContext* ctx) in SetAccumulatorHandle()
133 void Compute(OpKernelContext* ctx) final { in Compute()
141 virtual void Compute(OpKernelContext* ctx,
147 virtual void CheckSignature(OpKernelContext* ctx, in CheckSignature()
167 void Compute(OpKernelContext* ctx, in Compute()
196 void ComputeAsync(OpKernelContext* ctx, DoneCallback callback) final { in ComputeAsync()
207 virtual void ComputeAsync(OpKernelContext* ctx,
[all …]
transpose_op.h
28 void Compute(OpKernelContext* ctx) override;
31 virtual Status DoTranspose(OpKernelContext* ctx, const Tensor& in,
41 Status DoTranspose(OpKernelContext* ctx, const Tensor& in,
51 Status DoTranspose(OpKernelContext* ctx, const Tensor& in,
61 Status DoTranspose(OpKernelContext* ctx, const Tensor& in,
73 Status DoTranspose(OpKernelContext* ctx, const Tensor& in,
85 Status DoTranspose(OpKernelContext* ctx, const Tensor& in,
97 Status DoTranspose(OpKernelContext* ctx, const Tensor& in,
tensor_list_util.h
24 class OpKernelContext; variable
29 OpKernelContext* c, const TensorList& a, const TensorList& b,
31 std::function<Status(OpKernelContext* ctx, const Tensor& a, const Tensor& b,
36 OpKernelContext* c, const TensorList& x, TensorList* y,
37 std::function<Status(OpKernelContext* ctx, const Tensor& input,
fifo_queue.h
42 void TryEnqueue(const Tuple& tuple, OpKernelContext* ctx,
44 void TryEnqueueMany(const Tuple& tuple, OpKernelContext* ctx,
46 void TryDequeue(OpKernelContext* ctx, CallbackWithTuple callback) override;
47 void TryDequeueMany(int num_elements, OpKernelContext* ctx,
61 void DequeueLocked(OpKernelContext* ctx, Tuple* tuple)
66 OpKernelContext* ctx,
priority_queue.h
59 void TryEnqueue(const Tuple& tuple, OpKernelContext* ctx,
61 void TryEnqueueMany(const Tuple& tuple, OpKernelContext* ctx,
63 void TryDequeue(OpKernelContext* ctx, CallbackWithTuple callback) override;
64 void TryDequeueMany(int num_elements, OpKernelContext* ctx,
80 void DequeueLocked(OpKernelContext* ctx, Tuple* tuple)
85 OpKernelContext* ctx,
reader_ops.cc
32 void Compute(OpKernelContext* context) override { in Compute()
41 virtual void ComputeWithReader(OpKernelContext* context,
56 void ComputeAsync(OpKernelContext* context, DoneCallback done) override { in ComputeAsync()
69 virtual void ComputeWithReader(OpKernelContext* context,
80 void ComputeWithReader(OpKernelContext* context, in ComputeWithReader()
109 void ComputeWithReader(OpKernelContext* context, in ComputeWithReader()
163 void ComputeWithReader(OpKernelContext* context, in ComputeWithReader()
181 void ComputeWithReader(OpKernelContext* context, in ComputeWithReader()
200 void ComputeWithReader(OpKernelContext* context, in ComputeWithReader()
219 void ComputeWithReader(OpKernelContext* context, in ComputeWithReader()
[all …]
resource_variable_ops.h
27 void Compute(OpKernelContext* ctx) override;
45 void Compute(OpKernelContext* ctx) override;
54 void Compute(OpKernelContext* ctx) override;
64 void Compute(OpKernelContext* ctx) override;
73 void Compute(OpKernelContext* ctx) override;
81 void Compute(OpKernelContext* ctx) override { in Compute()
conditional_accumulator_base.h
59 virtual void TryApplyGrad(int64_t local_step, OpKernelContext* ctx) = 0;
60 void TryTakeGrad(int num_required, OpKernelContext* ctx,
83 virtual void DivideAccumGradByCounter(OpKernelContext* ctx)
85 virtual bool SetOutput(OpKernelContext* ctx) = 0;
95 OpKernelContext* context;
102 OpKernelContext* context, CancellationManager* cancellation_manager, in Attempt()
156 bool TakeGradLockedHelper(OpKernelContext* ctx, DoneCallback callback)
save_restore_tensor.h
24 class OpKernelContext; variable
36 OpKernelContext* context,
52 void RestoreTensor(OpKernelContext* context,
66 Status RestoreTensorsV2(OpKernelContext* context, const Tensor& prefix,
/external/tensorflow/tensorflow/core/kernels/sparse/
kernels.h
39 Status operator()(OpKernelContext* c, TTypes<int64_t>::ConstMatrix indices,
85 Status operator()(OpKernelContext* c,
102 Status operator()(OpKernelContext* c, const int rows, const int cols,
138 Status operator()(OpKernelContext* c,
148 Status Compute(OpKernelContext* ctx, const ConstCSRComponent<T>& a,
158 Status Compute(OpKernelContext* ctx, const ConstCSRComponent<T>& a,
188 explicit CSRSparseMatrixAdd(OpKernelContext* ctx, const T alpha,
196 explicit CSRSparseSparseMatrixMatMul(OpKernelContext* ctx, bool transpose_a,
203 Status operator()(OpKernelContext* ctx, const ConstCSRComponent<T>& x,
210 Status operator()(OpKernelContext* ctx, bool conjugate,
[all …]
/external/tensorflow/tensorflow/core/kernels/data/
iterator_ops.h
55 Status GetNext(OpKernelContext* ctx, std::vector<Tensor>* out_tensors,
62 Status Restore(OpKernelContext* ctx, IteratorStateReader* reader);
69 Status SetIteratorFromDataset(OpKernelContext* ctx,
156 void Compute(OpKernelContext* context) override TF_LOCKS_EXCLUDED(mu_);
167 OpKernelContext* ctx, std::unique_ptr<DeviceMgr>* device_mgr,
191 Status CreateResource(OpKernelContext* ctx,
222 void Compute(OpKernelContext* ctx) final;
223 void ComputeAsync(OpKernelContext* ctx, DoneCallback done) final;
226 virtual Status DoCompute(OpKernelContext* ctx) = 0;
238 Status DoCompute(OpKernelContext* ctx) override;
[all …]
optional_ops.h
30 Status WriteOptionalWithValueToOutput(OpKernelContext* ctx, int output_index,
35 Status WriteOptionalNoneToOutput(OpKernelContext* ctx, int output_index);
38 Status OptionalZerosLike(OpKernelContext* ctx, const OptionalVariant& x, in OptionalZerosLike()
44 Status OptionalBinaryAdd(OpKernelContext* ctx, const OptionalVariant& a, in OptionalBinaryAdd()
53 void Compute(OpKernelContext* ctx) override;
60 void Compute(OpKernelContext* ctx) override;
67 void Compute(OpKernelContext* ctx) override;
83 void Compute(OpKernelContext* ctx) override;
/external/tensorflow/tensorflow/core/common_runtime/
collective_util.h
39 OpKernelContext::Params sub_params_;
46 std::unique_ptr<OpKernelContext> sub_ctx_;
47 SubContext(OpKernelContext* ctx, OpKernelContext::Params* params,
52 Status ComputeBinOp(OpKernelContext* op_ctx, OpKernelContext::Params* params,
/external/tensorflow/tensorflow/core/framework/
op_kernel.cc
215 string OpKernel::ShapeTraceString(const OpKernelContext& ctx) const { in ShapeTraceString()
237 string OpKernel::TraceString(const OpKernelContext& ctx, bool verbose) const { in TraceString()
249 void AsyncOpKernel::Compute(OpKernelContext* context) { in Compute()
335 const int OpKernelContext::Params::kNeverForward;
336 const int OpKernelContext::Params::kNoReservation;
338 OpKernelContext::OpKernelContext(Params* params) in OpKernelContext() function in tensorflow::OpKernelContext
339 : OpKernelContext( in OpKernelContext()
342 OpKernelContext::OpKernelContext(Params* params, int num_outputs) in OpKernelContext() function in tensorflow::OpKernelContext
360 OpKernelContext::~OpKernelContext() { in ~OpKernelContext()
376 Allocator* OpKernelContext::get_allocator(AllocatorAttributes attr) { in get_allocator()
[all …]
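
op_kernel.cc is where OpKernelContext itself is defined: the constructors at 338 and 342 take a Params struct that the caller fills in before running a kernel. A rough sketch of that calling side, assuming Params field names as in recent TensorFlow releases (older releases pass Params::inputs as a pointer to an InlinedVector rather than a Span); RunKernelOnce is a hypothetical helper, not a framework function.

  #include "tensorflow/core/framework/device_base.h"
  #include "tensorflow/core/framework/op_kernel.h"

  namespace tensorflow {

  Status RunKernelOnce(DeviceBase* device, OpKernel* kernel,
                       absl::Span<const TensorValue> inputs) {
    OpKernelContext::Params params;
    params.device = device;    // supplies allocators, Env, thread pools
    params.op_kernel = kernel;
    params.inputs = inputs;
    // Matches the (Params*, num_outputs) constructor seen at op_kernel.cc:342.
    OpKernelContext ctx(&params, kernel->num_outputs());
    kernel->Compute(&ctx);
    return ctx.status();       // any OP_REQUIRES failure surfaces here
  }

  }  // namespace tensorflow
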
lookup_interface.h
25 class OpKernelContext; variable
50 virtual Status Find(OpKernelContext* ctx, const Tensor& keys, Tensor* values,
64 virtual Status Insert(OpKernelContext* ctx, const Tensor& keys,
76 virtual Status Remove(OpKernelContext* ctx, const Tensor& keys) = 0;
85 virtual Status ExportValues(OpKernelContext* ctx) = 0;
91 virtual Status ImportValues(OpKernelContext* ctx, const Tensor& keys,
queue_interface.h
43 virtual void TryEnqueue(const Tuple& tuple, OpKernelContext* ctx,
48 virtual void TryEnqueueMany(const Tuple& tuple, OpKernelContext* ctx,
54 virtual void TryDequeue(OpKernelContext* ctx, CallbackWithTuple callback) = 0;
62 virtual void TryDequeueMany(int num_elements, OpKernelContext* ctx,
77 virtual void Close(OpKernelContext* ctx, bool cancel_pending_enqueues,
/external/tensorflow/tensorflow/core/tpu/kernels/
transfer_ops.h
31 virtual StatusOr<int> GetDeviceOrdinal(OpKernelContext* ctx) = 0;
51 void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override;
54 virtual Status DoWork(OpKernelContext* context, int device_ordinal) = 0;
56 Status RunTransferWithOrdinal(OpKernelContext* ctx, int device_ordinal);
61 virtual Status RunTransfer(OpKernelContext* ctx) = 0;
80 Status RunTransfer(OpKernelContext* ctx) override;
97 Status RunTransfer(OpKernelContext* ctx) override;
111 StatusOr<int> GetDeviceOrdinal(OpKernelContext* ctx) override;
tpu_configuration_ops.h
32 OpKernelContext* ctx);
47 void Compute(OpKernelContext* ctx) override;
71 void Compute(OpKernelContext* ctx) override;
91 void Compute(OpKernelContext* ctx) override;
117 void Compute(OpKernelContext* ctx) override;
139 void Compute(OpKernelContext* ctx) override;
158 void Compute(OpKernelContext* ctx) override;
tpu_functional_ops.h
129 void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override;
151 Status GetTpuCoreOrdinal(OpKernelContext* ctx, uint64 input_hash,
159 Status InitializeVarOnTPU(OpKernelContext* ctx,
174 Status InitializeShardedVarOnTPU(OpKernelContext* ctx,
185 Status ReplaceResourceArgsWithVarHandleOps(Graph* graph, OpKernelContext* ctx,
194 Graph* graph, OpKernelContext* ctx, int device_ordinal,
201 OpKernelContext* ctx)
210 OpKernelContext* ctx) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
212 Status InferShapesWithResourceVar(Graph* graph, OpKernelContext* ctx,
268 FHandle handle, OpKernelContext* ctx,
[all …]
/external/tensorflow/tensorflow/core/kernels/linalg/
linalg_ops_common.h
43 void Compute(OpKernelContext* context) override;
50 virtual int NumMatrixInputs(const OpKernelContext* context) const { in NumMatrixInputs()
58 OpKernelContext* context, const TensorShapes& input_matrix_shapes) const { in ValidateInputMatrixShapes()
65 static void ValidateSingleMatrix(OpKernelContext* context,
69 OpKernelContext* context, const TensorShapes& input_matrix_shapes);
71 static void ValidateSolver(OpKernelContext* context,
75 static void ValidateSquareSolver(OpKernelContext* context,
150 virtual void ComputeMatrix(OpKernelContext* context,
174 void ComputeTensorSlice(OpKernelContext* context, int64_t matrix_index,
180 void AnalyzeInputs(OpKernelContext* context, TensorInputs* inputs,
[all …]
/external/tensorflow/tensorflow/compiler/xrt/
xrt_device.h
36 static Status GetResourceManager(OpKernelContext* ctx, ResourceMgr** rm);
39 OpKernelContext* ctx, int64_t max_number_of_entries);
64 const std::string& platform_name, OpKernelContext* ctx);
75 static Status InitScopedRef(OpKernelContext* ctx, int device_ordinal,
78 static Status InitScopedRef(OpKernelContext* ctx, ScopedRef* scoped_ref);
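
Taken together, nearly every hit in these first 25 files has the same shape: a Compute or ComputeAsync override, a helper that threads an OpKernelContext* through, or a forward declaration of the class. For orientation, a minimal synchronous kernel against that interface; the op name ZeroOutSketch and its float-only body are hypothetical, while OpKernel, OP_REQUIRES_OK, allocate_output, and REGISTER_KERNEL_BUILDER are the real framework API.

  #include "tensorflow/core/framework/op_kernel.h"

  namespace tensorflow {

  class ZeroOutSketchOp : public OpKernel {
   public:
    explicit ZeroOutSketchOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}

    void Compute(OpKernelContext* ctx) override {
      // Inputs arrive through the context...
      const Tensor& input = ctx->input(0);
      // ...and outputs are allocated through it as well.
      Tensor* output = nullptr;
      OP_REQUIRES_OK(ctx, ctx->allocate_output(0, input.shape(), &output));
      output->flat<float>().setZero();  // assumes a float op, for brevity
    }
  };

  REGISTER_KERNEL_BUILDER(Name("ZeroOutSketch").Device(DEVICE_CPU),
                          ZeroOutSketchOp);

  }  // namespace tensorflow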
