/external/tensorflow/tensorflow/compiler/tf2xla/ |
D | xla_compilation_device.cc |
     90  void XlaCompilationDevice::Compute(OpKernel* op_kernel,  in Compute() argument
     93  << FormatNodeDefForError(op_kernel->def());  in Compute()
     96  metadata.set_op_type(op_kernel->type_string());  in Compute()
     97  metadata.set_op_name(op_kernel->name());  in Compute()
    101  op_kernel->def(), std::numeric_limits<int>::max());  in Compute()
    110  op_kernel->Compute(context);  in Compute()
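
The hits above show the interposition pattern: a Device subclass overrides Compute() to do per-op bookkeeping before delegating to OpKernel::Compute(). A minimal sketch of that pattern, assuming the usual in-tree TensorFlow build; LoggingDevice and its log text are hypothetical, not the XlaCompilationDevice implementation:

#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/framework/op_kernel.h"

// Hypothetical device that logs each op before delegating, mirroring the
// shape of XlaCompilationDevice::Compute() indexed above.
class LoggingDevice : public tensorflow::Device {
 public:
  using tensorflow::Device::Device;  // inherit Device(Env*, DeviceAttributes)

  void Compute(tensorflow::OpKernel* op_kernel,
               tensorflow::OpKernelContext* context) override {
    VLOG(2) << "LoggingDevice::Compute " << op_kernel->name() << ":"
            << op_kernel->type_string();
    op_kernel->Compute(context);  // delegate to the kernel itself
  }

  tensorflow::Status Sync() override { return tensorflow::Status::OK(); }
};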
|
D | graph_compiler.cc |
    135  std::unique_ptr<OpKernel> op_kernel(op_kernel_raw);  in Compile() local
    145  params.op_kernel = op_kernel.get();  in Compile()
    165  VLOG(3) << "Translating " << params.op_kernel->name();  in Compile()
    169  device_->Compute(CHECK_NOTNULL(params.op_kernel), &op_context);  in Compile()
|
D | xla_op_registry.h |
    160  static Status CompileTimeConstantInputs(const OpKernel& op_kernel,  in CompileTimeConstantInputs() argument
    162  return CompileTimeConstantInputs(op_kernel.def(), /*op_kernel=*/&op_kernel,  in CompileTimeConstantInputs()
    246  const OpKernel* op_kernel,
|
/external/tensorflow/tensorflow/core/framework/ |
D | op_kernel_test.cc |
    332  params.op_kernel = op.get();  in TEST_F()
    357  params.op_kernel = op.get();  in TEST_F()
    385  params.op_kernel = op.get();  in TEST_F()
    668  params.op_kernel = op.get();  in TEST_F()
    753  std::unique_ptr<OpKernel> op_kernel =  in TEST_F() local
    756  auto* get_attr_kernel = static_cast<GetAttrKernel*>(op_kernel.get());  in TEST_F()
    760  op_kernel = ExpectSuccess("GetAttrStringList", DEVICE_CPU,  in TEST_F()
    762  get_attr_kernel = static_cast<GetAttrKernel*>(op_kernel.get());  in TEST_F()
    774  std::unique_ptr<OpKernel> op_kernel = ExpectSuccess(  in TEST_F() local
    777  auto* get_attr_kernel = static_cast<GetAttrKernel*>(op_kernel.get());  in TEST_F()
    [all …]
|
D | op_kernel.cc |
    279  params, static_cast<int>(params->op_kernel->output_types().size())) {}  in OpKernelContext()
    353  TF_RETURN_IF_ERROR(params_->op_kernel->InputRange(name, &start, &stop));  in input()
    371  TF_RETURN_IF_ERROR(params_->op_kernel->InputRange(name, &start, &stop));  in input_dtype()
    389  TF_RETURN_IF_ERROR(params_->op_kernel->InputRange(name, &start, &stop));  in input_ref_mutex()
    401  DCHECK_LT(index, num_inputs()) << " name: " << op_kernel().name();  in input()
    473  params_->op_kernel->InputRange(input_name, &input_index, &stop));  in forward_input_to_output_with_shape()
    481  params_->op_kernel->OutputRange(output_name, &output_index, &stop));  in forward_input_to_output_with_shape()
    596  TF_RETURN_IF_ERROR(params_->op_kernel->InputRange(name, &start, &stop));  in mutable_input()
    621  TF_RETURN_IF_ERROR(params_->op_kernel->InputRange(name, &start, &stop));  in replace_ref_input()
    637  TF_RETURN_IF_ERROR(params_->op_kernel->InputRange(name, &start, &stop));  in input_list()
    [all …]
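
Most of these op_kernel.cc hits are OpKernelContext accessors resolving a declared input name to a slot range via OpKernel::InputRange(). A hedged sketch of how calling code uses that mapping; the helper name and the input name "values" are hypothetical:

#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.h"

// Resolve the hypothetical list input "values" to its [start, stop) slot
// range, then walk the individual tensors by index.
tensorflow::Status LogNamedInputs(tensorflow::OpKernelContext* ctx) {
  int start, stop;
  TF_RETURN_IF_ERROR(ctx->op_kernel().InputRange("values", &start, &stop));
  for (int i = start; i < stop; ++i) {
    VLOG(1) << "values[" << (i - start)
            << "] dtype=" << tensorflow::DataTypeString(ctx->input_dtype(i));
  }
  return tensorflow::Status::OK();
}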
|
/external/tensorflow/tensorflow/core/grappler/optimizers/ |
D | evaluation_utils.cc |
     92  std::unique_ptr<OpKernel> op_kernel(  in EvaluateNode() local
    100  params.op_kernel = op_kernel.get();  in EvaluateNode()
    104  const int num_outputs = op_kernel->num_outputs();  in EvaluateNode()
    113  op_kernel->Compute(&op_context);  in EvaluateNode()
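
EvaluateNode() is the canonical single-op evaluation recipe: fill OpKernelContext::Params (including params.op_kernel, the field these queries keep matching), build a context, and call Compute(). A sketch under the assumption that the device, kernel, and inputs already exist; the function name is made up and error handling beyond the context status is elided:

#include "tensorflow/core/framework/op_kernel.h"

// Run one already-constructed kernel over pre-gathered inputs.
tensorflow::Status RunKernelOnce(
    tensorflow::DeviceBase* device, tensorflow::OpKernel* op_kernel,
    tensorflow::gtl::InlinedVector<tensorflow::TensorValue, 4>* inputs) {
  tensorflow::OpKernelContext::Params params;
  params.device = device;
  params.frame_iter = tensorflow::FrameAndIter(0, 0);
  params.inputs = inputs;
  params.op_kernel = op_kernel;  // the assignment the index keeps matching

  tensorflow::OpKernelContext op_context(&params, op_kernel->num_outputs());
  op_kernel->Compute(&op_context);
  return op_context.status();
}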
|
/external/tensorflow/tensorflow/compiler/jit/ |
D | xla_device.cc |
    373  void XlaDevice::Compute(OpKernel* op_kernel, OpKernelContext* context) {  in Compute() argument
    374  VLOG(2) << "XlaDevice::Compute " << op_kernel->name() << ":"  in Compute()
    375  << op_kernel->type_string();  in Compute()
    376  op_kernel->Compute(context);  in Compute()
    379  void XlaDevice::ComputeAsync(AsyncOpKernel* op_kernel, OpKernelContext* context,  in ComputeAsync() argument
    381  VLOG(2) << "XlaDevice::ComputeAsync " << op_kernel->name() << ":"  in ComputeAsync()
    382  << op_kernel->type_string();  in ComputeAsync()
    383  tracing::ScopedActivity activity(op_kernel->name(), op_kernel->type_string(),  in ComputeAsync()
    384  op_kernel->IsExpensive());  in ComputeAsync()
    385  op_kernel->ComputeAsync(context, done);  in ComputeAsync()
|
D | xla_compile_on_demand_op.cc |
     91  Status XlaCompileOnDemandOp::MustArgumentBeConstant(const OpKernel* op_kernel,  in MustArgumentBeConstant() argument
     99  *op_kernel, &constant_input_indices));  in MustArgumentBeConstant()
    104  Status XlaCompileOnDemandOp::ShouldArgumentBeConstant(const OpKernel* op_kernel,  in ShouldArgumentBeConstant() argument
    110  return MustArgumentBeConstant(op_kernel, argument_idx, result);  in ShouldArgumentBeConstant()
    123  TF_RETURN_IF_ERROR(ShouldArgumentBeConstant(&ctx->op_kernel(), i,  in Compile()
    133  TF_RETURN_IF_ERROR(MustArgumentBeConstant(&ctx->op_kernel(), i,  in Compile()
|
D | xla_compile_on_demand_op.h |
     41  Status ShouldArgumentBeConstant(const OpKernel* op_kernel, int64 argument_idx,
     43  Status MustArgumentBeConstant(const OpKernel* op_kernel, int64 argument_idx,
|
/external/tensorflow/tensorflow/core/kernels/ |
D | cwise_ops_common.cc |
     39  const string& op = ctx->op_kernel().type_string();  in SetComputeError()
     41  DataTypeIsInteger(ctx->op_kernel().input_type(0))) {  in SetComputeError()
     44  DataTypeIsInteger(ctx->op_kernel().input_type(0)) &&  in SetComputeError()
     45  DataTypeIsSigned(ctx->op_kernel().input_type(1))) {  in SetComputeError()
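
SetComputeError() builds its error from kernel metadata alone: the op's registered type_string() and input dtypes queried through ctx->op_kernel(). A hedged sketch of that idea; the helper name and message wording are made up, not the cwise_ops_common.cc text:

#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"

// Report integer division by zero using only ctx->op_kernel() metadata,
// in the spirit of SetComputeError() above.
void ReportIntegerZeroDivision(tensorflow::OpKernelContext* ctx) {
  const std::string& op = ctx->op_kernel().type_string();
  if (tensorflow::DataTypeIsInteger(ctx->op_kernel().input_type(0))) {
    ctx->CtxFailure(tensorflow::errors::InvalidArgument(
        op, " raised an integer-division-by-zero error"));
  }
}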
|
D | scoped_allocator_ops.cc |
    140  context->op_kernel().requested_input(i), " to node ",  in Compute()
    141  context->op_kernel().name(), " input bounds = [", input_lb, ", ",  in Compute()
    148  context->op_kernel().requested_input(i), " to node ",  in Compute()
    149  context->op_kernel().name(), " input bounds = [", input_lb, ", ",  in Compute()
    211  " to node ", context->op_kernel().name()));  in Compute()
    217  " to node ", context->op_kernel().name()));  in Compute()
|
D | mkl_tfconv_op.h |
     61  static void ConvertMklToTf(OpKernel* op_kernel, OpKernelContext* context,  in ConvertMklToTf() argument
     80  DataType input_data_type = op_kernel->input_type(input_number);  in ConvertMklToTf()
     81  DataType output_data_type = op_kernel->output_type(input_number);  in ConvertMklToTf()
|
D | ops_testutil.h |
     55  for (int index = 0; index < params->op_kernel->num_outputs(); index++) {  in SetOutputAttrs()
     58  (params->op_kernel->output_memory_types()[index] == HOST_MEMORY);  in SetOutputAttrs()
    171  params_.get()->op_kernel = kernel_.get();  in RunOpKernel()
|
/external/tensorflow/tensorflow/core/common_runtime/ |
D | device.h |
     88  virtual void Compute(OpKernel* op_kernel, OpKernelContext* context) {  in Compute() argument
     89  op_kernel->Compute(context);  in Compute()
     93  virtual void ComputeAsync(AsyncOpKernel* op_kernel, OpKernelContext* context,  in ComputeAsync() argument
     95  op_kernel->ComputeAsync(context, std::move(done));  in ComputeAsync()
|
D | renamed_device.h |
     99  void Compute(OpKernel* op_kernel, OpKernelContext* context) override {  in Compute() argument
    100  underlying_->Compute(op_kernel, context);  in Compute()
    103  void ComputeAsync(AsyncOpKernel* op_kernel, OpKernelContext* context,  in ComputeAsync() argument
    105  underlying_->ComputeAsync(op_kernel, context, std::move(done));  in ComputeAsync()
|
D | executor.cc |
    768  const OpKernel* op_kernel = item->kernel;  in SetAllocAttrs() local
    769  DCHECK_LT(out, op_kernel->output_memory_types().size());  in SetAllocAttrs()
    770  bool on_host = op_kernel->output_memory_types()[out] == HOST_MEMORY;  in SetAllocAttrs()
   1730  OpKernel* op_kernel = item.kernel;  in Process() local
   1731  params.op_kernel = op_kernel;  in Process()
   1799  const string& op_name = op_kernel->name();  in Process()
   1807  op_name, strings::StrCat(op_kernel->type_string(),  in Process()
   1809  device->Compute(op_kernel, &ctx);  in Process()
   1815  strings::StrCat(op_kernel->type_string(), "#id=", step_id_,  in Process()
   1818  device->Compute(op_kernel, &ctx);  in Process()
   [all …]
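
SetAllocAttrs() above decides where each output lives by consulting the kernel's declared output_memory_types(). A sketch of that check; the helper name is hypothetical, and the DCHECK mirrors the bounds check the executor performs:

#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/op_kernel.h"

// Build AllocatorAttributes for output slot `out`, requesting host memory
// when the kernel declared the output HOST_MEMORY, as SetAllocAttrs() does.
tensorflow::AllocatorAttributes OutputAllocAttrs(
    const tensorflow::OpKernel& op_kernel, int out) {
  tensorflow::AllocatorAttributes attr;
  DCHECK_LT(out, static_cast<int>(op_kernel.output_memory_types().size()));
  if (op_kernel.output_memory_types()[out] == tensorflow::HOST_MEMORY) {
    attr.set_on_host(true);
  }
  return attr;
}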
|
/external/tensorflow/tensorflow/core/kernels/data/ |
D | dataset_test_base.cc |
     90  const NodeDef& node_def, std::unique_ptr<OpKernel>* op_kernel) {  in CreateOpKernel() argument
     95  op_kernel->reset(kernel);  in CreateOpKernel()
    172  Status DatasetOpsTestBase::RunOpKernel(OpKernel* op_kernel,  in RunOpKernel() argument
    174  device_->Compute(op_kernel, context);  in RunOpKernel()
    186  params_->op_kernel = kernel;  in CreateOpKernelContext()
    199  for (int index = 0; index < params_->op_kernel->num_outputs(); index++) {  in CreateOpKernelContext()
    202  (params_->op_kernel->output_memory_types()[index] == HOST_MEMORY);  in CreateOpKernelContext()
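
DatasetOpsTestBase::CreateOpKernel() above is a thin wrapper over the framework factory of the same name. A hedged sketch of what such a wrapper does, using the framework's tensorflow::CreateOpKernel(); MakeCpuKernel is a hypothetical name and the CPU device type is an assumption:

#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/public/version.h"

// Instantiate a kernel for `node_def` via the framework factory, then hand
// ownership to the caller through the unique_ptr, as line 95 above does.
tensorflow::Status MakeCpuKernel(
    tensorflow::DeviceBase* device, const tensorflow::NodeDef& node_def,
    std::unique_ptr<tensorflow::OpKernel>* op_kernel) {
  tensorflow::OpKernel* kernel = nullptr;
  TF_RETURN_IF_ERROR(tensorflow::CreateOpKernel(
      tensorflow::DeviceType(tensorflow::DEVICE_CPU), device,
      device->GetAllocator({}), node_def, TF_GRAPH_DEF_VERSION, &kernel));
  op_kernel->reset(kernel);
  return tensorflow::Status::OK();
}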
|
D | concatenate_dataset_op_test.cc |
     50  std::unique_ptr<OpKernel> *op_kernel) {  in CreateConcatenateDatasetKernel() argument
     54  TF_RETURN_IF_ERROR(CreateOpKernel(node_def_, op_kernel));  in CreateConcatenateDatasetKernel()
     60  OpKernel *const op_kernel,  in CreateConcatenateDatasetContext() argument
     63  TF_RETURN_IF_ERROR(CheckOpKernelInput(*op_kernel, *inputs));  in CreateConcatenateDatasetContext()
     64  TF_RETURN_IF_ERROR(CreateOpKernelContext(op_kernel, inputs, context));  in CreateConcatenateDatasetContext()
|
D | zip_dataset_op_test.cc |
     54  std::unique_ptr<OpKernel> *op_kernel) {  in CreateZipDatasetKernel() argument
     64  TF_RETURN_IF_ERROR(CreateOpKernel(node_def_, op_kernel));  in CreateZipDatasetKernel()
     70  OpKernel *const op_kernel,  in CreateZipDatasetContext() argument
     73  TF_RETURN_IF_ERROR(CheckOpKernelInput(*op_kernel, *inputs));  in CreateZipDatasetContext()
     74  TF_RETURN_IF_ERROR(CreateOpKernelContext(op_kernel, inputs, context));  in CreateZipDatasetContext()
|
D | single_threaded_executor.cc |
    126  OpKernel* op_kernel = kernel_state.kernel;  in Initialize() local
    128  DCHECK_LT(out, op_kernel->output_memory_types().size());  in Initialize()
    129  bool on_host = op_kernel->output_memory_types()[out] == HOST_MEMORY;  in Initialize()
    256  params.op_kernel = kernel_state.kernel;  in RunAsync()
|
/external/tensorflow/tensorflow/core/common_runtime/sycl/ |
D | sycl_device.cc |
     28  void SYCLDevice::Compute(OpKernel* op_kernel, OpKernelContext* context) {  in Compute() argument
     33  op_kernel->name());  in Compute()
     35  op_kernel->Compute(context);  in Compute()
|
/external/tensorflow/tensorflow/core/common_runtime/gpu/ |
D | gpu_device.cc |
    124  operation_ = context->op_kernel().name() + "/EigenAllocator";  in Reinitialize()
    489  void BaseGPUDevice::Compute(OpKernel* op_kernel, OpKernelContext* context) {  in Compute() argument
    499  if (op_kernel->is_internal() && op_kernel->type_string() == "_Recv") {  in Compute()
    503  ComputeHelper(op_kernel, context);  in Compute()
    507  string BaseGPUDevice::ComputeOpKernelDebugString(const OpKernel& op_kernel,  in ComputeOpKernelDebugString() argument
    509  return strings::StrCat(op_kernel.name(), " op ", op_kernel.type_string(),  in ComputeOpKernelDebugString()
    514  void BaseGPUDevice::ComputeHelper(OpKernel* op_kernel,  in ComputeHelper() argument
    529  << ComputeOpKernelDebugString(*op_kernel, stream_id);  in ComputeHelper()
    569  op_kernel->Compute(context);  in ComputeHelper()
    579  << ComputeOpKernelDebugString(*op_kernel, stream_id);  in ComputeHelper()
    [all …]
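
BaseGPUDevice routes Compute() through ComputeHelper() and builds trace labels with ComputeOpKernelDebugString(), all from OpKernel metadata. A sketch of such a label builder; the function name is hypothetical and the exact wording of the real debug string is not reproduced:

#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/lib/strings/strcat.h"

// Compose a human-readable identifier for a kernel launch, in the spirit of
// BaseGPUDevice::ComputeOpKernelDebugString(); `stream_id` is caller-tracked.
std::string OpDebugString(const tensorflow::OpKernel& op_kernel,
                          int stream_id) {
  return tensorflow::strings::StrCat(op_kernel.name(), " op ",
                                     op_kernel.type_string(),
                                     " on GPU stream ", stream_id);
}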
|
D | gpu_device.h |
     82  void Compute(OpKernel* op_kernel, OpKernelContext* context) override;
     86  void ComputeAsync(AsyncOpKernel* op_kernel, OpKernelContext* context,
    167  void ComputeHelper(OpKernel* op_kernel, OpKernelContext* context);
    169  string ComputeOpKernelDebugString(const OpKernel& op_kernel,
|
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/ |
D | conv_ops.cc |
     55  ctx->op_kernel().type_string(), ctx->Input(0), ctx->Input(1), attrs_);  in Compile()
    107  MakeXlaBackpropInputConvOp(ctx->op_kernel().type_string(), input_shape,  in Compile()
    165  ctx->op_kernel().type_string(), ctx->Input(0), filter_shape,  in Compile()
|
D | reduction_ops_common.cc |
     50  VLOG(1) << "ReductionOp: " << ctx->op_kernel().name();  in Compile()
    101  string desc = ctx->op_kernel().name();  in Compile()
|