/external/tensorflow/tensorflow/lite/kernels/ |
D | reduce.cc |
      76   TfLiteStatus ResizeTempAxis(TfLiteContext* context, OpContext* op_context,  in ResizeTempAxis() argument
      79   axis_size->data[0] = static_cast<int>(NumElements(op_context->axis));  in ResizeTempAxis()
      84   TfLiteStatus ResizeTempSum(TfLiteContext* context, OpContext* op_context,  in ResizeTempSum() argument
      87   size->data[0] = static_cast<int>(NumElements(op_context->output));  in ResizeTempSum()
      92   TfLiteStatus ResizeOutputTensor(TfLiteContext* context, OpContext* op_context) {  in ResizeOutputTensor() argument
      93   size_t num_axis = NumElements(op_context->axis);  in ResizeOutputTensor()
      94   const TfLiteIntArray* input_dims = op_context->input->dims;  in ResizeOutputTensor()
      95   int input_num_dims = NumDimensions(op_context->input);  in ResizeOutputTensor()
      97   return context->ResizeTensor(context, op_context->output,  in ResizeOutputTensor()
      100  const int* axis = GetTensorData<int>(op_context->axis);  in ResizeOutputTensor()
      [all …]
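The kernels in this directory share a small convention: each file defines a per-op context struct whose constructor fetches the node's tensors once, so helpers such as ResizeOutputTensor() take a single op_context pointer instead of repeating tensor lookups. A minimal sketch of that pattern, assuming reduce-style inputs and the GetInput/GetOutput helpers from kernel_util.h; the field set is illustrative, not the exact upstream struct:

    #include "tensorflow/lite/c/c_api_internal.h"   // assumed TfLite C types header
    #include "tensorflow/lite/kernels/kernel_util.h"

    namespace {
    // Illustrative per-op context: gather the node's tensors once in the
    // constructor so every helper can take a single op_context pointer.
    struct OpContext {
      OpContext(TfLiteContext* context, TfLiteNode* node) {
        input = tflite::GetInput(context, node, 0);
        axis = tflite::GetInput(context, node, 1);
        output = tflite::GetOutput(context, node, 0);
      }
      const TfLiteTensor* input;
      const TfLiteTensor* axis;
      TfLiteTensor* output;
    };
    }  // namespace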
|
D | one_hot.cc |
      62   void OneHotComputeImpl(const OneHotContext& op_context) {  in OneHotComputeImpl() argument
      67   for (int i = 0; i < op_context.axis; ++i) {  in OneHotComputeImpl()
      68   prefix_dim_size *= op_context.indices->dims->data[i];  in OneHotComputeImpl()
      70   const int suffix_dim_size = NumElements(op_context.indices) / prefix_dim_size;  in OneHotComputeImpl()
      71   const int depth = *op_context.depth->data.i32;  in OneHotComputeImpl()
      73   const T on_value = *GetTensorData<T>(op_context.on_value);  in OneHotComputeImpl()
      74   const T off_value = *GetTensorData<T>(op_context.off_value);  in OneHotComputeImpl()
      82   T* output = GetTensorData<T>(op_context.output);  in OneHotComputeImpl()
      83   const TI* indices = GetTensorData<TI>(op_context.indices);  in OneHotComputeImpl()
      96   void OneHotCompute(const OneHotContext& op_context) {  in OneHotCompute() argument
      [all …]
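OneHotComputeImpl() above flattens the indices tensor into a [prefix, suffix] view around the one-hot axis, so the output can be treated as [prefix, depth, suffix]. A minimal standalone sketch of that decomposition in plain C++ (not the TFLite code; names and the float element type are assumptions):

    #include <vector>

    // output[(i * depth + d) * suffix + k] is on_value exactly when
    // indices[i * suffix + k] == d; out-of-range indices stay off_value.
    std::vector<float> OneHot(const std::vector<int>& indices, int prefix,
                              int suffix, int depth, float on_value = 1.f,
                              float off_value = 0.f) {
      std::vector<float> out(static_cast<size_t>(prefix) * depth * suffix,
                             off_value);
      for (int i = 0; i < prefix; ++i) {
        for (int k = 0; k < suffix; ++k) {
          const int d = indices[i * suffix + k];
          if (0 <= d && d < depth) out[(i * depth + d) * suffix + k] = on_value;
        }
      }
      return out;
    }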
|
D | pad.cc |
      71   PadContext* op_context) {  in ResizeOutputTensor() argument
      73   TF_LITE_ENSURE_EQ(context, SizeOfDimension(op_context->paddings, 0),  in ResizeOutputTensor()
      74   op_context->dims);  in ResizeOutputTensor()
      75   TF_LITE_ENSURE_EQ(context, SizeOfDimension(op_context->paddings, 1), 2);  in ResizeOutputTensor()
      78   TfLiteIntArray* input_size = op_context->input->dims;  in ResizeOutputTensor()
      80   const int32* paddings_data = GetTensorData<int32>(op_context->paddings);  in ResizeOutputTensor()
      82   for (int idx = 0; idx < op_context->dims; ++idx) {  in ResizeOutputTensor()
      93   return context->ResizeTensor(context, op_context->output, output_size);  in ResizeOutputTensor()
      100  PadContext op_context(context, node);  in Prepare() local
      101  TF_LITE_ENSURE_EQ(context, op_context.input->type, op_context.output->type);  in Prepare()
      [all …]
|
D | strided_slice.cc |
      81   inline int32_t GetBeginValueAtIndex(StridedSliceContext* op_context, int idx) {  in GetBeginValueAtIndex() argument
      82   const int dim = op_context->input->dims->data[idx];  in GetBeginValueAtIndex()
      83   const bool pos_stride = GetTensorData<int32_t>(op_context->strides)[idx] > 0;  in GetBeginValueAtIndex()
      84   return op_context->params->begin_mask & (1 << idx)  in GetBeginValueAtIndex()
      86   : ClampedIndex(GetTensorData<int32_t>(op_context->begin)[idx], dim,  in GetBeginValueAtIndex()
      90   inline int32_t GetEndValueAtIndex(StridedSliceContext* op_context, int idx) {  in GetEndValueAtIndex() argument
      91   const int dim = op_context->input->dims->data[idx];  in GetEndValueAtIndex()
      92   const bool pos_stride = GetTensorData<int32_t>(op_context->strides)[idx] > 0;  in GetEndValueAtIndex()
      93   return op_context->params->end_mask & (1 << idx)  in GetEndValueAtIndex()
      95   : ClampedIndex(GetTensorData<int32_t>(op_context->end)[idx], dim,  in GetEndValueAtIndex()
      [all …]
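The listing cuts GetBeginValueAtIndex() off mid-expression, but the shape of the logic is visible: if bit idx of begin_mask is set, the slice starts at the stride-dependent default; otherwise the user-supplied begin is clamped against the dimension. A hedged standalone restatement of that branch (ClampedIndex here is a simplified stand-in for the kernel's helper, which also handles wraparound of negative indices):

    #include <algorithm>

    // Simplified stand-in: resolve a possibly negative index against dim
    // and clamp it into a range valid for the stride direction.
    static int ClampedIndex(int index, int dim, bool pos_stride) {
      if (index < 0) index += dim;
      return pos_stride ? std::min(std::max(index, 0), dim)
                        : std::min(std::max(index, -1), dim - 1);
    }

    // A set begin_mask bit means "start from the default for this stride
    // direction" (0 going forward, dim - 1 going backward).
    static int BeginValue(int begin_mask, int idx, int dim, int stride,
                          int begin) {
      const bool pos_stride = stride > 0;
      return (begin_mask & (1 << idx)) ? (pos_stride ? 0 : dim - 1)
                                       : ClampedIndex(begin, dim, pos_stride);
    }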
|
D | space_to_batch_nd.cc |
      57   SpaceToBatchNDContext* op_context) {  in ResizeOutputTensor() argument
      58   TfLiteIntArray* input_size = op_context->input->dims;  in ResizeOutputTensor()
      59   const int32* block_shape = GetTensorData<int32>(op_context->block_shape);  in ResizeOutputTensor()
      60   const int32* paddings_data = GetTensorData<int32>(op_context->paddings);  in ResizeOutputTensor()
      62   TF_LITE_ENSURE_EQ(context, NumDimensions(op_context->block_shape),  in ResizeOutputTensor()
      64   TF_LITE_ENSURE_EQ(context, op_context->block_shape->dims->data[0],  in ResizeOutputTensor()
      66   TF_LITE_ENSURE_EQ(context, NumDimensions(op_context->paddings),  in ResizeOutputTensor()
      87   return context->ResizeTensor(context, op_context->output, output_size);  in ResizeOutputTensor()
      94   SpaceToBatchNDContext op_context(context, node);  in Prepare() local
      95   TF_LITE_ENSURE_EQ(context, NumDimensions(op_context.input),  in Prepare()
      [all …]
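The elided lines between the checks and the ResizeTensor call (66-87) compute the output shape. A hedged sketch of that arithmetic for the 4-D NHWC case this kernel handles; the paddings layout ([top, bottom, left, right]) and names are assumptions:

    #include <array>

    // SpaceToBatchND shape inference: batch grows by the block volume and
    // each padded spatial dimension must divide evenly by its block size.
    std::array<int, 4> SpaceToBatchShape(const std::array<int, 4>& in,  // N,H,W,C
                                         const std::array<int, 2>& block,
                                         const std::array<int, 4>& pad) {
      return {in[0] * block[0] * block[1],           // batch * block volume
              (in[1] + pad[0] + pad[1]) / block[0],  // padded height / block_h
              (in[2] + pad[2] + pad[3]) / block[1],  // padded width / block_w
              in[3]};                                // channels unchanged
    }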
|
D | maximum_minimum.cc |
      53   OpContext op_context(context, node);  in Prepare() local
      54   TF_LITE_ENSURE_EQ(context, op_context.input1->type, op_context.input2->type);  in Prepare()
      55   op_context.output->type = op_context.input1->type;  in Prepare()
      58   !HaveSameShapes(op_context.input1, op_context.input2);  in Prepare()
      63   context, CalculateShapeForBroadcast(context, op_context.input1,  in Prepare()
      64   op_context.input2, &output_size));  in Prepare()
      66   output_size = TfLiteIntArrayCopy(op_context.input1->dims);  in Prepare()
      69   return context->ResizeTensor(context, op_context.output, output_size);  in Prepare()
      88   const OpContext& op_context) {  in TFLiteOperation() argument
      90   GetTensorShape(op_context.input1),  in TFLiteOperation()
      [all …]
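Prepare() above takes one of two paths: if the inputs already share a shape it copies input1's dims, otherwise it asks CalculateShapeForBroadcast for the combined shape. A hedged standalone restatement of the broadcast rule that helper implements (NumPy-style, dimensions aligned from the right; this is not the TFLite helper itself):

    #include <algorithm>
    #include <optional>
    #include <vector>

    // Right-aligned broadcast: each dimension pair must match or contain
    // a 1. Returns std::nullopt for incompatible shapes.
    std::optional<std::vector<int>> BroadcastShape(std::vector<int> a,
                                                   std::vector<int> b) {
      if (a.size() < b.size()) std::swap(a, b);
      std::vector<int> out(a);
      const size_t offset = a.size() - b.size();
      for (size_t i = 0; i < b.size(); ++i) {
        const int da = a[offset + i], db = b[i];
        if (da != db && da != 1 && db != 1) return std::nullopt;
        out[offset + i] = std::max(da, db);
      }
      return out;
    }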
|
D | dequantize.cc |
      59   OpContext op_context(context, node);  in Prepare() local
      61   TF_LITE_ENSURE(context, op_context.input->type == kTfLiteUInt8 ||  in Prepare()
      62   op_context.input->type == kTfLiteInt8);  in Prepare()
      64   op_context.output->type = kTfLiteFloat32;  in Prepare()
      67   if (IsConstantTensor(op_context.input)) {  in Prepare()
      68   op_context.output->allocation_type = kTfLiteArenaRwPersistent;  in Prepare()
      70   return context->ResizeTensor(context, op_context.output,  in Prepare()
      71   TfLiteIntArrayCopy(op_context.input->dims));  in Prepare()
      76   OpContext op_context(context, node);  in Eval() local
      77   if (IsConstantTensor(op_context.input) &&  in Eval()
      [all …]
|
D | batch_to_space_nd.cc |
      57   BatchToSpaceNDContext* op_context) {  in ResizeOutputTensor() argument
      58   TfLiteIntArray* input_size = op_context->input->dims;  in ResizeOutputTensor()
      59   const int* block_shape = GetTensorData<int32>(op_context->block_shape);  in ResizeOutputTensor()
      60   const int* crops = GetTensorData<int32>(op_context->crops);  in ResizeOutputTensor()
      62   TF_LITE_ENSURE_EQ(context, NumDimensions(op_context->block_shape),  in ResizeOutputTensor()
      64   TF_LITE_ENSURE_EQ(context, op_context->block_shape->dims->data[0],  in ResizeOutputTensor()
      66   TF_LITE_ENSURE_EQ(context, NumDimensions(op_context->crops),  in ResizeOutputTensor()
      98   return context->ResizeTensor(context, op_context->output, output_size);  in ResizeOutputTensor()
      105  BatchToSpaceNDContext op_context(context, node);  in Prepare() local
      106  TF_LITE_ENSURE_EQ(context, NumDimensions(op_context.input),  in Prepare()
      [all …]
|
D | transpose.cc |
      46   TransposeContext* op_context) {  in ResizeOutputTensor() argument
      47   int dims = NumDimensions(op_context->input);  in ResizeOutputTensor()
      48   const int* perm_data = GetTensorData<int32_t>(op_context->perm);  in ResizeOutputTensor()
      51   TF_LITE_ENSURE_EQ(context, NumDimensions(op_context->perm), 1);  in ResizeOutputTensor()
      52   TF_LITE_ENSURE_EQ(context, op_context->perm->dims->data[0], dims);  in ResizeOutputTensor()
      59   TfLiteIntArray* input_size = op_context->input->dims;  in ResizeOutputTensor()
      65   return context->ResizeTensor(context, op_context->output, output_size);  in ResizeOutputTensor()
      72   TransposeContext op_context(context, node);  in Prepare() local
      75   TF_LITE_ENSURE_MSG(context, NumDimensions(op_context.input) <= 4,  in Prepare()
      77   TF_LITE_ENSURE_EQ(context, op_context.input->type, op_context.output->type);  in Prepare()
      [all …]
|
D | split.cc |
      74   OpContext op_context(context, node);  in Prepare() local
      76   TF_LITE_ENSURE_EQ(context, NumOutputs(node), op_context.params->num_splits);  in Prepare()
      78   auto input_type = op_context.input->type;  in Prepare()
      89   if (IsConstantTensor(op_context.axis)) {  in Prepare()
      90   return ResizeOutputTensors(context, node, op_context.axis, op_context.input,  in Prepare()
      91   op_context.params->num_splits);  in Prepare()
      98   OpContext op_context(context, node);  in Eval() local
      102  if (!IsConstantTensor(op_context.axis)) {  in Eval()
      105  ResizeOutputTensors(context, node, op_context.axis, op_context.input,  in Eval()
      106  op_context.params->num_splits));  in Eval()
      [all …]
|
D | split_v.cc |
      122  OpContext op_context(context, node);  in Prepare() local
      124  TF_LITE_ENSURE_EQ(context, NumOutputs(node), op_context.params->num_splits);  in Prepare()
      126  auto input_type = op_context.input->type;  in Prepare()
      134  auto size_splits = op_context.size_splits;  in Prepare()
      140  if (IsConstantTensor(op_context.size_splits) &&  in Prepare()
      141  IsConstantTensor(op_context.axis)) {  in Prepare()
      142  return ResizeOutputTensors(context, node, op_context.input,  in Prepare()
      143  op_context.size_splits, op_context.axis);  in Prepare()
      150  OpContext op_context(context, node);  in Eval() local
      154  if (!IsConstantTensor(op_context.axis) ||  in Eval()
      [all …]
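split.cc and split_v.cc above share a pattern worth calling out: their output shapes depend on tensor *values* (axis, size_splits), so Prepare() can only resize outputs when those tensors are constant; otherwise the resize is deferred to Eval(). A hedged sketch of that control flow for the split case, assuming the SetTensorToDynamic helper from kernel_util.h (the else branch is elided in the listing):

    TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
      OpContext op_context(context, node);
      // Shapes depend on the axis tensor's value: resize now only if it
      // is constant at Prepare() time ...
      if (IsConstantTensor(op_context.axis)) {
        return ResizeOutputTensors(context, node, op_context.axis,
                                   op_context.input,
                                   op_context.params->num_splits);
      }
      // ... otherwise mark every output dynamic and resize in Eval().
      for (int i = 0; i < NumOutputs(node); ++i) {
        SetTensorToDynamic(GetOutput(context, node, i));
      }
      return kTfLiteOk;
    }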
|
D | exp.cc |
      47   ExpContext op_context(context, node);  in Prepare() local
      48   TfLiteIntArray* output_dims = TfLiteIntArrayCopy(op_context.input->dims);  in Prepare()
      49   op_context.output->type = op_context.input->type;  in Prepare()
      50   return context->ResizeTensor(context, op_context.output, output_dims);  in Prepare()
      55   ExpContext op_context(context, node);  in Eval() local
      58   kernel_type::Exp<data_type>(GetTensorData<data_type>(op_context.input), \  in Eval()
      59   NumElements(op_context.input), \  in Eval()
      60   GetTensorData<data_type>(op_context.output))  in Eval()
      64   switch (op_context.input->type) {  in Eval()
      71   op_context.input->type);  in Eval()
|
D | squeeze.cc |
      42   SqueezeContext op_context(context, node);  in Prepare() local
      43   int input_num_dims = NumDimensions(op_context.input);  in Prepare()
      44   int num_squeeze_dims = op_context.params->num_squeeze_dims;  in Prepare()
      47   const TfLiteIntArray* input_dims = op_context.input->dims;  in Prepare()
      48   const int* squeeze_dims = op_context.params->squeeze_dims;  in Prepare()
      77   return context->ResizeTensor(context, op_context.output, output_dims);  in Prepare()
      81   SqueezeContext op_context(context, node);  in Eval() local
      82   TF_LITE_ENSURE_EQ(context, op_context.input->bytes, op_context.output->bytes);  in Eval()
      83   memcpy(op_context.output->data.raw, op_context.input->data.raw,  in Eval()
      84   op_context.input->bytes);  in Eval()
|
D | fake_quant.cc |
      58   OpContext op_context(context, node);  in Prepare() local
      59   TfLiteIntArray* output_dims = TfLiteIntArrayCopy(op_context.input->dims);  in Prepare()
      60   op_context.output->type = op_context.input->type;  in Prepare()
      61   return context->ResizeTensor(context, op_context.output, output_dims);  in Prepare()
      66   OpContext op_context(context, node);  in Eval() local
      75   reference_ops::FakeQuant(op_params, GetTensorShape(op_context.input),  in Eval()
      76   GetTensorData<float>(op_context.input),  in Eval()
      77   GetTensorShape(op_context.output),  in Eval()
      78   GetTensorData<float>(op_context.output));  in Eval()
|
/external/tensorflow/tensorflow/core/grappler/costs/ |
D | op_level_cost_estimator.h |
      37   virtual Costs PredictCosts(const OpContext& op_context) const;
      44   Costs PredictCostOfAnUnknownOp(const OpContext& op_context) const;
      129  Costs PredictConv2D(const OpContext& op_context) const;
      130  Costs PredictCwiseOp(const OpContext& op_context) const;
      131  Costs PredictConv2DBackpropInput(const OpContext& op_context) const;
      132  Costs PredictConv2DBackpropFilter(const OpContext& op_context) const;
      133  Costs PredictFusedConv2DBiasActivation(const OpContext& op_context) const;
      134  Costs PredictMatMul(const OpContext& op_context) const;
      135  Costs PredictSparseTensorDenseMatMul(const OpContext& op_context) const;
      136  Costs PredictNoOp(const OpContext& op_context) const;
      [all …]
|
D | op_level_cost_estimator_test.cc |
      77   OpContext op_context;  in DescribeMatMul() local
      78   SetCpuDevice(&op_context.op_info);  in DescribeMatMul()
      79   op_context.op_info.set_op("MatMul");  in DescribeMatMul()
      81   DescribeMatrix(m, l, &op_context.op_info);  in DescribeMatMul()
      82   DescribeMatrix(k, n, &op_context.op_info);  in DescribeMatMul()
      83   return op_context;  in DescribeMatMul()
      113  OpContext op_context;  in DescribeBatchMatMul() local
      114  SetCpuDevice(&op_context.op_info);  in DescribeBatchMatMul()
      115  op_context.op_info.set_op("BatchMatMul");  in DescribeBatchMatMul()
      117  DescribeArbitraryRankInput(dims_a, DT_FLOAT, &op_context.op_info);  in DescribeBatchMatMul()
      [all …]
|
D | op_level_cost_estimator.cc |
      221  typedef Costs (OpLevelCostEstimator::*CostImpl)(const OpContext& op_context)  in OpLevelCostEstimator()
      224  return [this, impl](const OpContext& op_context) {  in OpLevelCostEstimator() argument
      225  return (this->*impl)(op_context);  in OpLevelCostEstimator()
      373  Costs OpLevelCostEstimator::PredictCosts(const OpContext& op_context) const {  in PredictCosts()
      374  const auto& op_info = op_context.op_info;  in PredictCosts()
      378  Costs costs = estimator(op_context);  in PredictCosts()
      385  return PredictVariable(op_context);  in PredictCosts()
      389  return PredictCwiseOp(op_context);  in PredictCosts()
      394  return PredictCostOfAnUnknownOp(op_context);  in PredictCosts()
      448  Costs OpLevelCostEstimator::PredictCwiseOp(const OpContext& op_context) const {  in PredictCwiseOp()
      [all …]
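The constructor lines above (221-225) register per-op cost functions through a pointer-to-member-function typedef wrapped in a lambda, so every Predict*() method shares one callable signature. A hedged standalone sketch of that dispatch pattern outside the TensorFlow codebase (types simplified to ints; names are illustrative):

    #include <functional>
    #include <iostream>
    #include <map>
    #include <string>

    struct Estimator {
      // Pointer-to-member typedef, mirroring CostImpl in the listing.
      typedef int (Estimator::*CostImpl)(int) const;

      // Wrap a member pointer in a lambda so the registry stores one
      // uniform std::function type.
      std::function<int(int)> Wrap(CostImpl impl) {
        return [this, impl](int op) { return (this->*impl)(op); };
      }

      int PredictMatMul(int n) const { return n * n; }
      int PredictNoOp(int) const { return 0; }

      std::map<std::string, std::function<int(int)>> registry{
          {"MatMul", Wrap(&Estimator::PredictMatMul)},
          {"NoOp", Wrap(&Estimator::PredictNoOp)}};
    };

    int main() {
      Estimator e;
      std::cout << e.registry["MatMul"](8) << "\n";  // dispatches via (this->*impl)
    }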
|
D | analytical_cost_estimator.cc |
      39   void AddCostNode(ReadyNodeManager* node_manager, const OpContext& op_context,  in AddCostNode() argument
      44   const string& op_name = op_context.name;  in AddCostNode()
      60   node->set_device(op_context.device_name);  in AddCostNode()
      92   for (const auto& output : op_context.op_info.outputs()) {  in AddCostNode()
      166  OpContext op_context = scheduler_->GetCurrNode();  in PredictCosts() local
      167  node_costs = node_estimator_->PredictCosts(op_context);  in PredictCosts()
      170  inaccurate_nodes.push_back(op_context.name);  in PredictCosts()
      172  VLOG(4) << op_context.name << " has "  in PredictCosts()
      178  AddCostNode(node_manager_.get(), op_context, node_id++, node_costs,  in PredictCosts()
|
D | BUILD |
      216  name = "op_context",
      217  hdrs = ["op_context.h"],
      231  ":op_context",
      288  ":op_context",
|
D | virtual_scheduler.cc |
      631  OpContext op_context;  in GetCurrNode() local
      633  op_context.name = node->name();  in GetCurrNode()
      634  op_context.device_name = node_state.device_name;  in GetCurrNode()
      635  auto& op_info = op_context.op_info;  in GetCurrNode()
      647  op_context.function_library = &grappler_item_->graph.library();  in GetCurrNode()
      649  return op_context;  in GetCurrNode()
      765  OpContext op_context = GetCurrNode();  in MarkCurrNodeExecuted() local
      767  string node_description = GetOpDescription(op_context.op_info);  in MarkCurrNodeExecuted()
|
D | virtual_scheduler_test.cc |
      1598  Costs SimplePredictCosts(const OpContext& op_context) const {  in SimplePredictCosts()
      1601  if (op_context.op_info.op() == "MatMul") {  in SimplePredictCosts()
      1603  } else if (op_context.op_info.op() == "RandomUniform") {  in SimplePredictCosts()
      1619  OpContext op_context = scheduler_->GetCurrNode();  in RunScheduler() local
      1620  ops_executed[op_context.name] = op_context;  in RunScheduler()
      1621  std::cout << op_context.name << std::endl;  in RunScheduler()
      1623  Costs node_costs = SimplePredictCosts(op_context);  in RunScheduler()
      1626  auto it = dependency_.find(op_context.name);  in RunScheduler()
      1634  if (op_context.name == target_node) {  in RunScheduler()
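RunScheduler() above is the canonical consumer of the GetCurrNode()/MarkCurrNodeExecuted() pair defined in virtual_scheduler.cc: pull the current node, cost it, then advance. A hedged sketch of the loop skeleton inside the test fixture, assuming the VirtualScheduler API of this vintage, where MarkCurrNodeExecuted() returns true while unexecuted nodes remain:

    // Drive the virtual scheduler until it runs out of nodes.
    bool more_nodes = true;
    do {
      OpContext op_context = scheduler_->GetCurrNode();
      Costs node_costs = SimplePredictCosts(op_context);
      more_nodes = scheduler_->MarkCurrNodeExecuted(node_costs);
    } while (more_nodes);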
|
/external/tensorflow/tensorflow/core/grappler/optimizers/ |
D | static_schedule.cc |
      33   OpContext op_context;  in PredictExecutionTime() local
      34   op_context.op_info.set_op(node.op());  in PredictExecutionTime()
      35   *op_context.op_info.mutable_attr() = node.attr();  in PredictExecutionTime()
      40   op_context.op_info.add_inputs()->Swap(&input);  in PredictExecutionTime()
      46   op_context.op_info.add_outputs()->Swap(&output);  in PredictExecutionTime()
      50   op_context.op_info.mutable_device()->Swap(&device);  in PredictExecutionTime()
      53   estimator.PredictCosts(op_context).execution_time;  in PredictExecutionTime()
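PredictExecutionTime() above is the minimal recipe for costing a single NodeDef without a scheduler: populate op_info by hand, then call the op-level estimator directly. A hedged condensation of that flow (the same calls as the listing, with the input/output/device plumbing elided):

    // Build an OpContext for one node and query the op-level estimator.
    OpContext op_context;
    op_context.op_info.set_op(node.op());
    *op_context.op_info.mutable_attr() = node.attr();
    // ... inputs, outputs, and device are swapped in as shown above ...
    OpLevelCostEstimator estimator;
    Costs::NanoSeconds execution_time =
        estimator.PredictCosts(op_context).execution_time;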
|
D | evaluation_utils.cc |
      112  OpKernelContext op_context(&params);  in EvaluateNode() local
      113  op_kernel->Compute(&op_context);  in EvaluateNode()
      115  output->push_back(op_context.release_output(i));  in EvaluateNode()
      117  return op_context.status();  in EvaluateNode()
|
/external/tensorflow/tensorflow/compiler/tf2xla/ |
D | graph_compiler.cc |
      164  OpKernelContext op_context(&params, n->num_outputs());  in Compile() local
      167  TF_RETURN_IF_ERROR(CompileFunctionalNode(n, &op_context));  in Compile()
      169  device_->Compute(CHECK_NOTNULL(params.op_kernel), &op_context);  in Compile()
      170  Status s = op_context.status();  in Compile()
      181  outputs[o] = op_context.release_output(o);  in Compile()
      220  OpKernelContext* op_context) {  in CompileFunctionalNode() argument
      224  XlaOpKernelContext xla_op_context(op_context);  in CompileFunctionalNode()
      226  XlaContext& context = XlaContext::Get(op_context);  in CompileFunctionalNode()
|
D | graph_compiler.h |
      79   Status CompileFunctionalNode(Node* n, OpKernelContext* op_context);
|