/external/tensorflow/tensorflow/lite/tools/optimize/ |
D | quantize_model.cc |
     56   int subgraph_index, int op_idx, const string& operator_name,  in GetOperatorProperty() argument
     59   operator_property::GetOperatorProperty(model, subgraph_index, op_idx);  in GetOperatorProperty()
     61   const OperatorT* op = subgraph->operators[op_idx].get();  in GetOperatorProperty()
     92   for (size_t op_idx = 0; op_idx < subgraph->operators.size(); op_idx++) {  in PopulateRealValueOpSet() local
     93   OperatorT* op = subgraph->operators[op_idx].get();  in PopulateRealValueOpSet()
     96   GetOperatorProperty(operator_names, model, subgraph_idx, op_idx,  in PopulateRealValueOpSet()
    226   for (size_t op_idx = 0; op_idx < subgraph->operators.size(); op_idx++) {  in InputQuantizeRequired() local
    227   OperatorT* op = subgraph->operators[op_idx].get();  in InputQuantizeRequired()
    446   for (int op_idx = subgraph->operators.size() - 1; op_idx >= 0; op_idx--) {  in ApplyConstraints() local
    447   OperatorT* op = subgraph->operators[op_idx].get();  in ApplyConstraints()
          [all …]
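
Nearly every hit in quantize_model.cc is the same loop shape: walk the subgraph's operators by index and pull the raw OperatorT pointer out of its unique_ptr before matching it against its operator properties. Below is a minimal, self-contained sketch of that shape; SubGraphT and OperatorT here are simplified stand-ins for the generated TFLite schema types (only the fields the loop touches), and WalkOperators is a hypothetical visitor, not a function from the file.

    #include <cstdio>
    #include <memory>
    #include <vector>

    // Simplified stand-ins for the generated TFLite schema types.
    struct OperatorT {
      std::vector<int> inputs;
      std::vector<int> outputs;
    };
    struct SubGraphT {
      std::vector<std::unique_ptr<OperatorT>> operators;
    };

    // Forward walk over a subgraph, as in PopulateRealValueOpSet() and
    // InputQuantizeRequired(): op_idx is the operator's position, op the raw
    // pointer owned by the subgraph.
    void WalkOperators(SubGraphT* subgraph) {
      for (size_t op_idx = 0; op_idx < subgraph->operators.size(); op_idx++) {
        OperatorT* op = subgraph->operators[op_idx].get();
        std::printf("op %zu: %zu inputs, %zu outputs\n", op_idx,
                    op->inputs.size(), op->outputs.size());
      }
    }

    int main() {
      SubGraphT subgraph;
      subgraph.operators.push_back(std::make_unique<OperatorT>());
      subgraph.operators.back()->inputs = {0, 1};
      subgraph.operators.back()->outputs = {2};
      WalkOperators(&subgraph);
    }
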
|
D | quantization_wrapper_utils.cc |
     51   for (size_t op_idx = 0; op_idx < subgraph->operators.size(); op_idx++) {  in IntermediateTensorExists() local
     52   OperatorT* op = subgraph->operators[op_idx].get();  in IntermediateTensorExists()
     89   for (size_t op_idx = 0; op_idx < subgraph->operators.size(); op_idx++) {  in AddIntermediateTensorsToFusedOp() local
     91   OperatorT* op = subgraph->operators[op_idx].get();  in AddIntermediateTensorsToFusedOp()
     93   operator_property::GetOperatorProperty(model, subgraph_idx, op_idx);  in AddIntermediateTensorsToFusedOp()
    105   auto name = CreateTensorName(op_idx, i);  in AddIntermediateTensorsToFusedOp()
|
D | model_utils.cc |
    137   for (int op_idx = subgraph->operators.size() - 1; op_idx >= 0; op_idx--) {  in SetOperatorCodeVersion() local
    138   OperatorT* op = subgraph->operators[op_idx].get();  in SetOperatorCodeVersion()
    141   operator_property::GetOperatorProperty(model, subgraph_idx, op_idx);  in SetOperatorCodeVersion()
|
D | modify_model_interface.cc |
     65   for (int32_t op_idx = subgraph->operators.size() - 1; op_idx >= 0;  in GetInputTensors() local
     66   op_idx--) {  in GetInputTensors()
     67   OperatorT* op = subgraph->operators[op_idx].get();  in GetInputTensors()
    114   result.push_back({subgraph_idx, op->inputs[0], op_idx, op->outputs[0],  in GetInputTensors()
    140   for (int32_t op_idx = subgraph->operators.size() - 1; op_idx >= 0;  in GetOutputTensors() local
    141   op_idx--) {  in GetOutputTensors()
    142   OperatorT* op = subgraph->operators[op_idx].get();  in GetOutputTensors()
    189   result.push_back({subgraph_idx, op->inputs[0], op_idx, op->outputs[0],  in GetOutputTensors()
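
model_utils.cc, modify_model_interface.cc, and ApplyConstraints() in quantize_model.cc all walk the operator list backwards, and in each case op_idx is switched to a signed type (int or int32_t): with size_t the decrement past zero would wrap around and the op_idx >= 0 test would never fail. A small sketch of just that detail, using a plain vector of ints instead of the schema types:

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> operators = {10, 20, 30};

      // Descending walk: the index must be signed, otherwise op_idx >= 0 is
      // always true once op_idx wraps below zero.
      for (int op_idx = static_cast<int>(operators.size()) - 1; op_idx >= 0;
           op_idx--) {
        std::printf("visiting op %d (value %d)\n", op_idx, operators[op_idx]);
      }
    }
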
|
D | quantize_weights.cc |
     42   int32_t op_idx;  member
     64   for (size_t op_idx = 0; op_idx < subgraph->operators.size(); ++op_idx) {  in GetTensorConsumers() local
     65   OperatorT* op = subgraph->operators[op_idx].get();  in GetTensorConsumers()
     72   {op, static_cast<int32_t>(op_idx), static_cast<int32_t>(i)});  in GetTensorConsumers()
    525   min_op_idx = std::min(dequant_op_info.op_idx, min_op_idx);  in QuantizeWeightsInt8()
    609   min_op_idx = std::min(dequant_op_info.op_idx, min_op_idx);  in QuantizeWeightsFloat16()
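
In quantize_weights.cc, op_idx is also stored as a struct member (line 42): GetTensorConsumers() records, for each tensor, every (operator, operator position, input slot) triple that reads it, and the quantization passes later keep the smallest op_idx seen, i.e. the earliest consumer. A self-contained sketch of that bookkeeping, with a stand-in ConsumerInfo record and a plain integer tensor id in place of the real schema types:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Stand-in for the consumer record: where the consuming op sits in the
    // subgraph and through which input slot it reads the tensor.
    struct ConsumerInfo {
      int32_t op_idx;
      int32_t input_idx;
    };

    // Stand-in operator: just a list of input tensor ids.
    struct Op {
      std::vector<int> inputs;
    };

    std::vector<ConsumerInfo> GetTensorConsumers(const std::vector<Op>& ops,
                                                 int tensor_idx) {
      std::vector<ConsumerInfo> consumers;
      for (size_t op_idx = 0; op_idx < ops.size(); ++op_idx) {
        const Op& op = ops[op_idx];
        for (size_t i = 0; i < op.inputs.size(); ++i) {
          if (op.inputs[i] == tensor_idx) {
            consumers.push_back(
                {static_cast<int32_t>(op_idx), static_cast<int32_t>(i)});
          }
        }
      }
      return consumers;
    }

    int main() {
      std::vector<Op> ops = {{{0, 1}}, {{1, 2}}, {{1}}};
      int32_t min_op_idx = static_cast<int32_t>(ops.size());
      for (const ConsumerInfo& info : GetTensorConsumers(ops, /*tensor_idx=*/1)) {
        // As in QuantizeWeightsInt8()/Float16(): keep the earliest consumer.
        min_op_idx = std::min(info.op_idx, min_op_idx);
        std::printf("tensor 1 consumed by op %d, input slot %d\n", info.op_idx,
                    info.input_idx);
      }
      std::printf("earliest consumer: op %d\n", min_op_idx);
    }
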
|
D | quantize_weights_test.cc |
    120   for (size_t op_idx = 0; op_idx < subgraph->operators()->size(); ++op_idx) {  in GetProducerOpCode() local
    121   const auto op = subgraph->operators()->Get(op_idx);  in GetProducerOpCode()
|
D | quantization_utils.h | 144 float GetEffectiveScale(ModelT* model, SubGraphT* subgraph, int op_idx,
|
D | quantization_utils.cc |
    714   float GetEffectiveScale(ModelT* model, SubGraphT* subgraph, int op_idx,  in GetEffectiveScale() argument
    719   OperatorT* op = subgraph->operators[op_idx].get();  in GetEffectiveScale()
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/ |
D | R600AsmPrinter.cpp |
     56   for (unsigned op_idx = 0; op_idx < numOperands; op_idx++) {  in EmitProgramInfoR600() local
     57   const MachineOperand &MO = MI.getOperand(op_idx);  in EmitProgramInfoR600()
|
/external/llvm-project/llvm/lib/Target/AMDGPU/ |
D | R600AsmPrinter.cpp |
     56   for (unsigned op_idx = 0; op_idx < numOperands; op_idx++) {  in EmitProgramInfoR600() local
     57   const MachineOperand &MO = MI.getOperand(op_idx);  in EmitProgramInfoR600()
|
/external/deqp-deps/SPIRV-Tools/source/opt/ |
D | fix_storage_class.cpp |
     33   [&uses](Instruction* use, uint32_t op_idx) {  in Process() argument
     34   uses.push_back({use, op_idx});  in Process()
    175   uint32_t op_idx, std::set<uint32_t>* seen) {  in PropagateType() argument
    187   if (op_idx == 2) {  in PropagateType()
    200   if (op_idx > 2) {  in PropagateType()
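
In the SPIRV-Tools pass, op_idx is the operand position at which an id appears in a using instruction: lines 33-34 show a callback receiving (Instruction*, uint32_t op_idx) and pushing the pair into a uses list, and PropagateType() later special-cases particular operand positions. The sketch below mirrors only the collection shape; Instruction is a toy stand-in and ForEachUse is hand-rolled here, whereas the real pass goes through SPIRV-Tools' def-use machinery.

    #include <cstdint>
    #include <cstdio>
    #include <functional>
    #include <utility>
    #include <vector>

    // Stand-in instruction: a result id plus the ids of its operands.
    struct Instruction {
      uint32_t result_id;
      std::vector<uint32_t> operands;
    };

    // Minimal ForEachUse-style helper: invoke the callback once per operand of
    // every instruction that references |id|, passing the operand's position.
    void ForEachUse(std::vector<Instruction>& instructions, uint32_t id,
                    const std::function<void(Instruction*, uint32_t)>& callback) {
      for (Instruction& inst : instructions) {
        for (uint32_t op_idx = 0; op_idx < inst.operands.size(); ++op_idx) {
          if (inst.operands[op_idx] == id) callback(&inst, op_idx);
        }
      }
    }

    int main() {
      std::vector<Instruction> instructions = {{10, {1, 2}}, {11, {2, 2, 3}}};

      // Same shape as the lambda at line 33: collect (use, operand index)
      // pairs for later processing.
      std::vector<std::pair<Instruction*, uint32_t>> uses;
      ForEachUse(instructions, /*id=*/2,
                 [&uses](Instruction* use, uint32_t op_idx) {
                   uses.push_back({use, op_idx});
                 });

      for (const auto& use : uses) {
        std::printf("id 2 used by result %u at operand %u\n",
                    use.first->result_id, use.second);
      }
    }
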
|
D | fix_storage_class.h | 79 bool PropagateType(Instruction* inst, uint32_t type_id, uint32_t op_idx,
|
/external/swiftshader/third_party/SPIRV-Tools/source/opt/ |
D | fix_storage_class.cpp |
     33   [&uses](Instruction* use, uint32_t op_idx) {  in Process() argument
     34   uses.push_back({use, op_idx});  in Process()
    175   uint32_t op_idx, std::set<uint32_t>* seen) {  in PropagateType() argument
    187   if (op_idx == 2) {  in PropagateType()
    200   if (op_idx > 2) {  in PropagateType()
|
D | fix_storage_class.h | 79 bool PropagateType(Instruction* inst, uint32_t type_id, uint32_t op_idx,
|
/external/tensorflow/tensorflow/lite/tools/ |
D | verifier.cc |
    527   for (int op_idx = 0, end = subgraph.operators()->size(); op_idx < end;  in VerifySubGraphConsistency() local
    528   ++op_idx) {  in VerifySubGraphConsistency()
    529   const auto* op = subgraph.operators()->Get(op_idx);  in VerifySubGraphConsistency()
    549   input_idx, op_idx, EnumNameBuiltinOperator(builtin_code));  in VerifySubGraphConsistency()
    559   output_idx, op_idx, EnumNameBuiltinOperator(builtin_code));  in VerifySubGraphConsistency()
    565   output_idx, op_idx, EnumNameBuiltinOperator(builtin_code));  in VerifySubGraphConsistency()
    571   output_idx, op_idx,  in VerifySubGraphConsistency()
    578   output_idx, op_idx,  in VerifySubGraphConsistency()
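
The verifier walks the flatbuffer subgraph via subgraph.operators()->Get(op_idx), and from these hits op_idx shows up mainly in the error reports, alongside the offending tensor index and the builtin operator name. A stand-in sketch of that kind of per-operator consistency walk is below; it uses toy structs and printf instead of the flatbuffers accessors and the real error reporter, and the bounds check is a simplified assumption rather than the verifier's exact rule.

    #include <cstdio>
    #include <vector>

    struct Op {
      const char* name;          // stand-in for EnumNameBuiltinOperator()
      std::vector<int> inputs;   // tensor indices
      std::vector<int> outputs;  // tensor indices
    };

    // Returns false if any operator references a tensor index outside
    // [0, num_tensors); each report includes op_idx, as the verifier's do.
    bool VerifySubGraphConsistency(const std::vector<Op>& ops, int num_tensors) {
      bool ok = true;
      for (size_t op_idx = 0; op_idx < ops.size(); ++op_idx) {
        const Op& op = ops[op_idx];
        for (size_t i = 0; i < op.inputs.size(); ++i) {
          if (op.inputs[i] < 0 || op.inputs[i] >= num_tensors) {
            std::printf("bad input tensor %d at slot %zu of op %zu (%s)\n",
                        op.inputs[i], i, op_idx, op.name);
            ok = false;
          }
        }
        for (size_t i = 0; i < op.outputs.size(); ++i) {
          if (op.outputs[i] < 0 || op.outputs[i] >= num_tensors) {
            std::printf("bad output tensor %d at slot %zu of op %zu (%s)\n",
                        op.outputs[i], i, op_idx, op.name);
            ok = false;
          }
        }
      }
      return ok;
    }

    int main() {
      std::vector<Op> ops = {{"CONV_2D", {0, 1}, {2}}, {"RESHAPE", {2}, {7}}};
      std::printf("consistent: %s\n",
                  VerifySubGraphConsistency(ops, /*num_tensors=*/4) ? "yes" : "no");
    }
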
|
/external/angle/third_party/vulkan-deps/spirv-tools/src/source/opt/ |
D | fix_storage_class.cpp |
     33   [&uses](Instruction* use, uint32_t op_idx) {  in Process() argument
     34   uses.push_back({use, op_idx});  in Process()
    175   uint32_t op_idx, std::set<uint32_t>* seen) {  in PropagateType() argument
    187   if (op_idx == 2) {  in PropagateType()
    200   if (op_idx > 2) {  in PropagateType()
|
D | fix_storage_class.h | 79 bool PropagateType(Instruction* inst, uint32_t type_id, uint32_t op_idx,
|
/external/tensorflow/tensorflow/compiler/xla/service/cpu/ |
D | cpu_layout_assignment.cc |
    123   } else if (optional<int64> op_idx =  in AddBackendConstraints() local
    125   const HloInstruction* op = instruction->operand(*op_idx);  in AddBackendConstraints()
    127   ColMajorShape(op->shape()), instruction, *op_idx));  in AddBackendConstraints()
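
Here op_idx is an optional<int64> declared directly in the else-if condition (line 123): the branch runs only when an operand index was actually returned, and *op_idx is then the operand whose layout gets constrained. The same C++ idiom in isolation, where ProfitableOperand is a made-up stand-in for the pass's real helper:

    #include <cstdint>
    #include <cstdio>
    #include <optional>
    #include <vector>

    // Hypothetical stand-in: return the index of the first "large" operand,
    // or nullopt if none qualifies.
    std::optional<int64_t> ProfitableOperand(const std::vector<int>& operand_sizes) {
      for (size_t i = 0; i < operand_sizes.size(); ++i) {
        if (operand_sizes[i] >= 1024) return static_cast<int64_t>(i);
      }
      return std::nullopt;
    }

    int main() {
      std::vector<int> operand_sizes = {16, 4096, 8};

      // The optional is declared in the condition, converts to true only when
      // it holds a value, and is dereferenced inside the branch.
      if (std::optional<int64_t> op_idx = ProfitableOperand(operand_sizes)) {
        std::printf("constrain operand %lld\n",
                    static_cast<long long>(*op_idx));
      } else {
        std::printf("no operand worth constraining\n");
      }
    }
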
|
/external/tensorflow/tensorflow/lite/python/ |
D | test_util.py |
     33   for op_idx in range(subgraph.OperatorsLength()):
     34   op = subgraph.Operators(op_idx)
|
/external/llvm/lib/Target/AMDGPU/ |
D | AMDGPUAsmPrinter.cpp |
    262   for (unsigned op_idx = 0; op_idx < numOperands; op_idx++) {  in EmitProgramInfoR600() local
    263   const MachineOperand &MO = MI.getOperand(op_idx);  in EmitProgramInfoR600()
    332   for (unsigned op_idx = 0; op_idx < numOperands; op_idx++) {  in getSIProgramInfo() local
    333   const MachineOperand &MO = MI.getOperand(op_idx);  in getSIProgramInfo()
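
All of the AMDGPU asm-printer hits are the same scan: iterate a machine instruction's operands by op_idx via MI.getOperand(op_idx) and inspect each MachineOperand while computing per-program statistics. The sketch below models only that scan, with simplified operand records and a made-up max-register reduction instead of the real MachineInstr/MachineOperand classes and program-info fields.

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // Simplified operand record, in place of llvm::MachineOperand.
    struct Operand {
      bool is_reg;
      unsigned hw_index;
    };

    // Simplified instruction: just its operand list.
    struct Instr {
      std::vector<Operand> operands;
    };

    int main() {
      std::vector<Instr> instructions = {
          {{{true, 3}, {false, 0}, {true, 7}}},
          {{{true, 12}, {true, 5}}},
      };

      unsigned max_reg = 0;
      for (const Instr& mi : instructions) {
        const unsigned num_operands = mi.operands.size();
        // Same loop shape as EmitProgramInfoR600()/getSIProgramInfo().
        for (unsigned op_idx = 0; op_idx < num_operands; op_idx++) {
          const Operand& mo = mi.operands[op_idx];
          if (!mo.is_reg) continue;  // only register operands contribute
          max_reg = std::max(max_reg, mo.hw_index);
        }
      }
      std::printf("highest register index seen: %u\n", max_reg);
    }
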
|
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/ir/ |
D | tf_ops_n_z.cc |
    2993   for (int op_idx : llvm::seq<int>(0, old_num_operands)) {  in matchAndRewrite() local
    2994   auto body_arg = body_block.getArgument(op_idx);  in matchAndRewrite()
    2995   if (body_arg == yield.getOperand(op_idx)) {  in matchAndRewrite()
    3000   auto value = while_op.getOperand(op_idx);  in matchAndRewrite()
    3004   auto cond_arg = cond_block.getArgument(op_idx);  in matchAndRewrite()
    3008   auto result = while_op.getResult(op_idx);  in matchAndRewrite()
    3015   if (body_block.getArgument(op_idx).use_empty() &&  in matchAndRewrite()
    3016   cond_block.getArgument(op_idx).use_empty() &&  in matchAndRewrite()
    3017   while_op.getResult(op_idx).use_empty()) {  in matchAndRewrite()
    3018   removed_operand[op_idx] = true;  in matchAndRewrite()
           [all …]
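
In this MLIR canonicalization, op_idx lines up four parallel per-position handles of the TF While op: its operand, the body block argument, the cond block argument, and the result. From the fragments, a position looks removable when the body just yields its own argument back (line 2995) and none of the three handles has remaining uses (lines 3015-3017). The sketch below is only a guess at that bookkeeping over plain arrays, with integer value ids and use counts standing in for MLIR Values, not the actual rewrite.

    #include <cstdio>
    #include <vector>

    int main() {
      // Parallel per-operand data for a hypothetical while loop with three
      // loop-carried values, identified by small integer ids.
      std::vector<int> body_args      = {1, 2, 3};  // body block arguments
      std::vector<int> yield_operands = {1, 9, 3};  // what the body yields back
      std::vector<int> body_arg_uses  = {0, 4, 0};  // remaining uses per handle
      std::vector<int> cond_arg_uses  = {0, 1, 2};
      std::vector<int> result_uses    = {0, 0, 5};

      const int old_num_operands = static_cast<int>(body_args.size());
      std::vector<bool> removed_operand(old_num_operands, false);

      for (int op_idx = 0; op_idx < old_num_operands; ++op_idx) {
        // Pass-through: the body yields its own argument unchanged.
        const bool pass_through = body_args[op_idx] == yield_operands[op_idx];
        // Removable only if nothing still reads the body argument, the cond
        // argument, or the while result at this position.
        if (pass_through && body_arg_uses[op_idx] == 0 &&
            cond_arg_uses[op_idx] == 0 && result_uses[op_idx] == 0) {
          removed_operand[op_idx] = true;
        }
      }

      for (int op_idx = 0; op_idx < old_num_operands; ++op_idx) {
        std::printf("operand %d: %s\n", op_idx,
                    removed_operand[op_idx] ? "removable" : "kept");
      }
    }
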
|
/external/tensorflow/tensorflow/core/grappler/optimizers/ |
D | scoped_allocator_optimizer.cc |
    728   for (int op_idx = 0, idx_limit = ops.size(); op_idx < idx_limit; ++op_idx) {  in RewireSubgraph() local
    729   NodeDef* old_op = ops[op_idx];  in RewireSubgraph()
    769   *n->mutable_input(i) = strings::StrCat(sas_name, ":", op_idx);  in RewireSubgraph()
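
Here op_idx ends up inside a GraphDef input string: consumers of ops[op_idx] are re-pointed at output op_idx of the replacement node (sas_name in the snippet), using TensorFlow's "node_name:output_slot" input convention. A small sketch of just the string rewrite, with std::string in place of strings::StrCat and NodeDef, and made-up node names:

    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
      const std::string sas_name = "scoped_allocator_1";  // hypothetical node name
      std::vector<std::string> old_ops = {"conv1/relu", "conv2/relu", "conv3/relu"};

      // Consumers of old_ops[op_idx] get rewired to "<sas_name>:<op_idx>",
      // i.e. the op_idx-th output of the new node, mirroring RewireSubgraph().
      for (int op_idx = 0, idx_limit = static_cast<int>(old_ops.size());
           op_idx < idx_limit; ++op_idx) {
        std::string new_input = sas_name + ":" + std::to_string(op_idx);
        std::printf("inputs that read %s now read %s\n", old_ops[op_idx].c_str(),
                    new_input.c_str());
      }
    }
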
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | hlo_rematerialization.cc |
     475   for (int64 op_idx : user->OperandIndices(buffer_alias.instruction())) {  in GetUsers() local
     478   ItemUse{user_item, static_cast<int>(op_idx), user_index})) {  in GetUsers()
     480   ItemUse{user_item, static_cast<int>(op_idx), user_index});  in GetUsers()
    1568   const int64 op_idx = user.operand_number;  in RematerializeInstructions() local
    1584   if (user.user->instruction->operand(op_idx)->shape() !=  in RematerializeInstructions()
    1587   user.user->instruction->operand(op_idx)->shape(), remat_use));  in RematerializeInstructions()
    1591   user.user->instruction->ReplaceOperandWith(op_idx, remat_use));  in RematerializeInstructions()
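
In hlo_rematerialization.cc (and tuple_points_to_analysis.cc below), op_idx is an operand position within a single HLO user: OperandIndices() yields every slot through which the user reads a given instruction, and ReplaceOperandWith(op_idx, ...) later swaps one of those slots to the rematerialized copy. A stand-in sketch of those two helpers over a toy instruction type, not the real HloInstruction API:

    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <vector>

    // Toy instruction: a name and pointers to its operands.
    struct Instruction {
      std::string name;
      std::vector<Instruction*> operands;

      // All operand positions that read |operand| (an instruction can use the
      // same value more than once).
      std::vector<int64_t> OperandIndices(const Instruction* operand) const {
        std::vector<int64_t> indices;
        for (size_t i = 0; i < operands.size(); ++i) {
          if (operands[i] == operand) indices.push_back(static_cast<int64_t>(i));
        }
        return indices;
      }

      void ReplaceOperandWith(int64_t op_idx, Instruction* new_operand) {
        operands[op_idx] = new_operand;
      }
    };

    int main() {
      Instruction big = {"big_buffer", {}};
      Instruction remat = {"big_buffer.remat", {}};
      Instruction user = {"add", {&big, &big}};

      for (int64_t op_idx : user.OperandIndices(&big)) {
        std::printf("%s reads %s at operand %lld; rerouting to %s\n",
                    user.name.c_str(), big.name.c_str(),
                    static_cast<long long>(op_idx), remat.name.c_str());
        user.ReplaceOperandWith(op_idx, &remat);
      }
    }
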
|
D | tuple_points_to_analysis.cc |
    726   for (int64 op_idx : alias_user->OperandIndices(alias.instruction())) {  in GetAllUsesOfInstructionAtIndex() local
    727   uses.emplace_back(alias_user, op_idx);  in GetAllUsesOfInstructionAtIndex()
|
/external/tensorflow/tensorflow/stream_executor/rocm/ |
D | rocm_dnn.cc |
    1069   miopenStatus_t SetConvolutionArgs(const int op_idx, const float* alpha,  in SetConvolutionArgs() argument
    1072   auto status = wrap::miopenFusionPlanGetOp(fusion_plan_, op_idx, &conv_op);  in SetConvolutionArgs()
    1087   miopenStatus_t SetBiasArgs(const int op_idx, const float* alpha,  in SetBiasArgs() argument
    1090   auto status = wrap::miopenFusionPlanGetOp(fusion_plan_, op_idx, &bias_op);  in SetBiasArgs()
    1105   miopenStatus_t SetBatchNormInferenceArgs(const int op_idx, const float* alpha,  in SetBatchNormInferenceArgs() argument
    1112   wrap::miopenFusionPlanGetOp(fusion_plan_, op_idx, &batchnorm_op);  in SetBatchNormInferenceArgs()
    1129   const int op_idx, const float* alpha, const float* beta,  in SetBatchNormForwardArgs() argument
    1135   wrap::miopenFusionPlanGetOp(fusion_plan_, op_idx, &batchnorm_op);  in SetBatchNormForwardArgs()
    1152   miopenStatus_t SetBatchNormBackwardArgs(const int op_idx, const float* alpha,  in SetBatchNormBackwardArgs() argument
    1160   wrap::miopenFusionPlanGetOp(fusion_plan_, op_idx, &batchnorm_op);  in SetBatchNormBackwardArgs()
           [all …]
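
In the ROCm DNN wrapper, op_idx is the position of an op inside a MIOpen fusion plan: each Set*Args helper first fetches the descriptor at that slot with wrap::miopenFusionPlanGetOp(fusion_plan_, op_idx, &op) and then fills in that op's arguments. The sketch below only models the indexing idea; FusionPlan and its string op kinds are toy stand-ins, not the MIOpen handles or wrappers.

    #include <cstdio>
    #include <string>
    #include <vector>

    // Toy stand-in for a fusion plan: an ordered list of fused ops. In the
    // real wrapper, the descriptor at position op_idx is retrieved from the
    // plan before its arguments are set.
    struct FusionPlan {
      std::vector<std::string> ops;  // e.g. {"conv", "bias", "batchnorm"}

      bool SetArgs(int op_idx, const std::string& expected_kind, float alpha,
                   float beta) {
        if (op_idx < 0 || op_idx >= static_cast<int>(ops.size()) ||
            ops[op_idx] != expected_kind) {
          std::printf("no %s op at position %d\n", expected_kind.c_str(), op_idx);
          return false;
        }
        std::printf("configured %s at position %d (alpha=%g, beta=%g)\n",
                    ops[op_idx].c_str(), op_idx, alpha, beta);
        return true;
      }
    };

    int main() {
      FusionPlan plan{{"conv", "bias", "batchnorm"}};
      plan.SetArgs(/*op_idx=*/0, "conv", 1.0f, 0.0f);       // cf. SetConvolutionArgs
      plan.SetArgs(/*op_idx=*/1, "bias", 1.0f, 0.0f);       // cf. SetBiasArgs
      plan.SetArgs(/*op_idx=*/2, "batchnorm", 1.0f, 0.0f);  // cf. SetBatchNormInferenceArgs
    }
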
|