/external/tensorflow/tensorflow/compiler/mlir/lite/ |
D | tf_tfl_passes.cc |
      38   void AddQuantizationPasses(const mlir::TFL::QuantizationSpecs& quant_specs,   in AddQuantizationPasses()
      40   pass_manager->addPass(mlir::TFL::CreatePrepareQuantizePass(quant_specs));   in AddQuantizationPasses()
      41   pass_manager->addPass(mlir::TFL::CreateQuantizePass());   in AddQuantizationPasses()
      45   mlir::TFL::CreatePostQuantizePass(emit_quant_adaptor_ops));   in AddQuantizationPasses()
      49   pass_manager->addPass(mlir::TFL::CreateDefaultQuantParamsPass(   in AddQuantizationPasses()
      52   pass_manager->addPass(mlir::TFL::CreateQuantizePass());   in AddQuantizationPasses()
      54   mlir::TFL::CreatePostQuantizePass(emit_quant_adaptor_ops));   in AddQuantizationPasses()
      58   void AddTFToTFLConversionPasses(const mlir::TFL::PassConfig& pass_config,   in AddTFToTFLConversionPasses()
      84   pass_manager->addPass(mlir::TFL::CreateLowerStaticTensorListPass());   in AddTFToTFLConversionPasses()
      89   pass_manager->addPass(mlir::TFL::CreatePrepareCompositeFunctionsPass());   in AddTFToTFLConversionPasses()
      [all …]
|
D | tf_tfl_passes.h |
      27   void AddTFToTFLConversionPasses(const mlir::TFL::PassConfig& pass_config,
      32   void AddQuantizationPasses(const mlir::TFL::QuantizationSpecs& quant_specs,
|
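The tf_tfl_passes.cc hits above show how the converter chains its quantization passes onto an MLIR pass manager. Below is a minimal sketch of that wiring; the pass-creation calls are taken verbatim from the listing, while the header paths and the emit_quant_adaptor_ops default are assumptions.

    // Sketch only: pass names come from the tf_tfl_passes.cc hits above;
    // header paths and the emit_quant_adaptor_ops value are assumed.
    #include "mlir/Pass/PassManager.h"
    #include "tensorflow/compiler/mlir/lite/quantization/quantization_config.h"
    #include "tensorflow/compiler/mlir/lite/transforms/passes.h"

    void AddQuantizationPassesSketch(const mlir::TFL::QuantizationSpecs& quant_specs,
                                     mlir::PassManager* pass_manager) {
      // Propagate quantization parameters through the function...
      pass_manager->addPass(mlir::TFL::CreatePrepareQuantizePass(quant_specs));
      // ...rewrite ops into their quantized forms...
      pass_manager->addPass(mlir::TFL::CreateQuantizePass());
      // ...and clean up leftover quantize/dequantize adaptor ops.
      const bool emit_quant_adaptor_ops = false;  // assumed default
      pass_manager->addPass(
          mlir::TFL::CreatePostQuantizePass(emit_quant_adaptor_ops));
    }
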
D | flatbuffer_translate.cc |
      115   namespace tfl = mlir::TFL;
      431   mlir::TFL::WhileOp op, const std::vector<int32_t>& operands,
      444   mlir::TFL::NumericVerifyOp op, const std::vector<int32_t>& operands,
      448   Operation* inst, mlir::TFL::Convolution2DTransposeBiasOp op,
      452   Operation* inst, mlir::TFL::MaxPoolingWithArgMax2DOp op,
      456   Operation* inst, mlir::TFL::MaxUnpooling2DOp op,
      509   std::string GetWhileBodyName(mlir::TFL::WhileOp while_op);
      510   std::string GetWhileCondName(mlir::TFL::WhileOp while_op);
      725   std::string Translator::GetWhileBodyName(mlir::TFL::WhileOp while_op) {   in GetWhileBodyName()
      729   std::string Translator::GetWhileCondName(mlir::TFL::WhileOp while_op) {   in GetWhileCondName()
      [all …]
|
D | tf_tfl_translate.cc |
      147   mlir::TFL::QuantizationSpecs quant_specs;   in main()
      148   if (mlir::TFL::ParseInputNodeQuantSpecs(input_arrays, min_values, max_values,   in main()
      179   mlir::TFL::PassConfig pass_config(quant_specs);   in main()
|
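tf_tfl_translate.cc glues these pieces together in main(): parse the per-input quantization specs, wrap them in a PassConfig, and hand that to the pipeline builders declared in tf_tfl_passes.h. A rough sketch of that flow follows; the tail of the ParseInputNodeQuantSpecs argument list, the error convention, the header paths, and the pass-manager parameter of AddTFToTFLConversionPasses are assumptions beyond what the hits show, and the declaring namespace of AddTFToTFLConversionPasses is not visible in the listing.

    // Rough flow only; anything not visible in the hits above is assumed.
    #include "mlir/Pass/PassManager.h"
    #include "tensorflow/compiler/mlir/lite/common/tfl_pass_config.h"        // path assumed
    #include "tensorflow/compiler/mlir/lite/quantization/quantization_config.h"

    int RunConversionSketch(const std::string& input_arrays,
                            const std::string& min_values,
                            const std::string& max_values,
                            const std::string& inference_type) {
      mlir::MLIRContext context;
      mlir::TFL::QuantizationSpecs quant_specs;
      if (mlir::TFL::ParseInputNodeQuantSpecs(input_arrays, min_values, max_values,
                                              inference_type, &quant_specs)) {
        // Assumed convention: a true return means the specs could not be parsed.
        return 1;
      }
      mlir::TFL::PassConfig pass_config(quant_specs);
      mlir::PassManager pass_manager(&context);
      // Declared in tf_tfl_passes.h; namespace and second parameter assumed.
      AddTFToTFLConversionPasses(pass_config, &pass_manager);
      return 0;
    }
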
/external/tensorflow/tensorflow/compiler/mlir/lite/transforms/ |
D | optimize.cc |
      50   namespace TFL {   namespace
      200   struct FuseFullyConnectedAndAdd : public OpRewritePattern<TFL::AddOp> {
      201   using OpRewritePattern<TFL::AddOp>::OpRewritePattern;
      203   PatternMatchResult matchAndRewrite(TFL::AddOp add_op,   in matchAndRewrite()
      213   dyn_cast_or_null<TFL::FullyConnectedOp>(add_op.lhs().getDefiningOp());   in matchAndRewrite()
      279   rewriter.replaceOpWithNewOp<TFL::FullyConnectedOp>(   in matchAndRewrite()
      294   struct FuseFullyConnectedAndRelu : public OpRewritePattern<TFL::ReluOp> {
      295   using OpRewritePattern<TFL::ReluOp>::OpRewritePattern;
      297   PatternMatchResult matchAndRewrite(TFL::ReluOp relu_op,   in matchAndRewrite()
      321   struct FuseFullyConnectedAndMul : public OpRewritePattern<TFL::MulOp> {
      [all …]
|
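optimize.cc holds TFLite's fusion patterns; FuseFullyConnectedAndAdd above is typical of the pre-LogicalResult MLIR rewrite API (PatternMatchResult with matchSuccess()/matchFailure()). A stripped-down skeleton of that shape, with the actual bias-folding logic elided:

    // Skeleton only, following the PatternMatchResult-era API shown above;
    // the fusion details are elided.
    #include "mlir/IR/PatternMatch.h"
    #include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"

    namespace mlir {
    namespace TFL {

    struct FuseFullyConnectedAndAddSketch : public OpRewritePattern<TFL::AddOp> {
      using OpRewritePattern<TFL::AddOp>::OpRewritePattern;

      PatternMatchResult matchAndRewrite(TFL::AddOp add_op,
                                         PatternRewriter &rewriter) const override {
        // Only fuse when the add's lhs is produced directly by a fully_connected op.
        auto fc_op =
            dyn_cast_or_null<TFL::FullyConnectedOp>(add_op.lhs().getDefiningOp());
        if (!fc_op) return matchFailure();

        // ...fold the added constant into the bias operand, then replace the add:
        // rewriter.replaceOpWithNewOp<TFL::FullyConnectedOp>(add_op, /*...*/);
        return matchSuccess();
      }
    };

    }  // namespace TFL
    }  // namespace mlir
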
D | default_quant_params.cc |
      40   namespace TFL {   namespace
      60   auto biases = TFL::GetOpQuantSpec(use.getOwner())->biases_params;   in UsedAsBias()
      132   auto spec = TFL::GetOpQuantSpec(op);   in runOnFunction()
      155   llvm::isa<TFL::QuantizeOp>(*value.getUsers().begin()))   in AddToWorkListIfUnquantized()
      177   auto quantize = builder.create<TFL::QuantizeOp>(value.getLoc(), new_type,   in QuantizeValue()
      179   auto dequantize = builder.create<TFL::DequantizeOp>(   in QuantizeValue()
      195   if (auto dequant = llvm::dyn_cast<TFL::DequantizeOp>(non_bias_define)) {   in GetQuantParamsForBias()
|
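The QuantizeValue() hits above show the usual trick for attaching default quantization parameters: wrap a value in a tfl.quantize / tfl.dequantize pair so downstream users keep seeing a float-typed value. A hedged sketch of that wrapping follows; the TypeAttr argument, the result accessors, and the use-rewiring are assumptions layered on top of the two builder.create calls visible in the listing.

    // Sketch only: quantize -> dequantize wrapping as suggested by QuantizeValue();
    // new_type is the quantized tensor type computed elsewhere (assumed).
    #include "mlir/IR/Builders.h"
    #include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"

    static void QuantizeValueSketch(mlir::OpBuilder &builder, mlir::Value value,
                                    mlir::Type new_type) {
      auto quantize = builder.create<mlir::TFL::QuantizeOp>(
          value.getLoc(), new_type, value, mlir::TypeAttr::get(new_type));
      auto dequantize = builder.create<mlir::TFL::DequantizeOp>(
          value.getLoc(), value.getType(), quantize.getResult());
      // Existing users now read the dequantized (still float-typed) value...
      value.replaceAllUsesWith(dequantize.getResult());
      // ...except the quantize op itself, which must keep consuming the original.
      quantize.getOperation()->replaceUsesOfWith(dequantize.getResult(), value);
    }
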
D | prepare_composite_functions_tf.cc |
      46   namespace TFL {   namespace
      64   auto op = builder.create<mlir::TFL::EmbeddingLookupOp>(   in RewriteFunc()
      116   } else if (attr.getValue() == mlir::TFL::kLstmCellSimple) {   in ConvertTFImplements()
      123   } else if (attr.getValue() == mlir::TFL::kLayerNormalizedLstmCellSimple) {   in ConvertTFImplements()
|
D | prepare_tf.cc |
      64   namespace TFL {   namespace
      163   auto quantize = rewriter.create<TFL::QuantizeOp>(   in matchAndRewrite()
      165   auto dequantize = rewriter.create<TFL::DequantizeOp>(   in matchAndRewrite()
      304   TFL::Conv2DOp createTFLOp(ConvertTFConvOpMatchState *state,   in createTFLOp()
      309   return rewriter.create<TFL::Conv2DOp>(   in createTFLOp()
      359   TFL::DepthwiseConv2DOp createTFLOp(ConvertTFConvOpMatchState *state,   in createTFLOp()
      371   return rewriter.create<TFL::DepthwiseConv2DOp>(   in createTFLOp()
      514   TFL::populateWithGenerated(ctx, &patterns);   in runOnFunction()
      525   TFL::populateWithGenerated(ctx, &patterns);   in runOnFunction()
|
D | legalize_tf.cc |
      55   namespace TFL {   namespace
      133   rewriter.replaceOpWithNewOp<TFL::ConcatenationOp>(   in matchAndRewrite()
      134   op, output_type, values, mlir::TFL::ExtractSingleElementAsInteger(axis),   in matchAndRewrite()
      230   rewriter.replaceOpWithNewOp<TFL::SplitOp>(op, output_types,   in matchAndRewrite()
      245   rewriter.replaceOpWithNewOp<TFL::SplitVOp>(   in matchAndRewrite()
      287   rewriter.replaceOpWithNewOp<TFL::StridedSliceOp>(   in matchAndRewrite()
      320   rewriter.replaceOpWithNewOp<TFL::StridedSliceOp>(   in matchAndRewrite()
      511   rewriter.replaceOpWithNewOp<TFL::DivOp>(op, status_or_const_op.ValueOrDie(),   in matchAndRewrite()
|
D | quantize.cc |
      59   namespace TFL {   namespace
      88   TFL::populateWithGenerated(ctx, &patterns);   in runOnFunction()
|
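quantize.cc (and post_quantize.cc just below, plus prepare_tf.cc above) follow the TableGen-driven setup: TFL::populateWithGenerated() pulls in the DRR-generated rewrites, which are then applied greedily inside runOnFunction(). A sketch under the FunctionPass API of that era; the applyPatternsGreedily entry point (since renamed upstream), the pass registration, and any hand-written pattern lists are assumptions or omissions.

    // Sketch only: the populateWithGenerated()/greedy-rewrite idiom seen in the
    // pass files above; registration boilerplate is omitted.
    #include "mlir/IR/PatternMatch.h"
    #include "mlir/Pass/Pass.h"

    namespace mlir {
    namespace TFL {

    struct QuantizePassSketch : public FunctionPass<QuantizePassSketch> {
      void runOnFunction() override {
        OwningRewritePatternList patterns;
        FuncOp func = getFunction();
        auto *ctx = func.getContext();
        // Pull in the rewrites generated from the pass's .td pattern file.
        TFL::populateWithGenerated(ctx, &patterns);
        // Apply them until fixpoint (entry point name assumed for this era).
        applyPatternsGreedily(func, patterns);
      }
    };

    }  // namespace TFL
    }  // namespace mlir
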
D | post_quantize.cc |
      29   namespace TFL {   namespace
      127   TFL::populateWithGenerated(ctx, &patterns);   in runOnFunction()
|
D | legalize_ophint_func_op.cc |
      38   namespace TFL {   namespace
      114   *fused_op = builder->create<TFL::UnidirectionalSequenceRNNOp>(   in BuildUnidirectionalSequenceRnnOp()
      188   *fused_op = builder->create<TFL::UnidirectionalSequenceLSTMOp>(   in BuildUnidirectionalSequenceLSTMOp()
|
D | dilated_conv.cc | 18 namespace TFL { namespace
|
/external/tensorflow/tensorflow/compiler/mlir/lite/quantization/lite/ |
D | quantize_model.cc |
      76   TFL::QuantizationSpecs quant_specs;   in QuantizeModel()
      88   pm.addPass(TFL::CreatePrepareQuantizePass(quant_specs));   in QuantizeModel()
      89   pm.addPass(TFL::CreateQuantizePass());   in QuantizeModel()
      90   pm.addPass(TFL::CreatePostQuantizePass(emit_adaptor));   in QuantizeModel()
|
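quantize_model.cc drives the same three quantization passes over a module imported from a flatbuffer. A sketch of how such a pipeline is typically constructed and run; the PassManager setup, the flatbuffer import/export, and the error reporting are assumptions around the QuantizeModel() lines shown above. Headers are as in the AddQuantizationPassesSketch earlier.

    // Sketch only: the pass list comes from the QuantizeModel() hits above;
    // flatbuffer import/export and Status conversion are elided.
    mlir::LogicalResult RunPostTrainingQuantSketch(mlir::ModuleOp module,
                                                   mlir::MLIRContext* context) {
      mlir::PassManager pm(context);
      mlir::TFL::QuantizationSpecs quant_specs;   // populated from caller options
      const bool emit_adaptor = false;            // assumed default
      pm.addPass(mlir::TFL::CreatePrepareQuantizePass(quant_specs));
      pm.addPass(mlir::TFL::CreateQuantizePass());
      pm.addPass(mlir::TFL::CreatePostQuantizePass(emit_adaptor));
      return pm.run(module);
    }
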
D | tfl_to_std.h | 21 namespace TFL {
|
/external/tensorflow/tensorflow/compiler/mlir/lite/utils/ |
D | lstm_utils_test.cc |
      41   namespace TFL {   namespace
      116   mlir::TFL::ConvertLSTMCellSimpleToFusedLSTM convert(fused_lstm_func_);   in TEST_F()
      153   mlir::TFL::LSTMOp::getOperationName());   in TEST_F()
      189   mlir::TFL::ConvertLSTMCellSimpleToFusedLSTM convert(fused_lstm_func_cifg_);   in TEST_F()
      206   mlir::TFL::LSTMOp::getOperationName());   in TEST_F()
      214   mlir::TFL::ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM convert(   in TEST_F()
      232   mlir::TFL::LSTMOp::getOperationName());   in TEST_F()
|
D | lstm_utils.h |
      32   namespace TFL {
      155   mlir::TFL::LSTMOp lstm_;
|
D | stateful_ops_utils.h | 22 namespace TFL {
|
D | stateful_ops_utils.cc | 24 namespace TFL { namespace
|
/external/llvm/lib/Target/X86/ |
D | X86CallFrameOptimization.cpp |
      106   const X86FrameLowering *TFL;   member in __anon091004fc0111::X86CallFrameOptimization
      133   (MF.getFunction()->needsUnwindTableEntry() && !TFL->hasFP(MF))))   in isLegal()
      182   unsigned StackAlign = TFL->getStackAlignment();   in isProfitable()
      219   TFL = STI->getFrameLowering();   in runOnMachineFunction()
      532   if (!TFL->hasFP(MF))   in adjustCallSequence()
      533   TFL->BuildCFI(   in adjustCallSequence()
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/X86/ |
D | X86CallFrameOptimization.cpp |
      127   const X86FrameLowering *TFL;   member in __anonf67ef9940111::X86CallFrameOptimization
      151   (MF.getFunction().needsUnwindTableEntry() && !TFL->hasFP(MF))))   in isLegal()
      200   unsigned StackAlign = TFL->getStackAlignment();   in isProfitable()
      237   TFL = STI->getFrameLowering();   in runOnMachineFunction()
      578   if (!TFL->hasFP(MF))   in adjustCallSequence()
      579   TFL->BuildCFI(   in adjustCallSequence()
|
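In the two X86CallFrameOptimization.cpp copies above, TFL is unrelated to TensorFlow Lite: it is the pass's cached pointer to the target frame lowering (X86FrameLowering), used to check whether the function keeps a frame pointer and to read the stack alignment. A minimal sketch of that member's setup and use, per the hits; the MachineFunctionPass boilerplate is omitted and the in-tree header paths are assumed.

    // Sketch only: mirrors the TFL (frame lowering) usage in the hits above.
    #include "X86FrameLowering.h"   // in-tree lib/Target/X86 header, path assumed
    #include "X86Subtarget.h"       // likewise
    #include "llvm/CodeGen/MachineFunction.h"

    static void InspectFrameLowering(llvm::MachineFunction &MF) {
      const llvm::X86Subtarget *STI = &MF.getSubtarget<llvm::X86Subtarget>();
      const llvm::X86FrameLowering *TFL = STI->getFrameLowering();
      // The call-frame optimization is only legal without a frame pointer, and
      // stack alignment feeds its profitability heuristic.
      if (!TFL->hasFP(MF)) {
        unsigned StackAlign = TFL->getStackAlignment();
        (void)StackAlign;
      }
    }
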
/external/tensorflow/tensorflow/compiler/mlir/lite/ir/ |
D | tfl_op_interfaces.td |
      24   // TFL op interface for stateful operands.
      43   // TFL op interface for output channel index.
|
D | dialect_registration.cc | 19 static mlir::DialectRegistration<mlir::TFL::TensorFlowLiteDialect> tfl_ops;
|
D | tfl_ops.h | 35 namespace TFL {
|
/external/tensorflow/tensorflow/compiler/mlir/lite/python/ |
D | graphdef_to_tfl_flatbuffer.cc |
      173   mlir::TFL::QuantizationSpecs quant_specs;   in ConvertGraphDefToTFLiteFlatBuffer()
      216   if (mlir::TFL::GetInputNodeQuantSpecs(node_names, node_mins, node_maxs,   in ConvertGraphDefToTFLiteFlatBuffer()
      275   mlir::TFL::PassConfig pass_config(quant_specs);   in ConvertGraphDefToTFLiteFlatBuffer()
|