/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "optimize_bytecode.h"

#include "bytecodeopt_options.h"
#include "bytecode_analysis_results.h"
#include "codegen.h"
#include "common.h"
#include "compiler/optimizer/ir_builder/ir_builder.h"
#include "compiler/optimizer/optimizations/branch_elimination.h"
#include "compiler/optimizer/optimizations/cleanup.h"
#include "compiler/optimizer/optimizations/lowering.h"
#include "compiler/optimizer/optimizations/move_constants.h"
#include "compiler/optimizer/optimizations/regalloc/reg_alloc.h"
#include "compiler/optimizer/optimizations/vn.h"
#include "constant_propagation/constant_propagation.h"
#include "libpandabase/mem/pool_manager.h"
#include "libpandafile/class_data_accessor-inl.h"
#include "libpandafile/module_data_accessor-inl.h"
#include "reg_acc_alloc.h"
#include "reg_encoder.h"
#include "runtime_adapter.h"

namespace panda::bytecodeopt {
// NOLINTNEXTLINE(fuchsia-statically-constructed-objects)
panda::bytecodeopt::Options options("");

template <typename T>
constexpr void RunOpts(compiler::Graph *graph)
{
    graph->RunPass<compiler::Cleanup>();
    graph->RunPass<T>();
}

template <typename First, typename Second, typename... Rest>
constexpr void RunOpts(compiler::Graph *graph)
{
    RunOpts<First>(graph);
    RunOpts<Second, Rest...>(graph);
}
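// Note: the variadic overload above unrolls recursively, so a call such as
// RunOpts<compiler::ValNum, compiler::Lowering, compiler::MoveConstants>(graph)
// runs compiler::Cleanup before each of the listed passes, in order.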

bool RunOptimizations(compiler::Graph *graph, BytecodeOptIrInterface *iface)
{
    constexpr int OPT_LEVEL_0 = 0;

    if (panda::bytecodeopt::options.GetOptLevel() == OPT_LEVEL_0) {
        return false;
    }

    graph->RunPass<compiler::Cleanup>();
    ASSERT(graph->IsDynamicMethod());

    if (compiler::options.IsCompilerBranchElimination()) {
        graph->RunPass<ConstantPropagation>(iface);
        RunOpts<compiler::BranchElimination>(graph);
    }

    RunOpts<compiler::ValNum, compiler::Lowering, compiler::MoveConstants>(graph);

    // this pass should run just before the register allocator
    graph->RunPass<compiler::Cleanup>();
    graph->RunPass<RegAccAlloc>();

    graph->RunPass<compiler::Cleanup>();
    if (!RegAlloc(graph)) {
        LOG(ERROR, BYTECODE_OPTIMIZER) << "Failed compiler::RegAlloc";
        return false;
    }

    graph->RunPass<compiler::Cleanup>();
    if (!graph->RunPass<RegEncoder>()) {
        LOG(ERROR, BYTECODE_OPTIMIZER) << "Failed RegEncoder";
        return false;
    }

    return true;
}

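// Builds the map from bytecode pc to pandasm::Ins that BytecodeGen later uses to
// re-build debug (line-number) information for the optimized code.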
void BuildMapFromPcToIns(pandasm::Function &function, BytecodeOptIrInterface &ir_interface,
                         const compiler::Graph *graph, compiler::RuntimeInterface::MethodPtr method_ptr)
{
    CHECK_NOT_NULL(graph);
    function.local_variable_debug.clear();
    auto *pc_ins_map = ir_interface.GetPcInsMap();
    pc_ins_map->reserve(function.ins.size());
    auto instructions_buf = graph->GetRuntime()->GetMethodCode(method_ptr);
    compiler::BytecodeInstructions instructions(instructions_buf, graph->GetRuntime()->GetMethodCodeSize(method_ptr));
    compiler::BytecodeIterator insn_iter = instructions.begin();
    for (pandasm::Ins &ins : function.ins) {
        /**
         * pc_ins_map is built from the instruction data of the emitted abc file and the original function assembly.
         * Instructions with an invalid opcode are removed by the emitter but kept in the function assembly
         * structure, so they need to be skipped here.
         **/
        if (ins.opcode == pandasm::Opcode::INVALID) {
            continue;
        }
        pc_ins_map->emplace(instructions.GetPc(*insn_iter), &ins);
        ++insn_iter;
        if (insn_iter == instructions.end()) {
            break;
        }
    }
}

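// Fills in missing column-number debug info: leading instructions without a column number
// receive the first valid one found, and every later instruction without a column number
// inherits it from the closest preceding instruction that has one.
// Illustrative example: column numbers {invalid, invalid, 5, invalid, 9} become {5, 5, 5, 5, 9}.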
static void ColumnNumberPropagate(pandasm::Function *function)
{
    auto &ins_vec = function->ins;
    uint32_t cn = compiler::INVALID_COLUMN_NUM;
    // handle the instructions at the beginning of the code that do not have a column number
    size_t k = 0;
    while (k < ins_vec.size() && cn == compiler::INVALID_COLUMN_NUM) {
        cn = ins_vec[k++].ins_debug.column_number;
    }
    if (cn == compiler::INVALID_COLUMN_NUM) {
        LOG(DEBUG, BYTECODE_OPTIMIZER) << "Failed ColumnNumberPropagate: All insts have invalid column number";
        return;
    }
    for (size_t j = 0; j < k - 1; j++) {
        ins_vec[j].ins_debug.SetColumnNumber(cn);
    }

    // handle the other instructions that do not have a column number
    for (; k < ins_vec.size(); k++) {
        if (ins_vec[k].ins_debug.column_number != compiler::INVALID_COLUMN_NUM) {
            cn = ins_vec[k].ins_debug.column_number;
        } else {
            ins_vec[k].ins_debug.SetColumnNumber(cn);
        }
    }
}

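// Same propagation scheme as ColumnNumberPropagate, applied to line numbers, where 0 marks
// a missing line number.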
static void LineNumberPropagate(pandasm::Function *function)
{
    if (function == nullptr || function->ins.empty()) {
        return;
    }
    size_t ln = 0;
    auto &ins_vec = function->ins;

    // handle the instructions at the beginning of the code that do not have a line number
    size_t i = 0;
    while (i < ins_vec.size() && ln == 0) {
        ln = ins_vec[i++].ins_debug.line_number;
    }
    if (ln == 0) {
        LOG(DEBUG, BYTECODE_OPTIMIZER) << "Failed LineNumberPropagate: All insts have invalid line number";
        return;
    }
    for (size_t j = 0; j < i - 1; j++) {
        ins_vec[j].ins_debug.SetLineNumber(ln);
    }

    // handle the other instructions that do not have a line number
    for (; i < ins_vec.size(); i++) {
        if (ins_vec[i].ins_debug.line_number != 0) {
            ln = ins_vec[i].ins_debug.line_number;
        } else {
            ins_vec[i].ins_debug.SetLineNumber(ln);
        }
    }
}

static void DebugInfoPropagate(pandasm::Function &function, const compiler::Graph *graph,
                               BytecodeOptIrInterface &ir_interface)
{
    LineNumberPropagate(&function);
    if (graph->IsDynamicMethod()) {
        ColumnNumberPropagate(&function);
    }
    ir_interface.ClearPcInsMap();
}

static bool SkipFunction(const pandasm::Function &function, const std::string &func_name)
{
    if (panda::bytecodeopt::options.WasSetMethodRegex()) {
        static std::regex rgx(panda::bytecodeopt::options.GetMethodRegex());
        if (!std::regex_match(func_name, rgx)) {
            LOG(INFO, BYTECODE_OPTIMIZER) << "Skip Function " << func_name << ": Function's name doesn't match regex";
            return true;
        }
    }

    if (panda::bytecodeopt::options.IsSkipMethodsWithEh() && !function.catch_blocks.empty()) {
        LOG(INFO, BYTECODE_OPTIMIZER) << "Was not optimized " << func_name << ": Function has catch blocks";
        return true;
    }

    if ((function.regs_num + function.GetParamsNum()) > compiler::VIRTUAL_FRAME_SIZE) {
        LOG(ERROR, BYTECODE_OPTIMIZER) << "Unable to optimize " << func_name
                                       << ": Function frame size is larger than allowed one";
        return true;
    }
    return false;
}

static void SetCompilerOptions()
{
    compiler::options.SetCompilerUseSafepoint(false);
    compiler::options.SetCompilerSupportInitObjectInst(true);
    if (!compiler::options.WasSetCompilerMaxBytecodeSize()) {
        compiler::options.SetCompilerMaxBytecodeSize(MAX_BYTECODE_SIZE);
    }
}

static bool StringStartsWith(const std::string &str, const std::string &prefix)
{
    return (str.size() >= prefix.size()) &&
        std::equal(prefix.begin(), prefix.end(), str.begin());
}

static std::string ModuleRequestOffsetToRecordName(const panda_file::File &pfile,
                                                   uint32_t module_request_offset)
{
    constexpr char AND_TOKEN = '&';
    const std::string BUNDLE_PREFIX = "@bundle:";
    const std::string PACKAGE_PREFIX = "@package:";
    const std::string NORMALIZED_NON_NATIVE_PREFIX = "@normalized:N&";

    auto record_ohmurl = GetStringFromPandaFile(pfile, module_request_offset);
    // Assumptions about the currently possible ohmurl formats:
    // @bundle:record_name
    // @package:record_name
    // @normalized:N&xxxx&record_name
    // Extract record_name from each of these cases.
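    // Illustrative (hypothetical) examples of the extraction:
    //   "@bundle:com.example.app/entry"                   -> "com.example.app/entry"
    //   "@package:pkg_modules/lib"                        -> "pkg_modules/lib"
    //   "@normalized:N&moduleName&com.example.app/entry"  -> "com.example.app/entry"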
    if (StringStartsWith(record_ohmurl, BUNDLE_PREFIX)) {
        return record_ohmurl.substr(BUNDLE_PREFIX.size());
    } else if (StringStartsWith(record_ohmurl, PACKAGE_PREFIX)) {
        return record_ohmurl.substr(PACKAGE_PREFIX.size());
    } else if (StringStartsWith(record_ohmurl, NORMALIZED_NON_NATIVE_PREFIX)) {
        size_t second_and_pos = record_ohmurl.find(AND_TOKEN, NORMALIZED_NON_NATIVE_PREFIX.size());
        if (second_and_pos != std::string::npos) {
            return record_ohmurl.substr(second_and_pos + 1);
        }
    }
    // Otherwise, return an empty string to indicate that no record name was found
    return "";
}

static void AnalysisModuleRecordInfoOfModuleDataAccessor(const panda_file::File &pfile,
                                                         panda_file::ModuleDataAccessor &mda,
                                                         BytecodeAnalysisResult &result)
{
    const auto &request_modules_offsets = mda.getRequestModules();
    int regular_import_idx = 0;
    std::unordered_set<std::string> local_export_local_names;
    mda.EnumerateModuleRecord([&](panda_file::ModuleTag tag, uint32_t export_name_offset,
                                  uint32_t request_module_idx, uint32_t import_name_offset,
                                  uint32_t local_name_offset) {
        switch (tag) {
            case panda_file::ModuleTag::LOCAL_EXPORT: {
                std::string export_name = GetStringFromPandaFile(pfile, export_name_offset);
                std::string local_name = GetStringFromPandaFile(pfile, local_name_offset);
                // The slot of stmodulevar/ldlocalmodulevar is the index of its local name, while
                // one local name can match multiple external names with "export...as...".
                // Local export entries are sorted by their local name, so using an unordered_set
                // gives the correct index as (size - 1) (indices start from 0).
                // See SourceTextModuleRecord::AssignIndexToModuleVariable for more details.
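                // Hypothetical example: for "let v = 1; export {v as x, v as y};" the single
                // local name "v" takes one slot, and both export names "x" and "y" map to it.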
                local_export_local_names.insert(local_name);
                result.SetLocalExportInfo(local_export_local_names.size() - 1, export_name);
                break;
            }
            case panda_file::ModuleTag::REGULAR_IMPORT: {
                std::string request_module_name =
                    ModuleRequestOffsetToRecordName(pfile, request_modules_offsets[request_module_idx]);
                if (!request_module_name.empty()) {
                    std::string import_name = GetStringFromPandaFile(pfile, import_name_offset);
                    result.SetRegularImportInfo(regular_import_idx, import_name, request_module_name);
                }
                regular_import_idx++;
                break;
            }
            case panda_file::ModuleTag::NAMESPACE_IMPORT: {
                // The slot of the getmodulenamespace bytecode is its request_module_idx
                std::string namespace_name =
                    ModuleRequestOffsetToRecordName(pfile, request_modules_offsets[request_module_idx]);
                if (!namespace_name.empty()) {
                    result.SetNamespaceImportInfo(request_module_idx, namespace_name);
                }
                break;
            }
            default:
                break;
        }
    });
}

static void AnalysisModuleRecordInfo(const panda_file::File &pfile,
                                     panda_file::ClassDataAccessor &cda,
                                     BytecodeAnalysisResult &result)
{
    const std::string MODULE_RECORD_IDX_FIELD_NAME = "moduleRecordIdx";
    // RequireGlobalOptimization is true only under mergeAbc mode, where the module record is stored
    // in the moduleRecordIdx field according to Emitter::AddSourceTextModuleRecord
    cda.EnumerateFields([&](panda_file::FieldDataAccessor &fda) -> void {
        if (fda.IsExternal()) {
            return;
        }
        std::string field_name = GetStringFromPandaFile(pfile, fda.GetNameId().GetOffset());
        if (field_name == MODULE_RECORD_IDX_FIELD_NAME) {
            panda_file::File::EntityId module_entity_id(fda.GetValue<uint32_t>().value());
            panda_file::ModuleDataAccessor mda(pfile, module_entity_id);
            AnalysisModuleRecordInfoOfModuleDataAccessor(pfile, mda, result);
        }
    });
}

static void AnalysisModuleConstantValue(panda_file::ClassDataAccessor &cda, const std::string &record_name,
                                        bool is_dynamic, const BytecodeOptIrInterface &ir_interface,
                                        BytecodeAnalysisResult &result)
{
    const std::string MAIN_METHOD_NAME = ".func_main_0";
    cda.EnumerateMethods([MAIN_METHOD_NAME, record_name, is_dynamic, ir_interface, &result](
            panda_file::MethodDataAccessor &mda) {
        if (mda.IsExternal()) {
            return false;
        }

        // Only analyze func_main_0 for now, since the assignment instructions of all exported constants
        // are in func_main_0, and for now the bytecode analysis phase only covers analyzing the initial
        // values of module constants for branch elimination
        auto func_name = ir_interface.GetMethodIdByOffset(mda.GetMethodId().GetOffset());
        if (func_name != record_name + MAIN_METHOD_NAME) {
            return true;
        }

        ArenaAllocator allocator {SpaceType::SPACE_TYPE_COMPILER};
        ArenaAllocator local_allocator {SpaceType::SPACE_TYPE_COMPILER, nullptr, true};

        auto *prog = ir_interface.GetProgram();
        auto it = prog->function_table.find(func_name);
        if (it == prog->function_table.end()) {
            LOG(ERROR, BYTECODE_OPTIMIZER) << "Cannot find function: " << func_name;
            return false;
        }

        panda::pandasm::Function &function = it->second;
        if (SkipFunction(function, func_name)) {
            return false;
        }

        auto method_ptr = reinterpret_cast<compiler::RuntimeInterface::MethodPtr>(mda.GetMethodId().GetOffset());
        panda::BytecodeOptimizerRuntimeAdapter adapter(mda.GetPandaFile());
        auto graph = allocator.New<compiler::Graph>(&allocator, &local_allocator, Arch::NONE, method_ptr, &adapter,
                                                    false, nullptr, is_dynamic, true);
        if ((graph == nullptr) || !graph->RunPass<panda::compiler::IrBuilder>()) {
            LOG(ERROR, BYTECODE_OPTIMIZER) << "Analysis " << func_name << ": IR builder failed!";
            return false;
        }

        ModuleConstantAnalysisResult module_constant_results;
        ModuleConstantAnalyzer analyzer(graph, result.GetConstantLocalExportSlots(),
                                        module_constant_results, &ir_interface);
        graph->RunPass<ModuleConstantAnalyzer>(&analyzer);
        result.SetModuleConstantAnalysisResult(module_constant_results);
        return true;
    });
}

bool AnalysisBytecode(pandasm::Program *prog, const pandasm::AsmEmitter::PandaFileToPandaAsmMaps *maps,
                      const std::string &pfile_name, bool is_dynamic, bool has_memory_pool)
{
    if (!has_memory_pool) {
        PoolManager::Initialize(PoolType::MALLOC);
    }

    auto pfile = panda_file::OpenPandaFile(pfile_name);
    if (!pfile) {
        LOG(FATAL, BYTECODE_OPTIMIZER) << "Can not open binary file: " << pfile_name;
        return false;
    }

    for (uint32_t id : pfile->GetClasses()) {
        panda_file::File::EntityId record_id {id};

        if (pfile->IsExternal(record_id)) {
            continue;
        }

        panda_file::ClassDataAccessor cda {*pfile, record_id};
        // Skip annotation records since they do not contain real code for now
        if (cda.IsAnnotation()) {
            continue;
        }
        std::string record_type_descriptor(utf::Mutf8AsCString(cda.GetName().data));
        std::string record_name = pandasm::Type::FromDescriptor(record_type_descriptor).GetName();

        bool exists = false;
        auto &result = BytecodeAnalysisResults::GetOrCreateBytecodeAnalysisResult(record_name, exists);
        if (exists) {
            return true;
        }
        auto ir_interface = BytecodeOptIrInterface(maps, prog);
        AnalysisModuleRecordInfo(*pfile, cda, result);
        AnalysisModuleConstantValue(cda, record_name, is_dynamic, ir_interface, result);
    }

    if (!has_memory_pool) {
        PoolManager::Finalize();
    }

    return true;
}

bool OptimizeFunction(pandasm::Program *prog, const pandasm::AsmEmitter::PandaFileToPandaAsmMaps *maps,
                      const panda_file::MethodDataAccessor &mda, bool is_dynamic)
{
    ArenaAllocator allocator {SpaceType::SPACE_TYPE_COMPILER};
    ArenaAllocator local_allocator {SpaceType::SPACE_TYPE_COMPILER, nullptr, true};

    SetCompilerOptions();

    auto ir_interface = BytecodeOptIrInterface(maps, prog);

    auto func_name = ir_interface.GetMethodIdByOffset(mda.GetMethodId().GetOffset());
    LOG(INFO, BYTECODE_OPTIMIZER) << "Optimizing function: " << func_name;

    auto it = prog->function_table.find(func_name);
    if (it == prog->function_table.end()) {
        LOG(ERROR, BYTECODE_OPTIMIZER) << "Cannot find function: " << func_name;
        return false;
    }
    auto method_ptr = reinterpret_cast<compiler::RuntimeInterface::MethodPtr>(mda.GetMethodId().GetOffset());

    panda::BytecodeOptimizerRuntimeAdapter adapter(mda.GetPandaFile());
    auto graph = allocator.New<compiler::Graph>(&allocator, &local_allocator, Arch::NONE, method_ptr, &adapter, false,
                                                nullptr, is_dynamic, true);

    panda::pandasm::Function &function = it->second;

    if (SkipFunction(function, func_name)) {
        return false;
    }

    // build a map from pc to pandasm::Ins (to re-build line-number info in BytecodeGen)
    BuildMapFromPcToIns(function, ir_interface, graph, method_ptr);

    if ((graph == nullptr) || !graph->RunPass<panda::compiler::IrBuilder>()) {
        LOG(ERROR, BYTECODE_OPTIMIZER) << "Optimizing " << func_name << ": IR builder failed!";
        return false;
    }

    if (graph->HasIrreducibleLoop()) {
        LOG(ERROR, BYTECODE_OPTIMIZER) << "Optimizing " << func_name << ": Graph has irreducible loop!";
        return false;
    }

    if (!RunOptimizations(graph, &ir_interface)) {
        LOG(ERROR, BYTECODE_OPTIMIZER) << "Optimizing " << func_name << ": Running optimizations failed!";
        return false;
    }

    if (!graph->RunPass<BytecodeGen>(&function, &ir_interface, prog)) {
        LOG(ERROR, BYTECODE_OPTIMIZER) << "Optimizing " << func_name << ": Code generation failed!";
        return false;
    }

    DebugInfoPropagate(function, graph, ir_interface);

    function.value_of_first_param =
        static_cast<int64_t>(graph->GetStackSlotsCount()) - 1;  // Work around promotion rules
    function.regs_num = static_cast<size_t>(function.value_of_first_param + 1);

    if (auto frame_size = function.regs_num + function.GetParamsNum(); frame_size >= NUM_COMPACTLY_ENCODED_REGS) {
        LOG(INFO, BYTECODE_OPTIMIZER) << "Function " << func_name << " has frame size " << frame_size;
    }

    LOG(DEBUG, BYTECODE_OPTIMIZER) << "Optimized " << func_name;

    return true;
}

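// Walks every non-external method of every non-external record in the panda file and tries to
// optimize it; returns false if optimizing any method fails (or is skipped).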
bool OptimizePandaFile(pandasm::Program *prog, const pandasm::AsmEmitter::PandaFileToPandaAsmMaps *maps,
                       const std::string &pfile_name, bool is_dynamic)
{
    auto pfile = panda_file::OpenPandaFile(pfile_name);
    if (!pfile) {
        LOG(FATAL, BYTECODE_OPTIMIZER) << "Can not open binary file: " << pfile_name;
    }

    bool result = true;

    for (uint32_t id : pfile->GetClasses()) {
        panda_file::File::EntityId record_id {id};

        if (pfile->IsExternal(record_id)) {
            continue;
        }

        panda_file::ClassDataAccessor cda {*pfile, record_id};
        cda.EnumerateMethods([prog, maps, is_dynamic, &result](panda_file::MethodDataAccessor &mda) {
            if (!mda.IsExternal()) {
                result = OptimizeFunction(prog, maps, mda, is_dynamic) && result;
            }
        });
    }

    return result;
}

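// Entry point of the bytecode optimizer. A minimal (hypothetical) call from a host tool could look like:
//   panda::bytecodeopt::OptimizeBytecode(&prog, &maps, "out.abc", /* is_dynamic */ true,
//                                        /* has_memory_pool */ false);
// With has_memory_pool == false the function initializes and finalizes the PoolManager around the run itself.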
bool OptimizeBytecode(pandasm::Program *prog, const pandasm::AsmEmitter::PandaFileToPandaAsmMaps *maps,
                      const std::string &pandafile_name, bool is_dynamic, bool has_memory_pool)
{
    ASSERT(prog != nullptr);
    ASSERT(maps != nullptr);

    if (!has_memory_pool) {
        PoolManager::Initialize(PoolType::MALLOC);
    }

    auto res = OptimizePandaFile(prog, maps, pandafile_name, is_dynamic);

    if (!has_memory_pool) {
        PoolManager::Finalize();
    }

    return res;
}
}  // namespace panda::bytecodeopt