/*
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "optimizer_run.h"
#include "compile_method.h"
#include "mem/pool_manager.h"
#include "mem/code_allocator.h"
#include "include/class.h"
#include "include/method.h"
#include "optimizer/ir/ir_constructor.h"
#include "optimizer/ir/runtime_interface.h"
#include "optimizer/analysis/loop_analyzer.h"
#include "optimizer/pass.h"
#include "optimizer/ir_builder/ir_builder.h"
#include "utils/logger.h"
#include "code_info/code_info.h"
#include "events/events.h"
#include "trace/trace.h"
#include "optimizer/optimizations/regalloc/reg_alloc_linear_scan.h"
#include "optimizer/code_generator/codegen.h"

namespace ark::compiler {

#ifdef PANDA_COMPILER_DEBUG_INFO
static Span<uint8_t> EmitElf(Graph *graph, CodeAllocator *codeAllocator, ArenaAllocator *gdbDebugInfoAllocator,
                             const std::string &methodName);
#endif

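// JITStats accumulates per-method compilation statistics. SetCompilationStart() and
// EndCompilationWithStats() bracket a single compilation; DumpCsv() writes the collected rows.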
void JITStats::SetCompilationStart()
{
    ASSERT(startTime_ == 0);
    startTime_ = time::GetCurrentTimeInNanos();
}

void JITStats::EndCompilationWithStats(const std::string &methodName, bool isOsr, size_t bcSize, size_t codeSize)
{
    ASSERT(startTime_ != 0);
    auto time = time::GetCurrentTimeInNanos() - startTime_;
    statsList_.push_back(Entry {PandaString(methodName, internalAllocator_->Adapter()), isOsr, bcSize, codeSize, time});
    startTime_ = 0;
}

void JITStats::ResetCompilationStart()
{
    startTime_ = 0;
}

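// CSV row layout (per the streaming order below):
//   "<method name>"<sep><isOsr><sep><bytecode size><sep><machine code size><sep><time, ns>
// A hypothetical row with sep == ',' would look like: "Foo::bar",0,120,512,1834567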
void JITStats::DumpCsv(char sep)
{
    ASSERT(g_options.WasSetCompilerDumpJitStatsCsv());
    std::ofstream csv(g_options.GetCompilerDumpJitStatsCsv(), std::ofstream::trunc);
    for (const auto &i : statsList_) {
        csv << "\"" << i.methodName << "\"" << sep;
        csv << i.isOsr << sep;
        csv << i.bcSize << sep;
        csv << i.codeSize << sep;
        csv << i.time;
        csv << '\n';
    }
}

static void EndCompilation(const std::string &methodName, bool isOsr, size_t bcSize, [[maybe_unused]] uintptr_t address,
                           size_t codeSize, [[maybe_unused]] size_t infoSize,
                           [[maybe_unused]] events::CompilationStatus status, JITStats *jitStats)
{
    EVENT_COMPILATION(methodName, isOsr, bcSize, address, codeSize, infoSize, status);
    if (jitStats != nullptr) {
        ASSERT((codeSize != 0) == (status == events::CompilationStatus::COMPILED));
        jitStats->EndCompilationWithStats(methodName, isOsr, bcSize, codeSize);
    }
}

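// Resolve the target architecture: an explicitly requested arch wins; otherwise use the runtime
// arch, unless cross-compilation was requested on an x86_64 host via the cross-arch option.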
Arch ChooseArch(Arch arch)
{
    if (arch != Arch::NONE) {
        return arch;
    }

    arch = RUNTIME_ARCH;
    if (RUNTIME_ARCH == Arch::X86_64 && g_options.WasSetCompilerCrossArch()) {
        arch = GetArchFromString(g_options.GetCompilerCrossArch());
    }

    return arch;
}

static bool CheckSingleImplementation(Graph *graph)
{
    // Check that all methods that were inlined due to their single-implementation property still have
    // this property; otherwise we must drop the compiled code.
    // NOTE(compiler): we need to reset the hotness counter here to avoid yet another warmup phase.
    auto cha = graph->GetRuntime()->GetCha();
    for (auto siMethod : graph->GetSingleImplementationList()) {
        if (!cha->IsSingleImplementation(siMethod)) {
            LOG(WARNING, COMPILER)
                << "Method lost single-implementation property after compilation, so we need to drop "
                   "whole compiled code: "
                << graph->GetRuntime()->GetMethodFullName(siMethod);
            return false;
        }
    }
    return true;
}

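// The emitted region has the following layout (all offsets are computed below):
//   [ CodePrefix | padding to code alignment | machine code | padding to uint32_t | code info ]
// The region is allocated unprotected, filled, and then protected via ProtectCode().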
static Span<uint8_t> EmitCode(const Graph *graph, CodeAllocator *allocator)
{
    size_t codeOffset = RoundUp(CodePrefix::STRUCT_SIZE, GetCodeAlignment(graph->GetArch()));
    CodePrefix prefix;
    prefix.codeSize = graph->GetCode().size();
    prefix.codeInfoOffset = codeOffset + RoundUp(graph->GetCode().size(), sizeof(uint32_t));
    prefix.codeInfoSize = graph->GetCodeInfoData().size();
    size_t codeSize = prefix.codeInfoOffset + prefix.codeInfoSize;
    auto memRange = allocator->AllocateCodeUnprotected(codeSize);
    if (memRange.GetSize() == 0) {
        return Span<uint8_t> {};
    }

    auto data = reinterpret_cast<uint8_t *>(memRange.GetData());
    memcpy_s(data, sizeof(CodePrefix), &prefix, sizeof(CodePrefix));
    // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
    memcpy_s(&data[codeOffset], graph->GetCode().size(), graph->GetCode().data(), graph->GetCode().size());
    // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
    memcpy_s(&data[prefix.codeInfoOffset], graph->GetCodeInfoData().size(), graph->GetCodeInfoData().data(),
             graph->GetCodeInfoData().size());

    allocator->ProtectCode(memRange);

    return Span<uint8_t>(reinterpret_cast<uint8_t *>(memRange.GetData()), codeSize);
}

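// Emit the compiled code (optionally wrapped into an in-memory ELF when debug info is enabled),
// log the installation address, report the compilation event, and return the entry point of the
// installed code, or nullptr on allocation failure.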
static uint8_t *GetEntryPoint(Graph *graph, [[maybe_unused]] Method *method, const std::string &methodName,
                              [[maybe_unused]] bool isOsr, CodeAllocator *codeAllocator,
                              [[maybe_unused]] ArenaAllocator *gdbDebugInfoAllocator, JITStats *jitStats)
{
#ifdef PANDA_COMPILER_DEBUG_INFO
    auto generatedData = g_options.IsCompilerEmitDebugInfo()
                             ? EmitElf(graph, codeAllocator, gdbDebugInfoAllocator, methodName)
                             : EmitCode(graph, codeAllocator);
#else
    auto generatedData = EmitCode(graph, codeAllocator);
#endif
    if (generatedData.Empty()) {
        LOG(INFO, COMPILER) << "Compilation failed due to memory allocation failure: " << methodName;
        return nullptr;
    }
    CodeInfo codeInfo(generatedData);
    LOG(INFO, COMPILER) << "Compiled code for '" << methodName << "' has been installed to "
                        << bit_cast<void *>(codeInfo.GetCode()) << ", code size " << codeInfo.GetCodeSize();

    auto entryPoint = const_cast<uint8_t *>(codeInfo.GetCode());
    EndCompilation(methodName, isOsr, method->GetCodeSize(), reinterpret_cast<uintptr_t>(entryPoint),
                   codeInfo.GetCodeSize(), codeInfo.GetInfoSize(), events::CompilationStatus::COMPILED, jitStats);
    return entryPoint;
}

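// Thin wrapper over the pass pipeline: propagate the source language to the graph, attach
// success/failure callbacks, and delegate to the single-argument RunOptimizations overload.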
template <TaskRunnerMode RUNNER_MODE>
static void RunOptimizations(CompilerTaskRunner<RUNNER_MODE> taskRunner, JITStats *jitStats)
{
    auto &taskCtx = taskRunner.GetContext();
    taskCtx.GetGraph()->SetLanguage(taskCtx.GetMethod()->GetClass()->GetSourceLang());

    taskRunner.AddCallbackOnSuccess([]([[maybe_unused]] CompilerContext<RUNNER_MODE> &compilerCtx) {
        LOG(DEBUG, COMPILER) << "The method " << compilerCtx.GetMethodName() << " is compiled";
    });
    taskRunner.AddCallbackOnFail([jitStats](CompilerContext<RUNNER_MODE> &compilerCtx) {
        if (!compiler::g_options.IsCompilerIgnoreFailures()) {
            LOG(FATAL, COMPILER) << "RunOptimizations failed!";
        }
        LOG(WARNING, COMPILER) << "RunOptimizations failed!";
        EndCompilation(compilerCtx.GetMethodName(), compilerCtx.IsOsr(), compilerCtx.GetMethod()->GetCodeSize(), 0, 0,
                       0, events::CompilationStatus::FAILED, jitStats);
    });

    // Run compiler optimizations over the created graph
    RunOptimizations<RUNNER_MODE>(std::move(taskRunner));
}

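// Post-compilation check and installation: re-validate single-implementation assumptions (for
// static languages), drop code compiled for a foreign architecture, then emit the native code and
// install it as either the OSR entry or the regular compiled entry point.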
template <TaskRunnerMode RUNNER_MODE>
static bool CheckCompilation(RuntimeInterface *runtime, CodeAllocator *codeAllocator,
                             ArenaAllocator *gdbDebugInfoAllocator, JITStats *jitStats, bool isDynamic, Arch arch,
                             CompilerContext<RUNNER_MODE> &compilerCtx)
{
    auto *graph = compilerCtx.GetGraph();

    ASSERT(graph != nullptr && graph->GetCode().data() != nullptr);

    auto &name = compilerCtx.GetMethodName();
    auto isOsr = compilerCtx.IsOsr();
    auto *method = compilerCtx.GetMethod();

    if (!isDynamic && !CheckSingleImplementation(graph)) {
        EndCompilation(name, isOsr, method->GetCodeSize(), 0, 0, 0, events::CompilationStatus::FAILED_SINGLE_IMPL,
                       jitStats);
        return false;
    }

    // Drop non-native code in any case
    if (arch != RUNTIME_ARCH) {
        EndCompilation(name, isOsr, method->GetCodeSize(), 0, 0, 0, events::CompilationStatus::DROPPED, jitStats);
        return false;
    }

    auto entryPoint = GetEntryPoint(graph, method, name, isOsr, codeAllocator, gdbDebugInfoAllocator, jitStats);
    if (entryPoint == nullptr) {
        return false;
    }
    if (isOsr) {
        if (!runtime->TrySetOsrCode(method, entryPoint)) {
            // Compiled code has been deoptimized, so we shouldn't install OSR code.
            // NOTE(compiler): release the compiled code memory once CodeAllocator supports freeing memory.
            return false;
        }
    } else {
        runtime->SetCompiledEntryPoint(method, entryPoint);
    }
    return true;
}

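// Entry point for JIT compilation of a single method: filters by the method-name regex, starts
// JIT statistics, registers finalize/success/fail callbacks, and kicks off CompileInGraph.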
template <TaskRunnerMode RUNNER_MODE>
void JITCompileMethod(RuntimeInterface *runtime, CodeAllocator *codeAllocator, ArenaAllocator *gdbDebugInfoAllocator,
                      JITStats *jitStats, CompilerTaskRunner<RUNNER_MODE> taskRunner)
{
    auto &taskCtx = taskRunner.GetContext();
    auto *taskMethod = taskCtx.GetMethod();
    taskCtx.SetMethodName(runtime->GetMethodFullName(taskMethod, false));
    auto &methodName = taskCtx.GetMethodName();

    SCOPED_TRACE_STREAM << "JIT compiling " << methodName;

    if (!g_options.MatchesRegex(methodName)) {
        LOG(DEBUG, COMPILER) << "Skip the method due to regexp mismatch: " << methodName;
        taskCtx.SetCompilationStatus(false);
        CompilerTaskRunner<RUNNER_MODE>::EndTask(std::move(taskRunner), false);
        return;
    }

    if (jitStats != nullptr) {
        jitStats->SetCompilationStart();
    }

    taskRunner.AddFinalize([jitStats](CompilerContext<RUNNER_MODE> &compilerCtx) {
        if (jitStats != nullptr) {
            // Reset compilation start time in all cases for consistency
            jitStats->ResetCompilationStart();
        }
        auto *graph = compilerCtx.GetGraph();
        if (graph != nullptr) {
            graph->~Graph();
        }
    });

    auto arch = ChooseArch(Arch::NONE);
    bool isDynamic = ark::panda_file::IsDynamicLanguage(taskMethod->GetClass()->GetSourceLang());

    taskRunner.AddCallbackOnSuccess([runtime, codeAllocator, gdbDebugInfoAllocator, jitStats, isDynamic,
                                     arch](CompilerContext<RUNNER_MODE> &compilerCtx) {
        bool compilationStatus = CheckCompilation<RUNNER_MODE>(runtime, codeAllocator, gdbDebugInfoAllocator, jitStats,
                                                               isDynamic, arch, compilerCtx);
        compilerCtx.SetCompilationStatus(compilationStatus);
    });
    taskRunner.AddCallbackOnFail(
        [](CompilerContext<RUNNER_MODE> &compilerCtx) { compilerCtx.SetCompilationStatus(false); });

    CompileInGraph<RUNNER_MODE>(runtime, isDynamic, arch, std::move(taskRunner), jitStats);
}

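// Build the IR graph for the method and schedule the optimization pipeline on success. The task is
// ended with 'false' when the backend is unsupported, graph allocation fails, or IrBuilder fails.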
template <TaskRunnerMode RUNNER_MODE>
void CompileInGraph(RuntimeInterface *runtime, bool isDynamic, Arch arch, CompilerTaskRunner<RUNNER_MODE> taskRunner,
                    JITStats *jitStats)
{
    auto &taskCtx = taskRunner.GetContext();
    auto isOsr = taskCtx.IsOsr();
    auto *method = taskCtx.GetMethod();
    auto &methodName = taskCtx.GetMethodName();

    LOG(INFO, COMPILER) << "Compile method" << (isOsr ? "(OSR)" : "") << ": " << methodName << " ("
                        << runtime->GetFileName(method) << ')';

    if (arch == Arch::NONE || !BackendSupport(arch)) {
        LOG(DEBUG, COMPILER) << "Compilation unsupported for this platform!";
        CompilerTaskRunner<RUNNER_MODE>::EndTask(std::move(taskRunner), false);
        return;
    }

    auto *allocator = taskCtx.GetAllocator();
    auto *localAllocator = taskCtx.GetLocalAllocator();
    auto *graph =
        allocator->template New<Graph>(allocator, localAllocator, arch, method, runtime, isOsr, nullptr, isDynamic);
    taskCtx.SetGraph(graph);
    if (graph == nullptr) {
        LOG(ERROR, COMPILER) << "Creating graph failed!";
        EndCompilation(methodName, isOsr, method->GetCodeSize(), 0, 0, 0, events::CompilationStatus::FAILED, jitStats);
        CompilerTaskRunner<RUNNER_MODE>::EndTask(std::move(taskRunner), false);
        return;
    }

    taskRunner.SetTaskOnSuccess([jitStats](CompilerTaskRunner<RUNNER_MODE> nextRunner) {
        RunOptimizations<RUNNER_MODE>(std::move(nextRunner), jitStats);
    });

    bool success = graph->template RunPass<IrBuilder>();
    if (!success) {
        if (!compiler::g_options.IsCompilerIgnoreFailures()) {
            LOG(FATAL, COMPILER) << "IrBuilder failed!";
        }
        LOG(WARNING, COMPILER) << "IrBuilder failed!";
        EndCompilation(methodName, isOsr, method->GetCodeSize(), 0, 0, 0, events::CompilationStatus::FAILED, jitStats);
    }
    CompilerTaskRunner<RUNNER_MODE>::EndTask(std::move(taskRunner), success);
}

template void JITCompileMethod<BACKGROUND_MODE>(RuntimeInterface *, CodeAllocator *, ArenaAllocator *, JITStats *,
                                                CompilerTaskRunner<BACKGROUND_MODE>);
template void JITCompileMethod<INPLACE_MODE>(RuntimeInterface *, CodeAllocator *, ArenaAllocator *, JITStats *,
                                             CompilerTaskRunner<INPLACE_MODE>);
template void CompileInGraph<BACKGROUND_MODE>(RuntimeInterface *, bool, Arch, CompilerTaskRunner<BACKGROUND_MODE>,
                                              JITStats *);
template void CompileInGraph<INPLACE_MODE>(RuntimeInterface *, bool, Arch, CompilerTaskRunner<INPLACE_MODE>,
                                           JITStats *);
template void RunOptimizations<BACKGROUND_MODE>(CompilerTaskRunner<BACKGROUND_MODE>, JITStats *);
template void RunOptimizations<INPLACE_MODE>(CompilerTaskRunner<INPLACE_MODE>, JITStats *);
template bool CheckCompilation<BACKGROUND_MODE>(RuntimeInterface *, CodeAllocator *, ArenaAllocator *, JITStats *, bool,
                                                Arch, CompilerContext<BACKGROUND_MODE> &);
template bool CheckCompilation<INPLACE_MODE>(RuntimeInterface *, CodeAllocator *, ArenaAllocator *, JITStats *, bool,
                                             Arch, CompilerContext<INPLACE_MODE> &);

}  // namespace ark::compiler

#ifdef PANDA_COMPILER_DEBUG_INFO

#include "optimizer/ir/aot_data.h"
#include "tools/debug/jit_writer.h"

// The following "C" code is needed to enable interaction with gdb.
// Please read the "JIT Compilation Interface" section of the gdb documentation for more information.
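// In short: gdb finds the global __jit_debug_descriptor, puts a breakpoint on
// __jit_debug_register_code(), and re-reads the descriptor's entry list whenever that breakpoint is
// hit, using actionFlag to learn whether an in-memory ELF image was registered or unregistered.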
extern "C" {
// Gdb will replace the implementation of this function
// NOLINTNEXTLINE(readability-identifier-naming)
void NO_INLINE __jit_debug_register_code(void)
{
    // NOLINTNEXTLINE(hicpp-no-assembler)
    asm("");
}

// Default version of the descriptor (it may be inspected before any code is registered)
// NOLINTNEXTLINE(modernize-use-nullptr, readability-identifier-naming)
jit_descriptor __jit_debug_descriptor = {1, JIT_NOACTION, NULL, NULL};
}  // extern "C"

namespace ark::compiler {

// NOLINTNEXTLINE(fuchsia-statically-constructed-objects)
static os::memory::Mutex g_jitDebugLock;

// Registers the jit-elf description in gdb's linked list
static void RegisterJitCode(jit_code_entry *entry)
{
    ASSERT(g_options.IsCompilerEmitDebugInfo());

    os::memory::LockHolder lock(g_jitDebugLock);
    // Re-link the list
    entry->nextEntry = __jit_debug_descriptor.firstEntry;
    if (__jit_debug_descriptor.firstEntry != nullptr) {
        __jit_debug_descriptor.firstEntry->prevEntry = entry;
    }
    __jit_debug_descriptor.firstEntry = entry;

    // Fill the relevant entry
    __jit_debug_descriptor.relevantEntry = entry;
    __jit_debug_descriptor.actionFlag = JIT_REGISTER_FN;

    // Call the gdb callback
    __jit_debug_register_code();
    __jit_debug_descriptor.actionFlag = JIT_NOACTION;
    __jit_debug_descriptor.relevantEntry = nullptr;
}

// When the code allocator is cleaned up, also clean the registered entries
void CleanJitDebugCode()
{
    ASSERT(g_options.IsCompilerEmitDebugInfo());

    os::memory::LockHolder lock(g_jitDebugLock);
    __jit_debug_descriptor.actionFlag = JIT_UNREGISTER_FN;

    while (__jit_debug_descriptor.firstEntry != nullptr) {
        __jit_debug_descriptor.firstEntry->prevEntry = nullptr;
        __jit_debug_descriptor.relevantEntry = __jit_debug_descriptor.firstEntry;
        // Call the gdb callback
        __jit_debug_register_code();

        __jit_debug_descriptor.firstEntry = __jit_debug_descriptor.firstEntry->nextEntry;
    }

    __jit_debug_descriptor.actionFlag = JIT_NOACTION;
    __jit_debug_descriptor.relevantEntry = nullptr;
}

// For each piece of JIT code, generate a small ELF description and put it into gdb's special linked list.
static Span<uint8_t> EmitElf(Graph *graph, CodeAllocator *codeAllocator, ArenaAllocator *gdbDebugInfoAllocator,
                             const std::string &methodName)
{
    ASSERT(g_options.IsCompilerEmitDebugInfo());

    if (graph->GetCode().Empty()) {
        return {};
    }

    JitDebugWriter jitWriter(graph->GetArch(), graph->GetRuntime(), codeAllocator, methodName);

    jitWriter.Start();

    auto method = reinterpret_cast<Method *>(graph->GetMethod());
    auto klass = reinterpret_cast<Class *>(graph->GetRuntime()->GetClass(method));
    jitWriter.StartClass(*klass);

    CompiledMethod compiledMethod(graph->GetArch(), method, 0);
    compiledMethod.SetCode(graph->GetCode().ToConst());
    compiledMethod.SetCodeInfo(graph->GetCodeInfoData());
    compiledMethod.SetCfiInfo(graph->GetCallingConvention()->GetCfiInfo());

    jitWriter.AddMethod(compiledMethod);
    jitWriter.EndClass();
    jitWriter.End();
    if (!jitWriter.Write()) {
        return {};
    }

    auto gdbEntry {gdbDebugInfoAllocator->New<jit_code_entry>()};
    if (gdbEntry == nullptr) {
        return {};
    }

    auto elfFile {jitWriter.GetElf()};
    // Pointer to the in-memory ELF file
    gdbEntry->symfileAddr = reinterpret_cast<const char *>(elfFile.Data());
    // Size of the in-memory ELF file
    gdbEntry->symfileSize = elfFile.Size();
    gdbEntry->prevEntry = nullptr;

    RegisterJitCode(gdbEntry);
    return jitWriter.GetCode();
}

}  // namespace ark::compiler
#endif