• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /**
2  * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include "optimizer_run.h"
17 #include "mem/pool_manager.h"
18 #include "mem/code_allocator.h"
19 #include "include/class.h"
20 #include "include/method.h"
21 #include "optimizer/ir/ir_constructor.h"
22 #include "optimizer/ir/runtime_interface.h"
23 #include "optimizer/analysis/loop_analyzer.h"
24 #include "optimizer/pass.h"
25 #include "optimizer/ir_builder/ir_builder.h"
26 #include "utils/logger.h"
27 #include "code_info/code_info.h"
28 #include "events/events.h"
29 #include "trace/trace.h"
30 #include "optimizer/code_generator/codegen.h"
31 #include "compile_method.h"
32 
33 namespace panda::compiler {
34 
35 #ifdef PANDA_COMPILER_CFI
36 static Span<uint8_t> EmitElf(Graph *graph, CodeAllocator *code_allocator, ArenaAllocator *gdb_debug_info_allocator,
37                              const std::string &method_name);
38 #endif
39 
ChooseArch(Arch arch)40 static Arch ChooseArch(Arch arch)
41 {
42     if (arch != Arch::NONE) {
43         return arch;
44     }
45 
46     arch = RUNTIME_ARCH;
47     if (RUNTIME_ARCH == Arch::X86_64 && options.WasSetCompilerCrossArch()) {
48         arch = GetArchFromString(options.GetCompilerCrossArch());
49     }
50 
51     return arch;
52 }
53 
CheckSingleImplementation(Graph * graph)54 static bool CheckSingleImplementation(Graph *graph)
55 {
56     // Check that all methods that were inlined due to its single implementation property, still have this property,
57     // otherwise we must drop compiled code.
58     // TODO(compiler): we need to reset hotness counter hereby avoid yet another warmup phase.
59     auto cha = graph->GetRuntime()->GetCha();
60     for (auto si_method : graph->GetSingleImplementationList()) {
61         if (!cha->IsSingleImplementation(si_method)) {
62             LOG(WARNING, COMPILER)
63                 << "Method lost single-implementation property after compilation, so we need to drop "
64                    "whole compiled code: "
65                 << graph->GetRuntime()->GetMethodFullName(si_method);
66             return false;
67         }
68     }
69     return true;
70 }
71 
EmitCode(const Graph * graph,CodeAllocator * allocator)72 static Span<uint8_t> EmitCode(const Graph *graph, CodeAllocator *allocator)
73 {
74     size_t code_offset = RoundUp(CodePrefix::STRUCT_SIZE, GetCodeAlignment(graph->GetArch()));
75     CodePrefix prefix;
76     prefix.code_size = graph->GetData().size();
77     prefix.code_info_offset = code_offset + RoundUp(graph->GetData().size(), sizeof(uint32_t));
78     prefix.code_info_size = graph->GetCodeInfoData().size();
79     size_t code_size = prefix.code_info_offset + prefix.code_info_size;
80     auto mem_range = allocator->AllocateCodeUnprotected(code_size);
81     if (mem_range.GetSize() == 0) {
82         return Span<uint8_t> {};
83     }
84 
85     auto data = reinterpret_cast<uint8_t *>(mem_range.GetData());
86     memcpy_s(data, sizeof(CodePrefix), &prefix, sizeof(CodePrefix));
87     // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
88     memcpy_s(&data[code_offset], graph->GetData().size(), graph->GetData().data(), graph->GetData().size());
89     // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
90     memcpy_s(&data[prefix.code_info_offset], graph->GetCodeInfoData().size(), graph->GetCodeInfoData().data(),
91              graph->GetCodeInfoData().size());
92 
93     allocator->ProtectCode(mem_range);
94 
95     return Span<uint8_t>(reinterpret_cast<uint8_t *>(mem_range.GetData()), code_size);
96 }
97 
GetEntryPoint(Graph * graph,Method * method,const std::string & method_name,bool is_osr,CodeAllocator * code_allocator,ArenaAllocator * gdb_debug_info_allocator)98 uint8_t *GetEntryPoint(Graph *graph, [[maybe_unused]] Method *method, const std::string &method_name,
99                        [[maybe_unused]] bool is_osr, CodeAllocator *code_allocator,
100                        [[maybe_unused]] ArenaAllocator *gdb_debug_info_allocator)
101 {
102 #ifdef PANDA_COMPILER_CFI
103     auto generated_data = options.IsCompilerEmitDebugInfo()
104                               ? EmitElf(graph, code_allocator, gdb_debug_info_allocator, method_name)
105                               : EmitCode(graph, code_allocator);
106 #else
107     auto generated_data = EmitCode(graph, code_allocator);
108 #endif
109     if (generated_data.Empty()) {
110         LOG(INFO, COMPILER) << "Compilation failed due to memory allocation fail: " << method_name;
111         return nullptr;
112     }
113     CodeInfo code_info(generated_data);
114     LOG(INFO, COMPILER) << "Compiled code for '" << method_name << "' has been installed to "
115                         << bit_cast<void *>(code_info.GetCode()) << ", code size " << code_info.GetCodeSize();
116 
117     auto entry_point = const_cast<uint8_t *>(code_info.GetCode());
118     EVENT_COMPILATION(method_name, is_osr, method->GetCodeSize(), reinterpret_cast<uintptr_t>(entry_point),
119                       code_info.GetCodeSize(), code_info.GetInfoSize(), events::CompilationStatus::COMPILED);
120     return entry_point;
121 }
122 
/**
 * JIT-compile @p method and install the resulting native code.
 *
 * Pipeline: build + optimize the graph (CompileInGraph), validate
 * single-implementation inlining assumptions, emit the code (GetEntryPoint),
 * then install it either as OSR code or as the method's entry point.
 *
 * @return true iff code was successfully compiled AND installed; on any
 *         failure the method is left untouched and false is returned.
 */
bool JITCompileMethod(RuntimeInterface *runtime, Method *method, bool is_osr, CodeAllocator *code_allocator,
                      ArenaAllocator *allocator, ArenaAllocator *local_allocator,
                      ArenaAllocator *gdb_debug_info_allocator)
{
    std::string method_name = runtime->GetMethodFullName(method, false);
    SCOPED_TRACE_STREAM << "JIT compiling " << method_name;

    // Respect the user-supplied method filter regex.
    if (!options.MatchesRegex(method_name)) {
        LOG(DEBUG, COMPILER) << "Skip the method due to regexp mismatch: " << method_name;
        return false;
    }

    Graph *graph {nullptr};
    // Scope guard: destroy the graph (if one was created) on every exit path.
    // The unique_ptr manages a dummy pointer; only the deleter matters.
    auto finalizer = [&graph]([[maybe_unused]] void *ptr) {
        if (graph != nullptr) {
            graph->~Graph();
        }
    };
    std::unique_ptr<void, decltype(finalizer)> fin(&finalizer, finalizer);

    auto arch {Arch::NONE};
    bool is_dynamic = panda::panda_file::IsDynamicLanguage(method->GetClass()->GetSourceLang());

    // Builds IR and runs optimizations; on success *graph is set and arch is resolved.
    if (!CompileInGraph(runtime, method, is_osr, allocator, local_allocator, is_dynamic, &arch, method_name, &graph)) {
        return false;
    }
    ASSERT(graph != nullptr && graph->GetData().data() != nullptr);

    // Single-implementation checks only apply to statically-typed languages here.
    if (!is_dynamic && !CheckSingleImplementation(graph)) {
        EVENT_COMPILATION(method_name, is_osr, method->GetCodeSize(), 0, 0, 0,
                          events::CompilationStatus::FAILED_SINGLE_IMPL);
        return false;
    }

    // Drop non-native code in any case
    if (arch != RUNTIME_ARCH) {
        EVENT_COMPILATION(method_name, is_osr, method->GetCodeSize(), 0, 0, 0, events::CompilationStatus::DROPPED);
        return false;
    }

    auto entry_point = GetEntryPoint(graph, method, method_name, is_osr, code_allocator, gdb_debug_info_allocator);
    if (entry_point == nullptr) {
        return false;
    }
    if (is_osr) {
        // OSR code is only valid alongside regular compiled code.
        if (runtime->HasCompiledCode(method)) {
            runtime->SetOsrCode(method, entry_point);
            ASSERT(runtime->GetOsrCode(method) != nullptr);
        } else {
            // Compiled code has been deoptimized, so we shouldn't install osr code.
            // TODO(compiler): release compiled code memory, when CodeAllocator supports freeing the memory.
            return false;
        }
    } else {
        runtime->SetCompiledEntryPoint(method, entry_point);
    }
    ASSERT(graph != nullptr);
    return true;
}
182 
/**
 * Build the IR graph for @p method and run the optimization pipeline on it.
 *
 * On success, *graph points at the optimized graph (allocated from
 * @p allocator) and *arch holds the resolved target architecture.
 * The caller owns destruction of the graph.
 *
 * @return false if the platform is unsupported, graph creation fails,
 *         IR building fails, or optimizations fail.
 */
bool CompileInGraph(RuntimeInterface *runtime, Method *method, bool is_osr, ArenaAllocator *allocator,
                    ArenaAllocator *local_allocator, bool is_dynamic, Arch *arch, const std::string &method_name,
                    Graph **graph)
{
    LOG(INFO, COMPILER) << "Compile method" << (is_osr ? "(OSR)" : "") << ": " << method_name << " ("
                        << runtime->GetFileName(method) << ')';
    *arch = ChooseArch(*arch);
    if (*arch == Arch::NONE || !BackendSupport(*arch)) {
        LOG(DEBUG, COMPILER) << "Compilation unsupported for this platform!";
        return false;
    }

    ASSERT(*graph == nullptr);
    *graph = allocator->New<Graph>(allocator, local_allocator, *arch, method, runtime, is_osr, nullptr, is_dynamic);
    if (*graph == nullptr) {
        LOG(ERROR, COMPILER) << "Creating graph failed!";
        EVENT_COMPILATION(method_name, is_osr, method->GetCodeSize(), 0, 0, 0, events::CompilationStatus::FAILED);
        return false;
    }

    if (!(*graph)->RunPass<IrBuilder>()) {
        // LOG(FATAL, ...) aborts, so the WARNING below is only reached when
        // failures are being ignored.
        if (!compiler::options.IsCompilerIgnoreFailures()) {
            LOG(FATAL, COMPILER) << "IrBuilder failed!";
        }
        LOG(WARNING, COMPILER) << "IrBuilder failed!";
        EVENT_COMPILATION(method_name, is_osr, method->GetCodeSize(), 0, 0, 0, events::CompilationStatus::FAILED);
        return false;
    }

    // Run compiler optimizations over created graph
    bool res = RunOptimizations(*graph);
    if (!res) {
        // Same pattern as above: FATAL aborts unless failures are ignored.
        if (!compiler::options.IsCompilerIgnoreFailures()) {
            LOG(FATAL, COMPILER) << "RunOptimizations failed!";
        }
        LOG(WARNING, COMPILER) << "RunOptimizations failed!";
        EVENT_COMPILATION(method_name, is_osr, method->GetCodeSize(), 0, 0, 0, events::CompilationStatus::FAILED);
        return false;
    }

    LOG(DEBUG, COMPILER) << "The method is compiled";

    return true;
}
227 }  // namespace panda::compiler
228 
229 #ifdef PANDA_COMPILER_CFI
230 
231 #include "optimizer/ir/aot_data.h"
232 #include "tools/debug/jit_writer.h"
233 
// The following "C" code is needed to enable interaction with gdb.
// Please read "JIT Compilation Interface" from gdb-documentation for more information.
extern "C" {
// Gdb will replace implementation of this function: it places a breakpoint
// here and inspects __jit_debug_descriptor whenever it is hit. Must not be
// inlined or optimized away, hence NO_INLINE and the empty asm.
void NO_INLINE __jit_debug_register_code(void)
{
    // NOLINTNEXTLINE(hicpp-no-assembler)
    asm("");
}

// Default version for descriptor (may be checked before register code).
// Layout and symbol name are fixed by the gdb JIT interface: version 1,
// no pending action, empty entry list.
// NOLINTNEXTLINE(modernize-use-nullptr)
jit_descriptor __jit_debug_descriptor = {1, JIT_NOACTION, NULL, NULL};
}  // extern "C"
248 
249 namespace panda::compiler {
250 
251 // NOLINTNEXTLINE(fuchsia-statically-constructed-objects)
252 static os::memory::Mutex jit_debug_lock;
253 
// Will register jit-elf description in linked list observed by gdb.
// Statement order follows the gdb JIT protocol: link the entry, set
// relevant_entry/action_flag, notify gdb, then reset the descriptor.
static void RegisterJitCode(jit_code_entry *entry)
{
    ASSERT(options.IsCompilerEmitDebugInfo());

    os::memory::LockHolder lock(jit_debug_lock);
    // Re-link list: push the new entry at the head.
    entry->next_entry = __jit_debug_descriptor.first_entry;
    if (__jit_debug_descriptor.first_entry != nullptr) {
        __jit_debug_descriptor.first_entry->prev_entry = entry;
    }
    __jit_debug_descriptor.first_entry = entry;

    // Fill last entry
    __jit_debug_descriptor.relevant_entry = entry;
    __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;

    // Call gdb-callback, then clear the pending action.
    __jit_debug_register_code();
    __jit_debug_descriptor.action_flag = JIT_NOACTION;
    __jit_debug_descriptor.relevant_entry = nullptr;
}
276 
// When code allocator cleaned - also will clean entry.
// Unregisters every entry from gdb's list, notifying gdb once per entry.
// Note: the jit_code_entry objects themselves are arena-allocated and are
// reclaimed with their allocator, not freed here.
void CleanJitDebugCode()
{
    ASSERT(options.IsCompilerEmitDebugInfo());

    os::memory::LockHolder lock(jit_debug_lock);
    __jit_debug_descriptor.action_flag = JIT_UNREGISTER_FN;

    while (__jit_debug_descriptor.first_entry != nullptr) {
        __jit_debug_descriptor.first_entry->prev_entry = nullptr;
        // gdb unregisters whatever relevant_entry points at on each callback.
        __jit_debug_descriptor.relevant_entry = __jit_debug_descriptor.first_entry;
        // Call gdb-callback
        __jit_debug_register_code();

        __jit_debug_descriptor.first_entry = __jit_debug_descriptor.first_entry->next_entry;
    }

    __jit_debug_descriptor.action_flag = JIT_NOACTION;
    __jit_debug_descriptor.relevant_entry = nullptr;
}
297 
// For each jit code - will generate small elf description and put them in gdb-special linked list.
// Returns the span of the emitted code on success, an empty span on any
// failure (nothing to emit, ELF write failure, or entry allocation failure).
static Span<uint8_t> EmitElf(Graph *graph, CodeAllocator *code_allocator, ArenaAllocator *gdb_debug_info_allocator,
                             const std::string &method_name)
{
    ASSERT(options.IsCompilerEmitDebugInfo());

    // Nothing was generated for this graph.
    if (graph->GetData().Empty()) {
        return {};
    }

    JitDebugWriter jit_writer(graph->GetArch(), graph->GetRuntime(), code_allocator, method_name);

    jit_writer.Start();

    auto method = reinterpret_cast<Method *>(graph->GetMethod());
    auto klass = reinterpret_cast<Class *>(graph->GetRuntime()->GetClass(method));
    jit_writer.StartClass(*klass);

    // Package code + code info + CFI into a CompiledMethod for the writer.
    CompiledMethod compiled_method(graph->GetArch(), method);
    compiled_method.SetCode(graph->GetData().ToConst());
    compiled_method.SetCodeInfo(graph->GetCodeInfoData());
    compiled_method.SetCfiInfo(graph->GetCallingConvention()->GetCfiInfo());

    jit_writer.AddMethod(compiled_method, 0);
    jit_writer.EndClass();
    jit_writer.End();
    if (!jit_writer.Write()) {
        return {};
    }

    // The gdb entry must outlive this call; it lives in the dedicated arena.
    auto gdb_entry {gdb_debug_info_allocator->New<jit_code_entry>()};
    if (gdb_entry == nullptr) {
        return {};
    }

    auto elf_file {jit_writer.GetElf()};
    // Pointer to Elf-file entry
    gdb_entry->symfile_addr = reinterpret_cast<const char *>(elf_file.Data());
    // Elf-in-memory file size
    gdb_entry->symfile_size = elf_file.Size();
    gdb_entry->prev_entry = nullptr;

    RegisterJitCode(gdb_entry);
    return jit_writer.GetCode();
}
343 
344 }  // namespace panda::compiler
345 #endif
346