/*
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/compiler/codegen/llvm/llvm_codegen.h"
#if defined(PANDA_TARGET_MACOS) || defined(PANDA_TARGET_IOS)
#include "ecmascript/base/llvm_helper.h"
#endif


#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wshadow"
#pragma clang diagnostic ignored "-Wunused-parameter"
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
#elif defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif

#include "llvm-c/Analysis.h"
#include "llvm-c/Disassembler.h"
#include "llvm-c/Transforms/PassManagerBuilder.h"
#include "llvm/DebugInfo/DWARF/DWARFContext.h"
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/ExecutionEngine/MCJIT.h"
#include "llvm/IR/LegacyPassManager.h"
#include "lib/llvm_interface.h"

#include "ecmascript/compiler/aot_file/aot_file_info.h"
#include "ecmascript/compiler/codegen/llvm/llvm_ir_builder.h"
#include "ecmascript/compiler/compiler_log.h"

#if defined(__clang__)
#pragma clang diagnostic pop
#elif defined(__GNUC__)
#pragma GCC diagnostic pop
#endif

namespace panda::ecmascript::kungfu {
using namespace panda::ecmascript;
using namespace llvm;

CodeInfo::CodeInfo(CodeSpaceOnDemand &codeSpaceOnDemand) : codeSpaceOnDemand_(codeSpaceOnDemand)
{
    secInfos_.fill(std::make_pair(nullptr, 0));
}

CodeInfo::~CodeInfo()
{
    Reset();
}

CodeInfo::CodeSpace *CodeInfo::CodeSpace::GetInstance()
{
    static CodeSpace *codeSpace = new CodeSpace();
    return codeSpace;
}

CodeInfo::CodeSpace::CodeSpace()
{
    ASSERT(REQUIRED_SECS_LIMIT == AlignUp(REQUIRED_SECS_LIMIT, PageSize()));
    reqSecs_ = static_cast<uint8_t *>(PageMap(REQUIRED_SECS_LIMIT, PAGE_PROT_READWRITE).GetMem());
    if (reqSecs_ == reinterpret_cast<uint8_t *>(-1)) {
        reqSecs_ = nullptr;
    }
    ASSERT(UNREQUIRED_SECS_LIMIT == AlignUp(UNREQUIRED_SECS_LIMIT, PageSize()));
    unreqSecs_ = static_cast<uint8_t *>(PageMap(UNREQUIRED_SECS_LIMIT, PAGE_PROT_READWRITE).GetMem());
    if (unreqSecs_ == reinterpret_cast<uint8_t *>(-1)) {
        unreqSecs_ = nullptr;
    }
}

CodeInfo::CodeSpace::~CodeSpace()
{
    reqBufPos_ = 0;
    unreqBufPos_ = 0;
    if (reqSecs_ != nullptr) {
        PageUnmap(MemMap(reqSecs_, REQUIRED_SECS_LIMIT));
    }
    reqSecs_ = nullptr;
    if (unreqSecs_ != nullptr) {
        PageUnmap(MemMap(unreqSecs_, UNREQUIRED_SECS_LIMIT));
    }
    unreqSecs_ = nullptr;
}

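// Bump-pointer allocation inside the pre-mapped buffers: "required" sections are carved out of
// reqSecs_, everything else out of unreqSecs_. The cursor is aligned up first when an alignment is
// requested, and running past the mapped limit is fatal.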
uint8_t *CodeInfo::CodeSpace::Alloca(uintptr_t size, bool isReq, size_t alignSize)
{
    uint8_t *addr = nullptr;
    auto bufBegin = isReq ? reqSecs_ : unreqSecs_;
    auto &curPos = isReq ? reqBufPos_ : unreqBufPos_;
    size_t limit = isReq ? REQUIRED_SECS_LIMIT : UNREQUIRED_SECS_LIMIT;
    if (curPos + size > limit) {
        LOG_COMPILER(ERROR) << std::hex << "Alloca section failed. Current curPos:" << curPos
                            << " plus size:" << size << " exceeds limit:" << limit;
        exit(-1);
    }
    if (alignSize > 0) {
        curPos = AlignUp(curPos, alignSize);
    }
    addr = bufBegin + curPos;
    curPos += size;
    return addr;
}

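// On-demand variant: each section gets its own malloc'ed block, tracked in sections_ and freed in
// the destructor, instead of carving from the shared pre-mapped code space; this path is selected
// for JIT compilation.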
uint8_t *CodeInfo::CodeSpaceOnDemand::Alloca(uintptr_t size, [[maybe_unused]] bool isReq, size_t alignSize)
{
    // Always allocate an aligned memory block here.
    auto alignedSize = alignSize > 0 ? AlignUp(size, alignSize) : size;
    // Verify the size; temporarily use REQUIRED_SECS_LIMIT as the online limit, allowing for adjustments.
    if (alignedSize > SECTION_LIMIT) {
        LOG_COMPILER(FATAL) << std::hex << "invalid memory size: " << alignedSize;
        return nullptr;
    }
    uint8_t *addr = static_cast<uint8_t *>(malloc(alignedSize));
    if (addr == nullptr) {
        LOG_COMPILER(FATAL) << "malloc section failed.";
        return nullptr;
    }
    sections_.push_back({addr, alignedSize});
    return addr;
}

CodeInfo::CodeSpaceOnDemand::~CodeSpaceOnDemand()
{
    // Release all allocated sections.
    for (auto &section : sections_) {
        if ((section.first != nullptr) && (section.second != 0)) {
            free(section.first);
        }
    }
    sections_.clear();
}

uint8_t *CodeInfo::AllocaOnDemand(uintptr_t size, size_t alignSize)
{
    return codeSpaceOnDemand_.Alloca(size, true, alignSize);
}

uint8_t *CodeInfo::AllocaInReqSecBuffer(uintptr_t size, size_t alignSize)
{
    return CodeSpace::GetInstance()->Alloca(size, true, alignSize);
}

uint8_t *CodeInfo::AllocaInNotReqSecBuffer(uintptr_t size, size_t alignSize)
{
    return CodeSpace::GetInstance()->Alloca(size, false, alignSize);
}

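// Code sections that belong to the AOT output are page-aligned the first time one is allocated and
// TEXT_SEC_ALIGN-aligned afterwards; other sections use the allocator's default alignment.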
uint8_t *CodeInfo::AllocaCodeSectionImp(uintptr_t size, const char *sectionName,
                                        AllocaSectionCallback allocaInReqSecBuffer)
{
    uint8_t *addr = nullptr;
    auto curSec = ElfSection(sectionName);
    if (curSec.isValidAOTSec()) {
        if (!alreadyPageAlign_) {
            addr = (this->*allocaInReqSecBuffer)(size, AOTFileInfo::PAGE_ALIGN);
            alreadyPageAlign_ = true;
        } else {
            addr = (this->*allocaInReqSecBuffer)(size, AOTFileInfo::TEXT_SEC_ALIGN);
        }
    } else {
        addr = (this->*allocaInReqSecBuffer)(size, 0);
    }
    codeInfo_.push_back({addr, size});
    if (curSec.isValidAOTSec()) {
        secInfos_[curSec.GetIntIndex()] = std::make_pair(addr, size);
    }
    return addr;
}

uint8_t *CodeInfo::AllocaCodeSection(uintptr_t size, const char *sectionName)
{
    return AllocaCodeSectionImp(size, sectionName, &CodeInfo::AllocaInReqSecBuffer);
}

uint8_t *CodeInfo::AllocaCodeSectionOnDemand(uintptr_t size, const char *sectionName)
{
    return AllocaCodeSectionImp(size, sectionName, &CodeInfo::AllocaOnDemand);
}

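// Data sections follow the same pattern: rodata is padded to MEM_ALIGN_REGION, page-aligned on the
// first AOT allocation and RODATA_SEC_ALIGN-aligned afterwards; non-rodata sections use the default
// alignment. Sequential AOT sections go to the required buffer, the rest to the non-required one.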
uint8_t *CodeInfo::AllocaDataSectionImp(uintptr_t size, const char *sectionName,
                                        AllocaSectionCallback allocaInReqSecBuffer,
                                        AllocaSectionCallback allocaInNotReqSecBuffer)
{
    uint8_t *addr = nullptr;
    auto curSec = ElfSection(sectionName);
    // The rodata section needs 16-byte alignment.
    if (curSec.InRodataSection()) {
        size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_REGION));
        if (!alreadyPageAlign_) {
            addr = curSec.isSequentialAOTSec() ? (this->*allocaInReqSecBuffer)(size, AOTFileInfo::PAGE_ALIGN)
                                               : (this->*allocaInNotReqSecBuffer)(size, AOTFileInfo::PAGE_ALIGN);
            alreadyPageAlign_ = true;
        } else {
            uint32_t alignedSize = curSec.InRodataSection() ? AOTFileInfo::RODATA_SEC_ALIGN
                                                            : AOTFileInfo::DATA_SEC_ALIGN;
            addr = curSec.isSequentialAOTSec() ? (this->*allocaInReqSecBuffer)(size, alignedSize)
                                               : (this->*allocaInNotReqSecBuffer)(size, alignedSize);
        }
    } else {
        addr = curSec.isSequentialAOTSec() ? (this->*allocaInReqSecBuffer)(size, 0)
                                           : (this->*allocaInNotReqSecBuffer)(size, 0);
    }
    if (curSec.isValidAOTSec()) {
        secInfos_[curSec.GetIntIndex()] = std::make_pair(addr, size);
    }
    return addr;
}

uint8_t *CodeInfo::AllocaDataSection(uintptr_t size, const char *sectionName)
{
    return AllocaDataSectionImp(size, sectionName, &CodeInfo::AllocaInReqSecBuffer, &CodeInfo::AllocaInNotReqSecBuffer);
}

uint8_t *CodeInfo::AllocaDataSectionOnDemand(uintptr_t size, const char *sectionName)
{
    return AllocaDataSectionImp(size, sectionName, &CodeInfo::AllocaOnDemand, &CodeInfo::AllocaOnDemand);
}

void CodeInfo::SaveFunc2Addr(std::string funcName, uint32_t address)
{
    auto itr = func2FuncInfo.find(funcName);
    if (itr != func2FuncInfo.end()) {
        itr->second.addr = address;
        return;
    }
    func2FuncInfo.insert(
        std::pair<std::string, FuncInfo>(funcName, {address, 0, kungfu::CalleeRegAndOffsetVec()}));
}

void CodeInfo::SaveFunc2FPtoPrevSPDelta(std::string funcName, int32_t fp2PrevSpDelta)
{
    auto itr = func2FuncInfo.find(funcName);
    if (itr != func2FuncInfo.end()) {
        itr->second.fp2PrevFrameSpDelta = fp2PrevSpDelta;
        return;
    }
    func2FuncInfo.insert(
        std::pair<std::string, FuncInfo>(funcName, {0, fp2PrevSpDelta, kungfu::CalleeRegAndOffsetVec()}));
}

void CodeInfo::SaveFunc2CalleeOffsetInfo(std::string funcName, kungfu::CalleeRegAndOffsetVec calleeRegInfo)
{
    auto itr = func2FuncInfo.find(funcName);
    if (itr != func2FuncInfo.end()) {
        itr->second.calleeRegInfo = calleeRegInfo;
        return;
    }
    func2FuncInfo.insert(
        std::pair<std::string, FuncInfo>(funcName, {0, 0, calleeRegInfo}));
}

void CodeInfo::SavePC2DeoptInfo(uint64_t pc, std::vector<uint8_t> deoptInfo)
{
    pc2DeoptInfo.insert(std::pair<uint64_t, std::vector<uint8_t>>(pc, deoptInfo));
}

void CodeInfo::SavePC2CallSiteInfo(uint64_t pc, std::vector<uint8_t> callSiteInfo)
{
    pc2CallsiteInfo.insert(std::pair<uint64_t, std::vector<uint8_t>>(pc, callSiteInfo));
}

const std::map<std::string, CodeInfo::FuncInfo> &CodeInfo::GetFuncInfos() const
{
    return func2FuncInfo;
}

const std::map<uint64_t, std::vector<uint8_t>> &CodeInfo::GetPC2DeoptInfo() const
{
    return pc2DeoptInfo;
}

const std::unordered_map<uint64_t, std::vector<uint8_t>> &CodeInfo::GetPC2CallsiteInfo() const
{
    return pc2CallsiteInfo;
}

void CodeInfo::Reset()
{
    codeInfo_.clear();
}

uint8_t *CodeInfo::GetSectionAddr(ElfSecName sec) const
{
    auto curSection = ElfSection(sec);
    auto idx = curSection.GetIntIndex();
    return const_cast<uint8_t *>(secInfos_[idx].first);
}

size_t CodeInfo::GetSectionSize(ElfSecName sec) const
{
    auto curSection = ElfSection(sec);
    auto idx = curSection.GetIntIndex();
    return secInfos_[idx].second;
}

std::vector<std::pair<uint8_t *, uintptr_t>> CodeInfo::GetCodeInfo() const
{
    return codeInfo_;
}

void LLVMIRGeneratorImpl::GenerateCodeForStub(Circuit *circuit, const ControlFlowGraph &graph, size_t index,
                                              const CompilationConfig *cfg)
{
    LLVMValueRef function = module_->GetFunction(index);
    const CallSignature *cs = module_->GetCSign(index);
#if ENABLE_NEXT_OPTIMIZATION
    LLVMIRBuilder builder(&graph, circuit, module_, function, cfg, cs->GetCallConv(), enableLog_, false, cs->GetName(),
                          true);
#else
    LLVMIRBuilder builder(&graph, circuit, module_, function, cfg, cs->GetCallConv(), enableLog_, false, cs->GetName(),
                          false);
#endif
    builder.Build();
}

void LLVMIRGeneratorImpl::GenerateCode(Circuit *circuit, const ControlFlowGraph &graph, const CompilationConfig *cfg,
                                       const panda::ecmascript::MethodLiteral *methodLiteral,
                                       const JSPandaFile *jsPandaFile, const std::string &methodName,
                                       const FrameType frameType, bool enableOptInlining, bool enableOptBranchProfiling)
{
    auto function = module_->AddFunc(methodLiteral, jsPandaFile);
    circuit->SetFrameType(frameType);
    CallSignature::CallConv conv;
    if (methodLiteral->IsFastCall()) {
        conv = CallSignature::CallConv::CCallConv;
    } else {
        conv = CallSignature::CallConv::WebKitJSCallConv;
    }
    LLVMIRBuilder builder(&graph, circuit, module_, function, cfg, conv,
                          enableLog_, methodLiteral->IsFastCall(), methodName,
                          false, enableOptInlining, enableOptBranchProfiling);
    builder.Build();
}

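// The RoundTrip* functions below are the C callbacks handed to LLVMCreateSimpleMCJITMemoryManager;
// they forward MCJIT's section allocation requests to the CodeInfo instance passed as 'object'.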
static uint8_t *RoundTripAllocateCodeSection(void *object, uintptr_t size, [[maybe_unused]] unsigned alignment,
                                             [[maybe_unused]] unsigned sectionID, const char *sectionName)
{
    struct CodeInfo &state = *static_cast<struct CodeInfo *>(object);
    return state.AllocaCodeSection(size, sectionName);
}

static uint8_t *RoundTripAllocateCodeSectionOnDemand(void *object, uintptr_t size, [[maybe_unused]] unsigned alignment,
                                                     [[maybe_unused]] unsigned sectionID, const char *sectionName)
{
    struct CodeInfo &state = *static_cast<struct CodeInfo *>(object);
    return state.AllocaCodeSectionOnDemand(size, sectionName);
}

static uint8_t *RoundTripAllocateDataSection(void *object, uintptr_t size, [[maybe_unused]] unsigned alignment,
                                             [[maybe_unused]] unsigned sectionID, const char *sectionName,
                                             [[maybe_unused]] LLVMBool isReadOnly)
{
    struct CodeInfo &state = *static_cast<struct CodeInfo *>(object);
    return state.AllocaDataSection(size, sectionName);
}

static uint8_t *RoundTripAllocateDataSectionOnDemand(void *object, uintptr_t size, [[maybe_unused]] unsigned alignment,
                                                     [[maybe_unused]] unsigned sectionID, const char *sectionName,
                                                     [[maybe_unused]] LLVMBool isReadOnly)
{
    ASSERT(object != nullptr);
    struct CodeInfo &state = *static_cast<struct CodeInfo *>(object);
    return state.AllocaDataSectionOnDemand(size, sectionName);
}

static LLVMBool RoundTripFinalizeMemory([[maybe_unused]] void *object, [[maybe_unused]] char **errMsg)
{
    return 0;
}

static void RoundTripDestroy([[maybe_unused]] void *object)
{
    return;
}

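// JIT compilation routes section memory through the on-demand (malloc-backed) allocators, while AOT
// compilation uses the shared pre-mapped code space.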
void LLVMAssembler::UseRoundTripSectionMemoryManager(bool isJit)
{
    auto sectionMemoryManager = std::make_unique<llvm::SectionMemoryManager>();
    options_.MCJMM = LLVMCreateSimpleMCJITMemoryManager(
        &codeInfo_, isJit ? RoundTripAllocateCodeSectionOnDemand : RoundTripAllocateCodeSection,
        isJit ? RoundTripAllocateDataSectionOnDemand : RoundTripAllocateDataSection, RoundTripFinalizeMemory,
        RoundTripDestroy);
}

bool LLVMAssembler::BuildMCJITEngine()
{
    LLVMBool ret = LLVMCreateMCJITCompilerForModule(&engine_, module_, &options_, sizeof(options_), &error_);
    if (ret) {
        LOG_COMPILER(FATAL) << "error_ : " << error_;
        return false;
    }
    llvm::unwrap(engine_)->RegisterJITEventListener(&listener_);
    return true;
}

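// Pass pipeline: the RewriteStatepointsForGC (rs4gc) legacy pass runs first in its own module pass
// manager, then the populated function passes run per function, and finally the remaining
// module-level optimizations run in modPass1.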
void LLVMAssembler::BuildAndRunPasses()
{
    LLVMPassManagerBuilderRef pmBuilder = LLVMPassManagerBuilderCreate();
    LLVMPassManagerBuilderSetOptLevel(pmBuilder, options_.OptLevel); // using O3 optimization level
    LLVMPassManagerBuilderSetSizeLevel(pmBuilder, 0);
    LLVMPassManagerBuilderSetDisableUnrollLoops(pmBuilder, 0);

    // Pass manager creation: the rs4gc pass is the only pass in modPass; the other module-based
    // optimization passes are in modPass1.
    LLVMPassManagerRef funcPass = LLVMCreateFunctionPassManagerForModule(module_);
    LLVMPassManagerRef modPass = LLVMCreatePassManager();
    LLVMPassManagerRef modPass1 = LLVMCreatePassManager();

    // Add passes into the pass managers.
    LLVMPassManagerBuilderPopulateFunctionPassManager(pmBuilder, funcPass);
    llvm::unwrap(modPass)->add(LLVMCreateRewriteStatepointsForGCLegacyPass()); // rs4gc pass added
    LLVMPassManagerBuilderPopulateModulePassManager(pmBuilder, modPass1);

    LLVMRunPassManager(modPass, module_);
    LLVMInitializeFunctionPassManager(funcPass);
    for (LLVMValueRef fn = LLVMGetFirstFunction(module_); fn; fn = LLVMGetNextFunction(fn)) {
        LLVMRunFunctionPassManager(funcPass, fn);
    }
    LLVMFinalizeFunctionPassManager(funcPass);
    LLVMRunPassManager(modPass1, module_);

    LLVMPassManagerBuilderDispose(pmBuilder);
    LLVMDisposePassManager(funcPass);
    LLVMDisposePassManager(modPass);
    LLVMDisposePassManager(modPass1);
}

void LLVMAssembler::BuildAndRunPassesFastMode()
{
    LLVMPassManagerBuilderRef pmBuilder = LLVMPassManagerBuilderCreate();
    LLVMPassManagerBuilderSetOptLevel(pmBuilder, options_.OptLevel); // using O3 optimization level
    LLVMPassManagerBuilderSetSizeLevel(pmBuilder, 0);

    // Pass manager creation: in fast mode the rs4gc pass is the only module-based pass.
    LLVMPassManagerRef funcPass = LLVMCreateFunctionPassManagerForModule(module_);
    LLVMPassManagerRef modPass = LLVMCreatePassManager();

    // Add passes into the pass managers.
    LLVMPassManagerBuilderPopulateFunctionPassManager(pmBuilder, funcPass);
    llvm::unwrap(modPass)->add(LLVMCreateRewriteStatepointsForGCLegacyPass()); // rs4gc pass added

    LLVMInitializeFunctionPassManager(funcPass);
    for (LLVMValueRef fn = LLVMGetFirstFunction(module_); fn; fn = LLVMGetNextFunction(fn)) {
        LLVMRunFunctionPassManager(funcPass, fn);
    }
    LLVMFinalizeFunctionPassManager(funcPass);
    LLVMRunPassManager(modPass, module_);

    LLVMPassManagerBuilderDispose(pmBuilder);
    LLVMDisposePassManager(funcPass);
    LLVMDisposePassManager(modPass);
}

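// Typical flow (a rough sketch; construction of the LLVMModule and LOptions lives elsewhere):
// create the assembler with an LLVMModule, a CodeSpaceOnDemand and LOptions, then call Run() to
// verify the module, run the pass pipeline and let MCJIT emit machine code into the
// CodeInfo-managed sections; Disassemble() can then dump the emitted code for logging.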
LLVMAssembler::LLVMAssembler(LLVMModule *lm, CodeInfo::CodeSpaceOnDemand &codeSpaceOnDemand, LOptions option)
    : Assembler(codeSpaceOnDemand),
      llvmModule_(lm),
      module_(llvmModule_->GetModule()),
      listener_(this)
{
    Initialize(option);
}

LLVMAssembler::~LLVMAssembler()
{
    if (engine_ != nullptr) {
        if (module_ != nullptr) {
            char *error = nullptr;
            LLVMRemoveModule(engine_, module_, &module_, &error);
            if (error != nullptr) {
                LLVMDisposeMessage(error);
            }
        }
        LLVMDisposeExecutionEngine(engine_);
        engine_ = nullptr;
    }
    module_ = nullptr;
    error_ = nullptr;
}

void LLVMAssembler::Run(const CompilerLog &log, bool fastCompileMode, bool isJit)
{
    char *error = nullptr;
    std::string originName = llvm::unwrap(module_)->getModuleIdentifier() + ".ll";
    std::string optName = llvm::unwrap(module_)->getModuleIdentifier() + "_opt.ll";
    if (log.OutputLLIR()) {
        LLVMPrintModuleToFile(module_, originName.c_str(), &error);
        std::string errInfo = (error != nullptr) ? error : "";
        LOG_COMPILER(INFO) << "generate " << originName << " " << errInfo;
    }
    LLVMVerifyModule(module_, LLVMAbortProcessAction, &error);
    LLVMDisposeMessage(error);
    UseRoundTripSectionMemoryManager(isJit);
    if (!BuildMCJITEngine()) {
        return;
    }
    llvm::unwrap(engine_)->setProcessAllSections(true);
    if (fastCompileMode) {
        BuildAndRunPassesFastMode();
    } else {
        BuildAndRunPasses();
    }
    if (log.OutputLLIR()) {
        error = nullptr;
        LLVMPrintModuleToFile(module_, optName.c_str(), &error);
        std::string errInfo = (error != nullptr) ? error : "";
        LOG_COMPILER(INFO) << "generate " << optName << " " << errInfo;
    }
}

void LLVMAssembler::Initialize(LOptions option)
{
    std::string triple(LLVMGetTarget(module_));
    if (triple.compare(TARGET_X64) == 0) {
#if defined(PANDA_TARGET_MACOS) || !defined(PANDA_TARGET_ARM64)
        LLVMInitializeX86TargetInfo();
        LLVMInitializeX86TargetMC();
        LLVMInitializeX86Disassembler();
        /* this method must be called, otherwise "Target does not support MC emission" */
        LLVMInitializeX86AsmPrinter();
        LLVMInitializeX86AsmParser();
        LLVMInitializeX86Target();
#endif
    } else if (triple.compare(TARGET_AARCH64) == 0) {
        LLVMInitializeAArch64TargetInfo();
        LLVMInitializeAArch64TargetMC();
        LLVMInitializeAArch64Disassembler();
        LLVMInitializeAArch64AsmPrinter();
        LLVMInitializeAArch64AsmParser();
        LLVMInitializeAArch64Target();
    } else {
        LOG_ECMA(FATAL) << "this branch is unreachable";
        UNREACHABLE();
    }

    LLVMLinkAllBuiltinGCs();
    LLVMInitializeMCJITCompilerOptions(&options_, sizeof(options_));
    options_.OptLevel = option.optLevel;
    // NOTE: Just ensure that this field still exists for the PIC option.
    options_.RelMode = static_cast<LLVMRelocMode>(option.relocMode);
    options_.NoFramePointerElim = static_cast<int32_t>(option.genFp);
    options_.CodeModel = LLVMCodeModelSmall;
}

static const char *SymbolLookupCallback([[maybe_unused]] void *disInfo, [[maybe_unused]] uint64_t referenceValue,
                                        uint64_t *referenceType, [[maybe_unused]] uint64_t referencePC,
                                        [[maybe_unused]] const char **referenceName)
{
    *referenceType = LLVMDisassembler_ReferenceType_InOut_None;
    return nullptr;
}

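// Callee-saved register spill offsets are communicated by the backend as string function attributes
// of the form "DwarfReg<N>" -> "<offset>"; parse them back into (register, offset) pairs.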
kungfu::CalleeRegAndOffsetVec LLVMAssembler::GetCalleeReg2Offset(LLVMValueRef fn, const CompilerLog &log)
{
    kungfu::CalleeRegAndOffsetVec info;
    llvm::Function *func = llvm::unwrap<llvm::Function>(fn);
    ASSERT(func != nullptr);
#if defined(PANDA_TARGET_MACOS)
    for (const auto &Attr : func->getAttributes().getFnAttributes()) {
#else
    for (const auto &Attr : func->getAttributes().getFnAttrs()) {
#endif
        if (Attr.isStringAttribute()) {
            std::string str = std::string(Attr.getKindAsString().data());
            std::string expectedKey = "DwarfReg";
            size_t keySZ = expectedKey.size();
            size_t strSZ = str.size();
            if (strSZ >= keySZ && str.substr(0, keySZ) == expectedKey) {
                int RegNum = std::stoi(str.substr(keySZ, strSZ - keySZ));
                auto value = std::stoi(std::string(Attr.getValueAsString()));
                info.push_back(std::make_pair(RegNum, value));
                (void)log;
            }
        }
    }
    return info;
}

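// Reads the "fpToCallerSpDelta" string attribute the backend attaches to each function, i.e. the
// delta between the frame pointer and the caller's stack pointer.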
int LLVMAssembler::GetFpDeltaPrevFramSp(LLVMValueRef fn, const CompilerLog &log)
{
    int fpToCallerSpDelta = 0;
    const char attrKey[] = "fpToCallerSpDelta"; // this key must be consistent with the llvm backend
    LLVMAttributeRef attributeRef = LLVMGetStringAttributeAtIndex(fn, llvm::AttributeList::FunctionIndex,
                                                                  attrKey, strlen(attrKey));
    if (attributeRef) {
        llvm::Attribute attr = llvm::unwrap(attributeRef);
        auto value = attr.getValueAsString().data();
        fpToCallerSpDelta = atoi(value);
        if (log.AllMethod()) {
            size_t length;
            LOG_COMPILER(DEBUG) << " funcName: " << LLVMGetValueName2(fn, &length) << " fpToCallerSpDelta:"
                                << fpToCallerSpDelta;
        }
    }
    return fpToCallerSpDelta;
}

static uint32_t GetInstrValue(size_t instrSize, uint8_t *instrAddr)
{
    uint32_t value = 0;
    if (instrSize <= sizeof(uint32_t)) {
        if (memcpy_s(&value, sizeof(uint32_t), instrAddr, instrSize) != EOK) {
            LOG_FULL(FATAL) << "memcpy_s failed";
            UNREACHABLE();
        }
    }
    return value;
}

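// Appends one disassembled instruction to codeStream as "<offset>:<raw bytes> <mnemonic>" and
// advances the cursor; unresolvable instructions are stepped over with a fixed 4-byte stride.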
void LLVMAssembler::PrintInstAndStep(uint64_t &instrOffset, uint8_t **instrAddr, uintptr_t &numBytes,
                                     size_t instSize, uint64_t textOffset, char *outString,
                                     std::ostringstream &codeStream, bool logFlag)
{
    if (instSize == 0) {
        instSize = 4; // 4: default step size when the instruction can't be resolved or is constant data
    }
    if (logFlag) {
        uint64_t unitedInstOffset = instrOffset + textOffset;
        // 8: width of the output field
        codeStream << std::setw(8) << std::setfill('0') << std::hex << unitedInstOffset << ":" << std::setw(8)
                   << GetInstrValue(instSize, *instrAddr) << " " << outString << std::endl;
    }
    instrOffset += instSize;
    *instrAddr += instSize;
    numBytes -= instSize;
}

void LLVMAssembler::Disassemble(const std::map<uintptr_t, std::string> *addr2name,
                                const std::string &triple, uint8_t *buf, size_t size)
{
    LLVMModuleRef module = LLVMModuleCreateWithName("Emit");
    LLVMSetTarget(module, triple.c_str());
    LLVMDisasmContextRef ctx = LLVMCreateDisasm(LLVMGetTarget(module), nullptr, 0, nullptr, SymbolLookupCallback);
    if (!ctx) {
        LOG_COMPILER(ERROR) << "ERROR: Couldn't create disassembler for triple!";
        return;
    }
    uint8_t *instrAddr = buf;
    uint64_t bufAddr = reinterpret_cast<uint64_t>(buf);
    size_t numBytes = size;
    uint64_t instrOffset = 0;
    const size_t outStringSize = 256;
    char outString[outStringSize];
    std::ostringstream codeStream;
    while (numBytes > 0) {
        uint64_t addr = reinterpret_cast<uint64_t>(instrAddr) - bufAddr;
        if (addr2name != nullptr && addr2name->find(addr) != addr2name->end()) {
            std::string methodName = addr2name->at(addr);
            codeStream << "------------------- asm code [" << methodName << "] -------------------"
                       << std::endl;
        }
        size_t instSize = LLVMDisasmInstruction(ctx, instrAddr, numBytes, instrOffset, outString, outStringSize);
        PrintInstAndStep(instrOffset, &instrAddr, numBytes, instSize, 0, outString, codeStream);
    }
    LOG_ECMA(INFO) << "\n" << codeStream.str();
    LLVMDisasmDispose(ctx);
}

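// Looks up the DWARF line info for the given section-relative address and, when a source line is
// found, appends the corresponding IR comment from the module's debug info to the disassembled
// instruction text.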
static void DecodeDebugInfo(uint64_t addr, uint64_t secIndex, char *outString, size_t outStringSize,
                            DWARFContext *ctx, LLVMModule *module, const std::string &funcName)
{
    object::SectionedAddress secAddr = {addr, secIndex};
    DILineInfoSpecifier spec;
    spec.FNKind = DINameKind::ShortName;

    DILineInfo info = ctx->getLineInfoForAddress(secAddr, spec);
    if (info && info.Line > 0) {
        std::string debugInfo = "\t\t;";
        debugInfo += module->GetDebugInfo()->GetComment(funcName, info.Line - 1);
        size_t len = strlen(outString);
        if (len + debugInfo.size() < outStringSize) {
            if (strcpy_s(outString + len, outStringSize - len, debugInfo.c_str()) != EOK) {
                LOG_FULL(FATAL) << "strcpy_s failed";
                UNREACHABLE();
            }
        }
    }
}

uint64_t LLVMAssembler::GetTextSectionIndex() const
{
    uint64_t index = 0;
    for (object::section_iterator it = objFile_->section_begin(); it != objFile_->section_end(); ++it) {
        auto name = it->getName();
        if (name) {
            std::string str = name->str();
            if (str == ".text") {
                index = it->getIndex();
                ASSERT(it->isText());
                break;
            }
        }
    }
    return index;
}

void LLVMAssembler::Disassemble(const std::map<uintptr_t, std::string> &addr2name, uint64_t textOffset,
                                const CompilerLog &log, const MethodLogList &logList,
                                std::ostringstream &codeStream) const
{
    const uint64_t textSecIndex = GetTextSectionIndex();
    LLVMDisasmContextRef disCtx = LLVMCreateDisasm(LLVMGetTarget(module_), nullptr, 0, nullptr, SymbolLookupCallback);
    bool logFlag = false;
    std::unique_ptr<DWARFContext> dwarfCtx = DWARFContext::create(*objFile_);

    for (auto it : codeInfo_.GetCodeInfo()) {
        uint8_t *instrAddr = it.first;
        size_t numBytes = it.second;
        uint64_t instrOffset = 0;

        const size_t outStringSize = 512;
        char outString[outStringSize] = {'\0'};
        std::string methodName;

        while (numBytes > 0) {
            uint64_t addr = reinterpret_cast<uint64_t>(instrAddr);
            if (addr2name.find(addr) != addr2name.end()) {
                methodName = addr2name.at(addr);
                logFlag = log.OutputASM();
                if (log.CertainMethod()) {
                    logFlag = logFlag && logList.IncludesMethod(methodName);
                } else if (log.NoneMethod()) {
                    logFlag = false;
                }
                if (logFlag) {
                    codeStream << "------------------- asm code [" << methodName << "] -------------------"
                               << std::endl;
                }
            }

            size_t instSize = LLVMDisasmInstruction(disCtx, instrAddr, numBytes, instrOffset, outString, outStringSize);
            DecodeDebugInfo(instrOffset, textSecIndex, outString, outStringSize,
                            dwarfCtx.get(), llvmModule_, methodName);
            PrintInstAndStep(instrOffset, &instrAddr, numBytes, instSize, textOffset, outString, codeStream, logFlag);
        }
    }
    LLVMDisasmDispose(disCtx);
}
}  // namespace panda::ecmascript::kungfu