1 /*
2 * Copyright (c) 2022-2024 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15 #include "ecmascript/deoptimizer/deoptimizer.h"
16
17
18 #include "ecmascript/dfx/stackinfo/js_stackinfo.h"
19 #include "ecmascript/interpreter/slow_runtime_stub.h"
20 #include "ecmascript/jit/jit.h"
21 #include "ecmascript/stubs/runtime_stubs-inl.h"
22
23 namespace panda::ecmascript {
24 class FrameWriter {
25 public:
FrameWriter(Deoptimizier * deoptimizier)26 explicit FrameWriter(Deoptimizier *deoptimizier) : thread_(deoptimizier->GetThread())
27 {
28 JSTaggedType *prevSp = const_cast<JSTaggedType *>(thread_->GetCurrentSPFrame());
29 start_ = top_ = EcmaInterpreter::GetInterpreterFrameEnd(thread_, prevSp);
30 }
31
PushValue(JSTaggedType value)32 void PushValue(JSTaggedType value)
33 {
34 *(--top_) = value;
35 }
36
PushRawValue(uintptr_t value)37 void PushRawValue(uintptr_t value)
38 {
39 *(--top_) = value;
40 }
41
Reserve(size_t size)42 bool Reserve(size_t size)
43 {
44 return !thread_->DoStackOverflowCheck(top_ - size);
45 }
46
ReserveAsmInterpretedFrame()47 AsmInterpretedFrame *ReserveAsmInterpretedFrame()
48 {
49 auto frame = AsmInterpretedFrame::GetFrameFromSp(top_);
50 top_ = reinterpret_cast<JSTaggedType *>(frame);
51 return frame;
52 }
53
GetStart() const54 JSTaggedType *GetStart() const
55 {
56 return start_;
57 }
58
GetTop() const59 JSTaggedType *GetTop() const
60 {
61 return top_;
62 }
63
GetFirstFrame() const64 JSTaggedType *GetFirstFrame() const
65 {
66 return firstFrame_;
67 }
68
RecordFirstFrame()69 void RecordFirstFrame()
70 {
71 firstFrame_ = top_;
72 }
73
ReviseValueByIndex(JSTaggedType value,size_t index)74 void ReviseValueByIndex(JSTaggedType value, size_t index)
75 {
76 ASSERT(index < static_cast<size_t>(start_ - top_));
77 *(top_ + index) = value;
78 }
79
80 private:
81 JSThread *thread_ {nullptr};
82 JSTaggedType *start_ {nullptr};
83 JSTaggedType *top_ {nullptr};
84 JSTaggedType *firstFrame_ {nullptr};
85 };
86
Deoptimizier(JSThread * thread,size_t depth)87 Deoptimizier::Deoptimizier(JSThread *thread, size_t depth) : thread_(thread), inlineDepth_(depth)
88 {
89 CalleeReg callreg;
90 numCalleeRegs_ = static_cast<size_t>(callreg.GetCallRegNum());
91 JSRuntimeOptions options = thread_->GetEcmaVM()->GetJSOptions();
92 traceDeopt_ = options.GetTraceDeopt();
93 }
94
CollectVregs(const std::vector<kungfu::ARKDeopt> & deoptBundle,size_t shift)95 void Deoptimizier::CollectVregs(const std::vector<kungfu::ARKDeopt>& deoptBundle, size_t shift)
96 {
97 deoptVregs_.clear();
98 for (size_t i = 0; i < deoptBundle.size(); i++) {
99 ARKDeopt deopt = deoptBundle.at(i);
100 JSTaggedType v;
101 VRegId id = deopt.id;
102 if (std::holds_alternative<DwarfRegAndOffsetType>(deopt.value)) {
103 ASSERT(deopt.kind == LocationTy::Kind::INDIRECT);
104 auto value = std::get<DwarfRegAndOffsetType>(deopt.value);
105 DwarfRegType dwarfReg = value.first;
106 OffsetType offset = value.second;
107 ASSERT (dwarfReg == GCStackMapRegisters::FP || dwarfReg == GCStackMapRegisters::SP);
108 uintptr_t addr;
109 if (dwarfReg == GCStackMapRegisters::SP) {
110 addr = context_.callsiteSp + offset;
111 } else {
112 addr = context_.callsiteFp + offset;
113 }
114 v = *(reinterpret_cast<JSTaggedType *>(addr));
115 } else if (std::holds_alternative<LargeInt>(deopt.value)) {
116 ASSERT(deopt.kind == LocationTy::Kind::CONSTANTNDEX);
117 v = JSTaggedType(static_cast<int64_t>(std::get<LargeInt>(deopt.value)));
118 } else {
119 ASSERT(std::holds_alternative<IntType>(deopt.value));
120 ASSERT(deopt.kind == LocationTy::Kind::CONSTANT);
121 v = JSTaggedType(static_cast<int64_t>(std::get<IntType>(deopt.value)));
122 }
123 size_t curDepth = DecodeDeoptDepth(id, shift);
124 OffsetType vregId = static_cast<OffsetType>(DecodeVregIndex(id, shift));
125 if (vregId != static_cast<OffsetType>(SpecVregIndex::PC_OFFSET_INDEX)) {
126 deoptVregs_.insert({{curDepth, vregId}, JSHandle<JSTaggedValue>(thread_, JSTaggedValue(v))});
127 } else {
128 pc_.insert({curDepth, static_cast<size_t>(v)});
129 }
130 }
131 }
132
133 // when AOT trigger deopt, frame layout as the following
134 // * OptimizedJSFunctionFrame layout description as the following:
135 // +--------------------------+ ---------------
136 // | ...... | ^
137 // | ...... | callerFunction
138 // | ...... | |
139 // |--------------------------| |
140 // | args | v
141 // +--------------------------+ ---------------
142 // | returnAddr | ^
143 // |--------------------------| |
144 // | callsiteFp | |
145 // |--------------------------| OptimizedJSFunction FrameType:OPTIMIZED_JS_FUNCTION_FRAME
146 // | frameType | |
147 // |--------------------------| |
148 // | call-target | |
149 // |--------------------------| |
150 // | lexEnv | |
151 // |--------------------------| |
152 // | ........... | v
153 // +--------------------------+ ---------------
154 // | returnAddr | ^
155 // |--------------------------| |
156 // | callsiteFp | |
157 // |--------------------------| __llvm_deoptimize FrameType:OPTIMIZED_FRAME
158 // | frameType | |
159 // |--------------------------| |
160 // | No CalleeSave | |
161 // | Registers | v
162 // +--------------------------+ ---------------
163 // | returnAddr | ^
164 // |--------------------------| |
165 // | callsiteFp | |
166 // |--------------------------| DeoptHandlerAsm FrameType:ASM_BRIDGE_FRAME
167 // | frameType | |
168 // |--------------------------| |
169 // | glue | |
170 // |--------------------------| |
171 // | CalleeSave Registers | v
172 // +--------------------------+ ---------------
173 // | ......... | ^
174 // | ......... | CallRuntime FrameType:LEAVE_FRAME
175 // | ......... | |
176 // | ......... | v
177 // |--------------------------| ---------------
178
179 // After gathering the necessary information(After Call Runtime), frame layout after constructing
180 // asminterpreterframe is shown as the following:
181 // +----------------------------------+---------+
182 // | ...... | ^
183 // | ...... | callerFunction
184 // | ...... | |
185 // |----------------------------------| |
186 // | args | v
187 // +----------------------------------+---------+
188 // | returnAddr | ^
189 // |----------------------------------| |
190 // | frameType | |
191 // |----------------------------------| ASM_INTERPRETER_BRIDGE_FRAME
192 // | callsiteFp | |
193 // |----------------------------------| |
194 // | ........... | v
195 // +----------------------------------+---------+
196 // | returnAddr |
197 // |----------------------------------|
198 // | argv[n-1] |
199 // |----------------------------------|
200 // | ...... |
201 // |----------------------------------|
202 // | thisArg [maybe not exist] |
203 // |----------------------------------|
204 // | newTarget [maybe not exist] |
205 // |----------------------------------|
206 // | ...... |
207 // |----------------------------------|
208 // | Vregs [not exist in native] |
209 // +----------------------------------+--------+
210 // | . . . . | ^
211 // | InterpretedFrameBase | |
212 // | . . . . | |
213 // |----------------------------------| |
214 // | pc(bytecode addr) | |
215 // |----------------------------------| |
216 // | sp(current stack pointer) | |
217 // |----------------------------------| AsmInterpretedFrame 0
218 // | callSize | |
219 // |----------------------------------| |
220 // | env | |
221 // |----------------------------------| |
222 // | acc | |
223 // |----------------------------------| |
224 // | thisObj | |
225 // |----------------------------------| |
226 // | call-target | v
227 // +----------------------------------+--------+
228 // | argv[n-1] |
229 // |----------------------------------|
230 // | ...... |
231 // |----------------------------------|
232 // | thisArg [maybe not exist] |
233 // |----------------------------------|
234 // | newTarget [maybe not exist] |
235 // |----------------------------------|
236 // | ...... |
237 // |----------------------------------|
238 // | Vregs [not exist in native] |
239 // +----------------------------------+--------+
240 // | . . . . | ^
241 // | InterpretedFrameBase | |
242 // | . . . . | |
243 // |----------------------------------| |
244 // | pc(bytecode addr) | |
245 // |----------------------------------| |
246 // | sp(current stack pointer) | |
247 // |----------------------------------| AsmInterpretedFrame 1
248 // | callSize | |
249 // |----------------------------------| |
250 // | env | |
251 // |----------------------------------| |
252 // | acc | |
253 // |----------------------------------| |
254 // | thisObj | |
255 // |----------------------------------| |
256 // | call-target | v
257 // +----------------------------------+--------+
258 // | . . . . | ^
259 // | . . . . | AsmInterpretedFrame n
260 // | . . . . | v
261 // +----------------------------------+--------+
262
263 template<class T>
AssistCollectDeoptBundleVec(FrameIterator & it,T & frame)264 void Deoptimizier::AssistCollectDeoptBundleVec(FrameIterator &it, T &frame)
265 {
266 CalleeRegAndOffsetVec calleeRegInfo;
267 frame->GetFuncCalleeRegAndOffset(it, calleeRegInfo);
268 context_.calleeRegAndOffset = calleeRegInfo;
269 context_.callsiteSp = it.GetCallSiteSp();
270 context_.callsiteFp = reinterpret_cast<uintptr_t>(it.GetSp());
271 auto preFrameSp = frame->ComputePrevFrameSp(it);
272 frameArgc_ = frame->GetArgc(preFrameSp);
273 frameArgvs_ = frame->GetArgv(preFrameSp);
274 stackContext_.callFrameTop_ = it.GetPrevFrameCallSiteSp();
275 stackContext_.returnAddr_ = frame->GetReturnAddr();
276 stackContext_.callerFp_ = reinterpret_cast<uintptr_t>(frame->GetPrevFrameFp());
277 }
278
CollectDeoptBundleVec(std::vector<ARKDeopt> & deoptBundle)279 void Deoptimizier::CollectDeoptBundleVec(std::vector<ARKDeopt>& deoptBundle)
280 {
281 JSTaggedType *lastLeave = const_cast<JSTaggedType *>(thread_->GetLastLeaveFrame());
282 FrameIterator it(lastLeave, thread_);
283 // note: last deopt bridge frame is generated by DeoptHandlerAsm, callee Regs is grow from this frame
284 for (; !it.Done() && deoptBundle.empty(); it.Advance<GCVisitedFlag::DEOPT>()) {
285 FrameType type = it.GetFrameType();
286 switch (type) {
287 case FrameType::OPTIMIZED_JS_FAST_CALL_FUNCTION_FRAME:
288 case FrameType::OPTIMIZED_JS_FUNCTION_FRAME: {
289 auto frame = it.GetFrame<OptimizedJSFunctionFrame>();
290 frame->GetDeoptBundleInfo(it, deoptBundle);
291 AssistCollectDeoptBundleVec(it, frame);
292 break;
293 }
294 case FrameType::FASTJIT_FUNCTION_FRAME:
295 case FrameType::FASTJIT_FAST_CALL_FUNCTION_FRAME: {
296 auto frame = it.GetFrame<FASTJITFunctionFrame>();
297 frame->GetDeoptBundleInfo(it, deoptBundle);
298 AssistCollectDeoptBundleVec(it, frame);
299 break;
300 }
301 case FrameType::ASM_BRIDGE_FRAME: {
302 auto sp = reinterpret_cast<uintptr_t*>(it.GetSp());
303 static constexpr size_t TYPE_GLUE_SLOT = 2; // 2: skip type & glue
304 sp -= TYPE_GLUE_SLOT;
305 calleeRegAddr_ = sp - numCalleeRegs_;
306 break;
307 }
308 case FrameType::OPTIMIZED_FRAME:
309 case FrameType::LEAVE_FRAME:
310 break;
311 default: {
312 LOG_FULL(FATAL) << "frame type error!";
313 UNREACHABLE();
314 }
315 }
316 }
317 ASSERT(!it.Done());
318 }
319
GetMethod(JSTaggedValue & target)320 Method* Deoptimizier::GetMethod(JSTaggedValue &target)
321 {
322 ECMAObject *callTarget = reinterpret_cast<ECMAObject*>(target.GetTaggedObject());
323 ASSERT(callTarget != nullptr);
324 Method *method = callTarget->GetCallTarget();
325 return method;
326 }
327
RelocateCalleeSave()328 void Deoptimizier::RelocateCalleeSave()
329 {
330 CalleeReg callreg;
331 for (auto &it: context_.calleeRegAndOffset) {
332 auto reg = it.first;
333 auto offset = it.second;
334 uintptr_t value = *(reinterpret_cast<uintptr_t *>(context_.callsiteFp + offset));
335 int order = callreg.FindCallRegOrder(reg);
336 calleeRegAddr_[order] = value;
337 }
338 }
339
CollectVirtualRegisters(JSTaggedValue callTarget,Method * method,FrameWriter * frameWriter,size_t curDepth)340 bool Deoptimizier::CollectVirtualRegisters(JSTaggedValue callTarget, Method *method, FrameWriter *frameWriter,
341 size_t curDepth)
342 {
343 int32_t actualNumArgs = 0;
344 int32_t declaredNumArgs = 0;
345 if (curDepth == 0) {
346 actualNumArgs = static_cast<int32_t>(GetDeoptValue(curDepth,
347 static_cast<int32_t>(SpecVregIndex::ACTUAL_ARGC_INDEX)).GetInt());
348 declaredNumArgs = static_cast<int32_t>(method->GetNumArgsWithCallField());
349 } else {
350 // inline method actualNumArgs equal to declaredNumArgs
351 actualNumArgs = static_cast<int32_t>(method->GetNumArgsWithCallField());
352 declaredNumArgs = static_cast<int32_t>(method->GetNumArgsWithCallField());
353 }
354
355 int32_t callFieldNumVregs = static_cast<int32_t>(method->GetNumVregsWithCallField());
356
357 // layout of frame:
358 // [maybe argc] [actual args] [reserved args] [call field virtual regs]
359
360 // [maybe argc]
361 bool isFastCall = JSFunctionBase::IsFastCallFromCallTarget(callTarget);
362 if (!isFastCall && declaredNumArgs != actualNumArgs) {
363 auto value = JSTaggedValue(actualNumArgs);
364 frameWriter->PushValue(value.GetRawData());
365 }
366 int32_t virtualIndex = declaredNumArgs + callFieldNumVregs +
367 static_cast<int32_t>(method->GetNumRevervedArgs()) - 1;
368 if (!frameWriter->Reserve(static_cast<size_t>(virtualIndex))) {
369 return false;
370 }
371 for (int32_t i = static_cast<int32_t>(declaredNumArgs - 1); i >= 0; i--) {
372 JSTaggedValue value = JSTaggedValue::Undefined();
373 // deopt value
374 if (HasDeoptValue(curDepth, virtualIndex)) {
375 value = GetDeoptValue(curDepth, virtualIndex);
376 }
377 frameWriter->PushValue(value.GetRawData());
378 virtualIndex--;
379 }
380
381 // [reserved args]
382 if (method->HaveThisWithCallField()) {
383 JSTaggedValue value = deoptVregs_.at(
384 {curDepth, static_cast<OffsetType>(SpecVregIndex::THIS_OBJECT_INDEX)}).GetTaggedValue();
385 frameWriter->PushValue(value.GetRawData());
386 virtualIndex--;
387 }
388 if (method->HaveNewTargetWithCallField()) {
389 JSTaggedValue value = deoptVregs_.at(
390 {curDepth, static_cast<OffsetType>(SpecVregIndex::NEWTARGET_INDEX)}).GetTaggedValue();
391 frameWriter->PushValue(value.GetRawData());
392 virtualIndex--;
393 }
394 if (method->HaveFuncWithCallField()) {
395 JSTaggedValue value = deoptVregs_.at(
396 {curDepth, static_cast<OffsetType>(SpecVregIndex::FUNC_INDEX)}).GetTaggedValue();
397 frameWriter->PushValue(value.GetRawData());
398 virtualIndex--;
399 }
400
401 // [call field virtual regs]
402 for (int32_t i = virtualIndex; i >= 0; i--) {
403 JSTaggedValue value = GetDeoptValue(curDepth, virtualIndex);
404 frameWriter->PushValue(value.GetRawData());
405 virtualIndex--;
406 }
407 // revise correct a0 - aN virtual regs , for example: ldobjbyname key; sta a2; update value to a2
408 // +--------------------------+ ^
409 // | aN | |
410 // +--------------------------+ |
411 // | ... | |
412 // +--------------------------+ |
413 // | a2(this) | |
414 // +--------------------------+ revise correct vreg
415 // | a1(newtarget) | |
416 // +--------------------------+ |
417 // | a0(func) | |
418 // |--------------------------| v
419 // | v0 - vN |
420 // sp --> |--------------------------|
421 int32_t vregsAndArgsNum = declaredNumArgs + callFieldNumVregs +
422 static_cast<int32_t>(method->GetNumRevervedArgs());
423 for (int32_t i = callFieldNumVregs; i < vregsAndArgsNum; i++) {
424 JSTaggedValue value = JSTaggedValue::Undefined();
425 if (HasDeoptValue(curDepth, i)) {
426 value = GetDeoptValue(curDepth, i);
427 frameWriter->ReviseValueByIndex(value.GetRawData(), i);
428 }
429 }
430 return true;
431 }
432
Dump(JSTaggedValue callTarget,kungfu::DeoptType type,size_t depth)433 void Deoptimizier::Dump(JSTaggedValue callTarget, kungfu::DeoptType type, size_t depth)
434 {
435 if (thread_->IsPGOProfilerEnable()) {
436 JSFunction *function = JSFunction::Cast(callTarget);
437 auto profileTypeInfo = function->GetProfileTypeInfo();
438 if (profileTypeInfo.IsUndefined()) {
439 SlowRuntimeStub::NotifyInlineCache(thread_, function);
440 }
441 }
442 if (traceDeopt_) {
443 std::string checkType = DisplayItems(type);
444 LOG_TRACE(INFO) << "Check Type: " << checkType;
445 std::string data = JsStackInfo::BuildJsStackTrace(thread_, true);
446 LOG_COMPILER(INFO) << "Deoptimize" << data;
447 const uint8_t *pc = GetMethod(callTarget)->GetBytecodeArray() + pc_.at(depth);
448 BytecodeInstruction inst(pc);
449 LOG_COMPILER(INFO) << inst;
450 }
451 }
452
DisplayItems(DeoptType type)453 std::string Deoptimizier::DisplayItems(DeoptType type)
454 {
455 const std::map<DeoptType, const char *> strMap = {
456 #define DEOPT_NAME_MAP(NAME, TYPE) {DeoptType::TYPE, #NAME},
457 GATE_META_DATA_DEOPT_REASON(DEOPT_NAME_MAP)
458 #undef DEOPT_NAME_MAP
459 };
460 if (strMap.count(type) > 0) {
461 return strMap.at(type);
462 }
463 return "DeoptType-" + std::to_string(static_cast<uint8_t>(type));
464 }
465
466 // layout of frameWriter
467 // |--------------------------| --------------> start(n)
468 // | args |
469 // | this |
470 // | newTarget |
471 // | callTarget |
472 // | vregs |
473 // |---------------------------
474 // | ASM Interpreter |
475 // +--------------------------+ --------------> end(n)
476 // | outputcounts | outputcounts = end(n) - start(n)
477 // |--------------------------| --------------> start(n-1)
478 // | args |
479 // | this |
480 // | newTarget |
481 // | callTarget |
482 // | vregs |
483 // |-------------------------------------------
484 // | ASM Interpreter |
485 // +--------------------------+ --------------> end(n-1)
486 // | outputcounts | outputcounts = end(n-1) - start(n-1)
// |--------------------------| --------------> start(n-2)
488 // | ...... |
489 // +--------------------------+ ---------------
490 // | callerFp_ | ^
491 // | returnAddr_ | stackContext
492 // | callFrameTop_ | |
493 // | inlineDepth | v
494 // |--------------------------| ---------------
495
ConstructAsmInterpretFrame()496 JSTaggedType Deoptimizier::ConstructAsmInterpretFrame()
497 {
498 FrameWriter frameWriter(this);
499 // Push asm interpreter frame
500 for (int32_t curDepth = static_cast<int32_t>(inlineDepth_); curDepth >= 0; curDepth--) {
501 auto start = frameWriter.GetTop();
502 JSTaggedValue callTarget = GetDeoptValue(curDepth, static_cast<int32_t>(SpecVregIndex::FUNC_INDEX));
503 auto method = GetMethod(callTarget);
504 if (!CollectVirtualRegisters(callTarget, method, &frameWriter, curDepth)) {
505 return JSTaggedValue::Exception().GetRawData();
506 }
507 AsmInterpretedFrame *statePtr = frameWriter.ReserveAsmInterpretedFrame();
508 const uint8_t *resumePc = method->GetBytecodeArray() + pc_.at(curDepth);
509 JSTaggedValue thisObj = GetDeoptValue(curDepth, static_cast<int32_t>(SpecVregIndex::THIS_OBJECT_INDEX));
510 auto acc = GetDeoptValue(curDepth, static_cast<int32_t>(SpecVregIndex::ACC_INDEX));
511 statePtr->function = callTarget;
512 statePtr->acc = acc;
513 statePtr->env = GetDeoptValue(curDepth, static_cast<int32_t>(SpecVregIndex::ENV_INDEX));
514 statePtr->callSize = GetCallSize(curDepth, resumePc);
515 statePtr->fp = 0; // need update
516 statePtr->thisObj = thisObj;
517 statePtr->pc = resumePc;
518 // -uintptr_t skip lr
519 if (curDepth == 0) {
520 statePtr->base.prev = reinterpret_cast<JSTaggedType *>(stackContext_.callFrameTop_ - sizeof(uintptr_t));
521 } else {
522 statePtr->base.prev = 0; // need update
523 }
524
525 statePtr->base.type = FrameType::ASM_INTERPRETER_FRAME;
526
527 // construct stack context
528 auto end = frameWriter.GetTop();
529 auto outputCount = start - end;
530 frameWriter.PushRawValue(outputCount);
531 }
532
533 RelocateCalleeSave();
534
535 frameWriter.PushRawValue(stackContext_.callerFp_);
536 frameWriter.PushRawValue(stackContext_.returnAddr_);
537 frameWriter.PushRawValue(stackContext_.callFrameTop_);
538 frameWriter.PushRawValue(inlineDepth_);
539 return reinterpret_cast<JSTaggedType>(frameWriter.GetTop());
540 }
541
ResetJitHotness(JSFunction * jsFunc) const542 void Deoptimizier::ResetJitHotness(JSFunction *jsFunc) const
543 {
544 if (jsFunc->GetMachineCode().IsMachineCodeObject()) {
545 JSTaggedValue profileTypeInfoVal = jsFunc->GetProfileTypeInfo();
546 if (!profileTypeInfoVal.IsUndefined()) {
547 ProfileTypeInfo *profileTypeInfo = ProfileTypeInfo::Cast(profileTypeInfoVal.GetTaggedObject());
548 profileTypeInfo->SetJitHotnessCnt(0);
549 constexpr uint16_t thresholdStep = 4;
550 constexpr uint16_t thresholdLimit = ProfileTypeInfo::JIT_DISABLE_FLAG / thresholdStep;
551 uint16_t threshold = profileTypeInfo->GetJitHotnessThreshold();
552 threshold = threshold >= thresholdLimit ? ProfileTypeInfo::JIT_DISABLE_FLAG : threshold * thresholdStep;
553 profileTypeInfo->SetJitHotnessThreshold(threshold);
554 ProfileTypeInfoCell::Cast(jsFunc->GetRawProfileTypeInfo())->SetMachineCode(thread_, JSTaggedValue::Hole());
555 Method *method = Method::Cast(jsFunc->GetMethod().GetTaggedObject());
556 LOG_JIT(DEBUG) << "reset jit hotness for func: " << method->GetMethodName() << ", threshold:" << threshold;
557 }
558 }
559 }
560
ClearCompiledCodeStatusWhenDeopt(JSFunction * func,Method * method)561 void Deoptimizier::ClearCompiledCodeStatusWhenDeopt(JSFunction *func, Method *method)
562 {
563 if (func->GetMachineCode().IsMachineCodeObject()) {
564 Jit::GetInstance()->GetJitDfx()->SetJitDeoptCount();
565 }
566 if (func->IsCompiledCode()) {
567 bool isFastCall = func->IsCompiledFastCall(); // get this flag before clear it
568 uintptr_t entry =
569 isFastCall ? thread_->GetRTInterface(kungfu::RuntimeStubCSigns::ID_FastCallToAsmInterBridge)
570 : thread_->GetRTInterface(kungfu::RuntimeStubCSigns::ID_AOTCallToAsmInterBridge);
571 func->SetCodeEntry(entry);
572 method->ClearAOTStatusWhenDeopt(entry);
573 func->ClearCompiledCodeFlags();
574 ResetJitHotness(func);
575 func->ClearMachineCode(thread_);
576 } // Do not change the func code entry if the method is not aot or deopt has happened already
577 }
578
UpdateAndDumpDeoptInfo(kungfu::DeoptType type)579 void Deoptimizier::UpdateAndDumpDeoptInfo(kungfu::DeoptType type)
580 {
581 // depth records the number of layers of nested calls when deopt occurs
582 for (size_t i = 0; i <= inlineDepth_; i++) {
583 JSTaggedValue callTarget = GetDeoptValue(i, static_cast<int32_t>(SpecVregIndex::FUNC_INDEX));
584 auto func = JSFunction::Cast(callTarget.GetTaggedObject());
585 if (func->GetMachineCode().IsMachineCodeObject()) {
586 MachineCode *machineCode = MachineCode::Cast(func->GetMachineCode().GetTaggedObject());
587 if (type != kungfu::DeoptType::OSRLOOPEXIT &&
588 machineCode->GetOSROffset() != MachineCode::INVALID_OSR_OFFSET) {
589 machineCode->SetOsrDeoptFlag(true);
590 }
591 }
592 auto method = GetMethod(callTarget);
593 if (i == inlineDepth_) {
594 Dump(callTarget, type, i);
595 }
596 ASSERT(thread_ != nullptr);
597 uint8_t deoptThreshold = method->GetDeoptThreshold();
598 if (deoptThreshold > 0) {
599 method->SetDeoptType(type);
600 method->SetDeoptThreshold(--deoptThreshold);
601 } else {
602 ClearCompiledCodeStatusWhenDeopt(func, method);
603 }
604 }
605 }
606
607 // call instructions need compute jumpSize
GetCallSize(size_t curDepth,const uint8_t * resumePc)608 size_t Deoptimizier::GetCallSize(size_t curDepth, const uint8_t *resumePc)
609 {
610 if (inlineDepth_ > 0 && curDepth != inlineDepth_) {
611 auto op = BytecodeInstruction(resumePc).GetOpcode();
612 size_t jumpSize = BytecodeInstruction::Size(op);
613 return jumpSize;
614 }
615 return 0;
616 }
617
EncodeDeoptVregIndex(int32_t index,size_t depth,size_t shift)618 int32_t Deoptimizier::EncodeDeoptVregIndex(int32_t index, size_t depth, size_t shift)
619 {
620 if (index >= 0) {
621 return (index << shift) | depth;
622 }
623 return -((-index << shift) | depth);
624 }
625
ComputeShift(size_t depth)626 size_t Deoptimizier::ComputeShift(size_t depth)
627 {
628 size_t shift = 0;
629 if (depth != 0) {
630 shift = std::floor(std::log2(depth)) + 1;
631 }
632 return shift;
633 }
634
DecodeVregIndex(OffsetType id,size_t shift)635 int32_t Deoptimizier::DecodeVregIndex(OffsetType id, size_t shift)
636 {
637 if (id >= 0) {
638 return id >> shift;
639 }
640 return -((-id) >> shift);
641 }
642
DecodeDeoptDepth(OffsetType id,size_t shift)643 size_t Deoptimizier::DecodeDeoptDepth(OffsetType id, size_t shift)
644 {
645 size_t mask = (1 << shift) - 1;
646 if (id >= 0) {
647 return id & mask;
648 }
649 return (-id) & mask;
650 }
651 } // namespace panda::ecmascript
652