/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "android-base/logging.h"
#include "arch/context.h"
#include "arch/instruction_set.h"
#include "art_method-inl.h"
#include "art_method.h"
#include "base/callee_save_type.h"
#include "base/globals.h"
#include "base/pointer_size.h"
#include "callee_save_frame.h"
#include "class_root-inl.h"
#include "common_throws.h"
#include "debug_print.h"
#include "debugger.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_types.h"
#include "dex/dex_instruction-inl.h"
#include "dex/method_reference.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/quick/callee_save_frame.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "imt_conflict_table.h"
#include "imtable-inl.h"
#include "instrumentation.h"
#include "interpreter/interpreter.h"
#include "interpreter/interpreter_common.h"
#include "interpreter/shadow_frame-inl.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "linear_alloc.h"
#include "method_handles.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/method.h"
#include "mirror/method_handle_impl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/var_handle.h"
#include "oat/oat.h"
#include "oat/oat_file.h"
#include "oat/oat_quick_method_header.h"
#include "quick_exception_handler.h"
#include "runtime.h"
#include "runtime_entrypoints_list.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread-inl.h"
#include "trace_profile.h"
#include "var_handles.h"
#include "well_known_classes.h"

namespace art HIDDEN {

// Visits the arguments as saved to the stack by a CalleeSaveType::kRefAndArgs callee save frame.
template <typename FrameInfo>
class QuickArgumentVisitorImpl {
  // Number of bytes for each out register in the caller method's frame.
  static constexpr size_t kBytesStackArgLocation = 4;
  // Frame size in bytes of a callee-save frame for RefsAndArgs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize =
      RuntimeCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsAndArgs);
  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
      RuntimeCalleeSaveFrame::GetGpr1Offset(CalleeSaveType::kSaveRefsAndArgs);
  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
      RuntimeCalleeSaveFrame::GetFpr1Offset(CalleeSaveType::kSaveRefsAndArgs);
  // Offset of return address.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_ReturnPcOffset =
      RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveRefsAndArgs);

  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return FrameInfo::GprIndexToGprOffsetImpl(gpr_index);
  }

  static constexpr bool kSplitPairAcrossRegisterAndStack =
      FrameInfo::kSplitPairAcrossRegisterAndStack;
  static constexpr bool kAlignPairRegister = FrameInfo::kAlignPairRegister;
  static constexpr bool kQuickSoftFloatAbi = FrameInfo::kQuickSoftFloatAbi;
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled =
      FrameInfo::kQuickDoubleRegAlignedFloatBackFilled;
  static constexpr bool kQuickSkipOddFpRegisters = FrameInfo::kQuickSkipOddFpRegisters;
  static constexpr size_t kNumQuickGprArgs = FrameInfo::kNumQuickGprArgs;
  static constexpr size_t kNumQuickFprArgs = FrameInfo::kNumQuickFprArgs;
  static constexpr bool kGprFprLockstep = FrameInfo::kGprFprLockstep;
  static constexpr bool kNaNBoxing = FrameInfo::kNaNBoxing;

 public:
  static constexpr bool NaNBoxing() { return FrameInfo::kNaNBoxing; }

  static StackReference<mirror::Object>* GetThisObjectReference(ArtMethod** sp)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    CHECK_GT(kNumQuickGprArgs, 0u);
    constexpr uint32_t kThisGprIndex = 0u;  // 'this' is in the 1st GPR.
    size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset +
        GprIndexToGprOffset(kThisGprIndex);
    uint8_t* this_arg_address = reinterpret_cast<uint8_t*>(sp) + this_arg_offset;
    return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address);
  }

  static ArtMethod* GetCallingMethodAndDexPc(ArtMethod** sp, uint32_t* dex_pc)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    return GetCalleeSaveMethodCallerAndDexPc(sp, CalleeSaveType::kSaveRefsAndArgs, dex_pc);
  }

  static ArtMethod* GetCallingMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
    uint32_t dex_pc;
    return GetCallingMethodAndDexPc(sp, &dex_pc);
  }

  static ArtMethod* GetOuterMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    uint8_t* previous_sp =
        reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
    return *reinterpret_cast<ArtMethod**>(previous_sp);
  }

  static uint8_t* GetCallingPcAddr(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    uint8_t* return_address_spill =
        reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_ReturnPcOffset;
    return return_address_spill;
  }

  // For the given quick ref and args quick frame, return the caller's PC.
  static uintptr_t GetCallingPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
    return *reinterpret_cast<uintptr_t*>(GetCallingPcAddr(sp));
  }

  QuickArgumentVisitorImpl(ArtMethod** sp, bool is_static, std::string_view shorty)
      REQUIRES_SHARED(Locks::mutator_lock_)
      : is_static_(is_static),
        shorty_(shorty),
        gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
        fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
        stack_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize +
                    sizeof(ArtMethod*)),  // Skip ArtMethod*.
        gpr_index_(0),
        fpr_index_(0),
        fpr_double_index_(0),
        stack_index_(0),
        cur_type_(Primitive::kPrimVoid),
        is_split_long_or_double_(false) {
    static_assert(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0),
                  "Number of Quick FPR arguments unexpected");
    static_assert(!(kQuickSoftFloatAbi && kQuickDoubleRegAlignedFloatBackFilled),
                  "Double alignment unexpected");
    // For register alignment, we want the double counter (fpr_double_index_) to stay even,
    // which requires an even number of FPR argument registers.
    static_assert(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0,
                  "Number of Quick FPR arguments not even");
    DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
  }

  virtual ~QuickArgumentVisitorImpl() {}

  virtual void Visit() = 0;

  Primitive::Type GetParamPrimitiveType() const {
    return cur_type_;
  }

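  // Note on kQuickDoubleRegAlignedFloatBackFilled (used below and in VisitArguments): on ABIs
  // like 32-bit ARM, doubles are allocated at even FPR pairs (tracked by fpr_double_index_),
  // while later floats may back-fill the single-precision holes left behind (tracked by
  // fpr_index_), so the two counters can legitimately diverge.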
  uint8_t* GetParamAddress() const {
    if (!kQuickSoftFloatAbi) {
      Primitive::Type type = GetParamPrimitiveType();
      if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
        if (type == Primitive::kPrimDouble && kQuickDoubleRegAlignedFloatBackFilled) {
          if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
            return fpr_args_ +
                (fpr_double_index_ * GetBytesPerFprSpillLocation(kRuntimeQuickCodeISA));
          }
        } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
          return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeQuickCodeISA));
        }
        return stack_args_ + (stack_index_ * kBytesStackArgLocation);
      }
    }
    if (gpr_index_ < kNumQuickGprArgs) {
      return gpr_args_ + GprIndexToGprOffset(gpr_index_);
    }
    return stack_args_ + (stack_index_ * kBytesStackArgLocation);
  }

  bool IsSplitLongOrDouble() const {
    if ((GetBytesPerGprSpillLocation(kRuntimeQuickCodeISA) == 4) ||
        (GetBytesPerFprSpillLocation(kRuntimeQuickCodeISA) == 4)) {
      return is_split_long_or_double_;
    } else {
      return false;  // An optimization for when GPR and FPRs are 64bit.
    }
  }

  bool IsParamAReference() const {
    return GetParamPrimitiveType() == Primitive::kPrimNot;
  }

  bool IsParamALongOrDouble() const {
    Primitive::Type type = GetParamPrimitiveType();
    return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
  }

  uint64_t ReadSplitLongParam() const {
    // The split long is always available through the stack.
    return *reinterpret_cast<uint64_t*>(stack_args_
        + stack_index_ * kBytesStackArgLocation);
  }

  void IncGprIndex() {
    gpr_index_++;
    if (kGprFprLockstep) {
      fpr_index_++;
    }
  }

  void IncFprIndex() {
    fpr_index_++;
    if (kGprFprLockstep) {
      gpr_index_++;
    }
  }

  void VisitArguments() REQUIRES_SHARED(Locks::mutator_lock_) {
    // (a) 'stack_args_' should point to the first method's argument
    // (b) whatever the argument type it is, the 'stack_index_' should
    //     be moved forward along with every visiting.
    gpr_index_ = 0;
    fpr_index_ = 0;
    if (kQuickDoubleRegAlignedFloatBackFilled) {
      fpr_double_index_ = 0;
    }
    stack_index_ = 0;
    if (!is_static_) {  // Handle this.
      cur_type_ = Primitive::kPrimNot;
      is_split_long_or_double_ = false;
      Visit();
      stack_index_++;
      if (kNumQuickGprArgs > 0) {
        IncGprIndex();
      }
    }
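    // The shorty's first character encodes the return type; the remaining characters describe
    // the parameters in order, which is why the loop below starts at substr(1u).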
    for (char c : shorty_.substr(1u)) {
      cur_type_ = Primitive::GetType(c);
      switch (cur_type_) {
        case Primitive::kPrimNot:
        case Primitive::kPrimBoolean:
        case Primitive::kPrimByte:
        case Primitive::kPrimChar:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          is_split_long_or_double_ = false;
          Visit();
          stack_index_++;
          if (gpr_index_ < kNumQuickGprArgs) {
            IncGprIndex();
          }
          break;
        case Primitive::kPrimFloat:
          is_split_long_or_double_ = false;
          Visit();
          stack_index_++;
          if (kQuickSoftFloatAbi) {
            if (gpr_index_ < kNumQuickGprArgs) {
              IncGprIndex();
            }
          } else {
            if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
              IncFprIndex();
              if (kQuickDoubleRegAlignedFloatBackFilled) {
                // Double should not overlap with float.
                // For example, if fpr_index_ = 3, fpr_double_index_ should be at least 4.
                fpr_double_index_ = std::max(fpr_double_index_, RoundUp(fpr_index_, 2));
                // Float should not overlap with double.
                if (fpr_index_ % 2 == 0) {
                  fpr_index_ = std::max(fpr_double_index_, fpr_index_);
                }
              } else if (kQuickSkipOddFpRegisters) {
                IncFprIndex();
              }
            }
          }
          break;
        case Primitive::kPrimDouble:
        case Primitive::kPrimLong:
          if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) {
            if (cur_type_ == Primitive::kPrimLong &&
                gpr_index_ == 0 &&
                kAlignPairRegister) {
              // Currently, this is only for ARM, where we align long parameters with
              // even-numbered registers by skipping R1 and using R2 instead.
              IncGprIndex();
            }
            is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeQuickCodeISA) == 4) &&
                ((gpr_index_ + 1) == kNumQuickGprArgs);
            if (!kSplitPairAcrossRegisterAndStack && is_split_long_or_double_) {
              // We don't want to split this. Pass over this register.
              gpr_index_++;
              is_split_long_or_double_ = false;
            }
            Visit();
            if (kBytesStackArgLocation == 4) {
              stack_index_ += 2;
            } else {
              CHECK_EQ(kBytesStackArgLocation, 8U);
              stack_index_++;
            }
            if (gpr_index_ < kNumQuickGprArgs) {
              IncGprIndex();
              if (GetBytesPerGprSpillLocation(kRuntimeQuickCodeISA) == 4) {
                if (gpr_index_ < kNumQuickGprArgs) {
                  IncGprIndex();
                }
              }
            }
          } else {
            is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeQuickCodeISA) == 4) &&
                ((fpr_index_ + 1) == kNumQuickFprArgs) && !kQuickDoubleRegAlignedFloatBackFilled;
            Visit();
            if (kBytesStackArgLocation == 4) {
              stack_index_ += 2;
            } else {
              CHECK_EQ(kBytesStackArgLocation, 8U);
              stack_index_++;
            }
            if (kQuickDoubleRegAlignedFloatBackFilled) {
              if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
                fpr_double_index_ += 2;
                // Float should not overlap with double.
                if (fpr_index_ % 2 == 0) {
                  fpr_index_ = std::max(fpr_double_index_, fpr_index_);
                }
              }
            } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
              IncFprIndex();
              if (GetBytesPerFprSpillLocation(kRuntimeQuickCodeISA) == 4) {
                if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
                  IncFprIndex();
                }
              }
            }
          }
          break;
        default:
          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_;
      }
    }
  }

 protected:
  const bool is_static_;
  const std::string_view shorty_;

 private:
  uint8_t* const gpr_args_;    // Address of GPR arguments in callee save frame.
  uint8_t* const fpr_args_;    // Address of FPR arguments in callee save frame.
  uint8_t* const stack_args_;  // Address of stack arguments in caller's frame.
  uint32_t gpr_index_;         // Index into spilled GPRs.
  // Index into spilled FPRs.
  // In case kQuickDoubleRegAlignedFloatBackFilled, it may index a hole while fpr_double_index_
  // holds a higher register number.
  uint32_t fpr_index_;
  // Index into spilled FPRs for aligned double.
  // Only used when kQuickDoubleRegAlignedFloatBackFilled. Next available double register indexed
  // in terms of singles, may be behind fpr_index.
  uint32_t fpr_double_index_;
  uint32_t stack_index_;  // Index into arguments on the stack.
  // The current type of argument during VisitArguments.
  Primitive::Type cur_type_;
  // Does a 64bit parameter straddle the register and stack arguments?
  bool is_split_long_or_double_;
};

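// Each QuickArgumentFrameInfo* class below describes one architecture's kSaveRefsAndArgs layout
// for QuickArgumentVisitorImpl: how many GPR/FPR argument registers are spilled, whether the ABI
// is soft-float, how 64-bit pairs are aligned or split, whether floats back-fill double alignment
// holes, whether float results are NaN-boxed, and how a GPR argument index maps to its spill
// offset.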
class QuickArgumentFrameInfoARM {
 public:
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | LR         |
  // | ...        |    4x6 bytes callee saves
  // | R3         |
  // | R2         |
  // | R1         |
  // | S15        |
  // | :          |
  // | S0         |
  // |            |    4x2 bytes padding
  // | Method*    |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = true;
  static constexpr bool kQuickSoftFloatAbi = false;
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = true;
  static constexpr bool kQuickSkipOddFpRegisters = false;
  static constexpr size_t kNumQuickGprArgs = 3;
  static constexpr size_t kNumQuickFprArgs = 16;
  static constexpr bool kGprFprLockstep = false;
  static constexpr bool kNaNBoxing = false;
  static size_t GprIndexToGprOffsetImpl(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(InstructionSet::kArm);
  }
};

class QuickArgumentFrameInfoARM64 {
 public:
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | LR         |
  // | X29        |
  // | :          |
  // | X20        |
  // | X7         |
  // | :          |
  // | X1         |
  // | D7         |
  // | :          |
  // | D0         |
  // |            |    padding
  // | Method*    |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = false;
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr bool kQuickSkipOddFpRegisters = false;
  static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
  static constexpr bool kGprFprLockstep = false;
  static constexpr bool kNaNBoxing = false;
  static size_t GprIndexToGprOffsetImpl(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(InstructionSet::kArm64);
  }
};

class QuickArgumentFrameInfoRISCV64 {
 public:
  // The callee save frame is pointed to by SP.
  // | argN            |  |
  // | ...             |  |
  // | reg. arg spills |  |  Caller's frame
  // | Method*         | ---
  // | RA              |
  // | S11/X27         |  callee-saved 11
  // | S10/X26         |  callee-saved 10
  // | S9/X25          |  callee-saved 9
  // | S8/X24          |  callee-saved 8
  // | S7/X23          |  callee-saved 7
  // | S6/X22          |  callee-saved 6
  // | S5/X21          |  callee-saved 5
  // | S4/X20          |  callee-saved 4
  // | S3/X19          |  callee-saved 3
  // | S2/X18          |  callee-saved 2
  // | A7/X17          |  arg 7
  // | A6/X16          |  arg 6
  // | A5/X15          |  arg 5
  // | A4/X14          |  arg 4
  // | A3/X13          |  arg 3
  // | A2/X12          |  arg 2
  // | A1/X11          |  arg 1 (A0 is the method => skipped)
  // | S0/X8/FP        |  callee-saved 0 (S1 is TR => skipped)
  // | FA7             |  float arg 8
  // | FA6             |  float arg 7
  // | FA5             |  float arg 6
  // | FA4             |  float arg 5
  // | FA3             |  float arg 4
  // | FA2             |  float arg 3
  // | FA1             |  float arg 2
  // | FA0             |  float arg 1
  // | A0/Method*      |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = false;
  static constexpr bool kQuickSoftFloatAbi = false;
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr bool kQuickSkipOddFpRegisters = false;
  static constexpr size_t kNumQuickGprArgs = 7;
  static constexpr size_t kNumQuickFprArgs = 8;
  static constexpr bool kGprFprLockstep = false;
  static constexpr bool kNaNBoxing = true;
  static size_t GprIndexToGprOffsetImpl(uint32_t gpr_index) {
    // Skip S0/X8/FP.
    return (gpr_index + 1) * GetBytesPerGprSpillLocation(InstructionSet::kRiscv64);
  }
};

class QuickArgumentFrameInfoX86 {
 public:
  // The callee save frame is pointed to by SP.
  // | argN        |  |
  // | ...         |  |
  // | arg4        |  |
  // | arg3 spill  |  |  Caller's frame
  // | arg2 spill  |  |
  // | arg1 spill  |  |
  // | Method*     | ---
  // | Return      |
  // | EBP,ESI,EDI |    callee saves
  // | EBX         |    arg3
  // | EDX         |    arg2
  // | ECX         |    arg1
  // | XMM3        |    float arg 4
  // | XMM2        |    float arg 3
  // | XMM1        |    float arg 2
  // | XMM0        |    float arg 1
  // | EAX/Method* |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = false;
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr bool kQuickSkipOddFpRegisters = false;
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 4;  // 4 arguments passed in FPRs.
  static constexpr bool kGprFprLockstep = false;
  static constexpr bool kNaNBoxing = false;
  static size_t GprIndexToGprOffsetImpl(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(InstructionSet::kX86);
  }
};

class QuickArgumentFrameInfoX86_64 {
 public:
  // The callee save frame is pointed to by SP.
  // | argN            |  |
  // | ...             |  |
  // | reg. arg spills |  |  Caller's frame
  // | Method*         | ---
  // | Return          |
  // | R15             |  callee save
  // | R14             |  callee save
  // | R13             |  callee save
  // | R12             |  callee save
  // | R9              |  arg5
  // | R8              |  arg4
  // | RSI/R6          |  arg1
  // | RBP/R5          |  callee save
  // | RBX/R3          |  callee save
  // | RDX/R2          |  arg2
  // | RCX/R1          |  arg3
  // | XMM15           |  callee save
  // | XMM14           |  callee save
  // | XMM13           |  callee save
  // | XMM12           |  callee save
  // | XMM7            |  float arg 8
  // | XMM6            |  float arg 7
  // | XMM5            |  float arg 6
  // | XMM4            |  float arg 5
  // | XMM3            |  float arg 4
  // | XMM2            |  float arg 3
  // | XMM1            |  float arg 2
  // | XMM0            |  float arg 1
  // | Padding         |
  // | RDI/Method*     |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = false;
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr bool kQuickSkipOddFpRegisters = false;
  static constexpr size_t kNumQuickGprArgs = 5;  // 5 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
  static constexpr bool kGprFprLockstep = false;
  static constexpr bool kNaNBoxing = false;
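  // The GPR argument registers are not spilled contiguously (the callee saves RBX and RBP sit
  // between them; see the frame diagram above), so the mapping from argument index to spill slot
  // is explicit: arg1/RSI -> slot 4, arg2/RDX -> slot 1, arg3/RCX -> slot 0, arg4/R8 -> slot 5,
  // arg5/R9 -> slot 6.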
  static size_t GprIndexToGprOffsetImpl(uint32_t gpr_index) {
    static constexpr size_t kBytesPerSpill = GetBytesPerGprSpillLocation(InstructionSet::kX86_64);
    switch (gpr_index) {
      case 0: return (4 * kBytesPerSpill);
      case 1: return (1 * kBytesPerSpill);
      case 2: return (0 * kBytesPerSpill);
      case 3: return (5 * kBytesPerSpill);
      case 4: return (6 * kBytesPerSpill);
      default:
        LOG(FATAL) << "Unexpected GPR index: " << gpr_index;
        UNREACHABLE();
    }
  }
};

namespace detail {

template <InstructionSet>
struct QAFISelector;

template <>
struct QAFISelector<InstructionSet::kArm> { using type = QuickArgumentFrameInfoARM; };
template <>
struct QAFISelector<InstructionSet::kArm64> { using type = QuickArgumentFrameInfoARM64; };
template <>
struct QAFISelector<InstructionSet::kRiscv64> { using type = QuickArgumentFrameInfoRISCV64; };
template <>
struct QAFISelector<InstructionSet::kX86> { using type = QuickArgumentFrameInfoX86; };
template <>
struct QAFISelector<InstructionSet::kX86_64> { using type = QuickArgumentFrameInfoX86_64; };

}  // namespace detail

using QuickArgumentVisitor =
    QuickArgumentVisitorImpl<detail::QAFISelector<kRuntimeQuickCodeISA>::type>;
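// The alias above binds the visitor to the frame layout of the ISA the runtime's quick code was
// compiled for (kRuntimeQuickCodeISA), so all users below resolve the layout at compile time.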

// Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It
// allows using the QuickArgumentVisitor constants without moving all the code into its own module.
extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK((*sp)->IsProxyMethod());
  return QuickArgumentVisitor::GetThisObjectReference(sp)->AsMirrorPtr();
}

// Visits arguments on the stack placing them into the shadow frame.
class BuildQuickShadowFrameVisitor final : public QuickArgumentVisitor {
 public:
  BuildQuickShadowFrameVisitor(ArtMethod** sp,
                               bool is_static,
                               std::string_view shorty,
                               ShadowFrame* sf,
                               size_t first_arg_reg)
      : QuickArgumentVisitor(sp, is_static, shorty), sf_(sf), cur_reg_(first_arg_reg) {}

  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
  void SetReceiver(ObjPtr<mirror::Object> receiver) REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  ShadowFrame* const sf_;
  uint32_t cur_reg_;

  DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor);
};

void BuildQuickShadowFrameVisitor::SetReceiver(ObjPtr<mirror::Object> receiver) {
  DCHECK_EQ(cur_reg_, 0u);
  sf_->SetVRegReference(cur_reg_, receiver);
  ++cur_reg_;
}

void BuildQuickShadowFrameVisitor::Visit() {
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimLong:  // Fall-through.
    case Primitive::kPrimDouble:
      if (IsSplitLongOrDouble()) {
        sf_->SetVRegLong(cur_reg_, ReadSplitLongParam());
      } else {
        sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
      }
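      // Longs and doubles occupy two vregs: this extra increment plus the shared one at the end
      // of Visit() advances cur_reg_ by two.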
      ++cur_reg_;
      break;
    case Primitive::kPrimNot: {
        StackReference<mirror::Object>* stack_ref =
            reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
        sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr());
      }
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:     // Fall-through.
    case Primitive::kPrimChar:     // Fall-through.
    case Primitive::kPrimShort:    // Fall-through.
    case Primitive::kPrimInt:      // Fall-through.
    case Primitive::kPrimFloat:
      sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
  ++cur_reg_;
}

// Don't inline. See b/65159206.
NO_INLINE
static void HandleDeoptimization(JValue* result,
                                 ArtMethod* method,
                                 ShadowFrame* deopt_frame,
                                 ManagedStack* fragment)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // Coming from partial-fragment deopt.
  Thread* self = Thread::Current();
  if (kIsDebugBuild) {
    // Consistency-check: are the methods as expected? We check that the last shadow frame
    // (the bottom of the call-stack) corresponds to the called method.
    ShadowFrame* linked = deopt_frame;
    while (linked->GetLink() != nullptr) {
      linked = linked->GetLink();
    }
    CHECK_EQ(method, linked->GetMethod()) << method->PrettyMethod() << " "
                                          << ArtMethod::PrettyMethod(linked->GetMethod());
  }

  if (VLOG_IS_ON(deopt)) {
    // Print out the stack to verify that it was a partial-fragment deopt.
    LOG(INFO) << "Continue-ing from deopt. Stack is:";
    QuickExceptionHandler::DumpFramesWithType(self, true);
  }

  ObjPtr<mirror::Throwable> pending_exception;
  bool from_code = false;
  DeoptimizationMethodType method_type;
  self->PopDeoptimizationContext(/* out */ result,
                                 /* out */ &pending_exception,
                                 /* out */ &from_code,
                                 /* out */ &method_type);

  // Push a transition back into managed code onto the linked list in thread.
  self->PushManagedStackFragment(fragment);

  // Ensure that the stack is still in order.
  if (kIsDebugBuild) {
    class EntireStackVisitor : public StackVisitor {
     public:
      explicit EntireStackVisitor(Thread* self_in) REQUIRES_SHARED(Locks::mutator_lock_)
          : StackVisitor(self_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}

      bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
        // Nothing to do here. In a debug build, ValidateFrame will do the work in the walking
        // logic. Just always say we want to continue.
        return true;
      }
    };
    EntireStackVisitor esv(self);
    esv.WalkStack();
  }

  // Restore the exception that was pending before deoptimization then interpret the
  // deoptimized frames.
  if (pending_exception != nullptr) {
    self->SetException(pending_exception);
  }
  interpreter::EnterInterpreterFromDeoptimize(self,
                                              deopt_frame,
                                              result,
                                              from_code,
                                              method_type);
}

static int64_t NanBoxResultIfNeeded(int64_t result, char result_shorty) {
  return (QuickArgumentVisitor::NaNBoxing() && result_shorty == 'F')
      ? result | UINT64_C(0xffffffff00000000)
      : result;
}
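// Worked example of the NaN boxing above: on RISC-V (the only ISA here with kNaNBoxing set), a
// managed method returning float 1.0f produces the low 32 bits 0x3f800000; the value visible in
// the 64-bit FPR must be 0xffffffff3f800000, i.e. the upper 32 bits filled with ones.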

NO_STACK_PROTECTOR
extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // Ensure we don't get thread suspension until the object arguments are safely in the shadow
  // frame.
  ScopedQuickEntrypointChecks sqec(self);

  if (UNLIKELY(!method->IsInvokable())) {
    method->ThrowInvocationTimeError(
        method->IsStatic()
            ? nullptr
            : QuickArgumentVisitor::GetThisObjectReference(sp)->AsMirrorPtr());
    return 0;
  }

  DCHECK(!method->IsNative()) << method->PrettyMethod();

  JValue result;

  ArtMethod* non_proxy_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
  DCHECK(non_proxy_method->GetCodeItem() != nullptr) << method->PrettyMethod();
  std::string_view shorty = non_proxy_method->GetShortyView();

  ManagedStack fragment;
  ShadowFrame* deopt_frame = self->MaybePopDeoptimizedStackedShadowFrame();
  if (UNLIKELY(deopt_frame != nullptr)) {
    HandleDeoptimization(&result, method, deopt_frame, &fragment);
  } else {
    CodeItemDataAccessor accessor(non_proxy_method->DexInstructionData());
    const char* old_cause = self->StartAssertNoThreadSuspension(
        "Building interpreter shadow frame");
    uint16_t num_regs = accessor.RegistersSize();
    // No last shadow coming from quick.
    ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
        CREATE_SHADOW_FRAME(num_regs, method, /* dex_pc= */ 0);
    ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
    size_t first_arg_reg = accessor.RegistersSize() - accessor.InsSize();
    BuildQuickShadowFrameVisitor shadow_frame_builder(
        sp, method->IsStatic(), shorty, shadow_frame, first_arg_reg);
    shadow_frame_builder.VisitArguments();
    self->EndAssertNoThreadSuspension(old_cause);

    // Potentially run <clinit> before pushing the shadow frame. We do not want
    // to have the called method on the stack if there is an exception.
    if (!EnsureInitialized(self, shadow_frame)) {
      DCHECK(self->IsExceptionPending());
      return 0;
    }

    // Push a transition back into managed code onto the linked list in thread.
    self->PushManagedStackFragment(&fragment);
    self->PushShadowFrame(shadow_frame);
    result = interpreter::EnterInterpreterFromEntryPoint(self, accessor, shadow_frame);
  }

  // Pop transition.
  self->PopManagedStackFragment(fragment);

  // Check if caller needs to be deoptimized for instrumentation reasons.
  instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
  if (UNLIKELY(instr->ShouldDeoptimizeCaller(self, sp))) {
    ArtMethod* caller = QuickArgumentVisitor::GetOuterMethod(sp);
    uintptr_t caller_pc = QuickArgumentVisitor::GetCallingPc(sp);
    DCHECK(Runtime::Current()->IsAsyncDeoptimizeable(caller, caller_pc));
    DCHECK(caller != nullptr);
    DCHECK(self->GetException() != Thread::GetDeoptimizationException());
    // Push the context of the deoptimization stack so we can restore the return value and the
    // exception before executing the deoptimized frames.
    self->PushDeoptimizationContext(result,
                                    shorty[0] == 'L' || shorty[0] == '[',  // class or array
                                    self->GetException(),
                                    /* from_code= */ false,
                                    DeoptimizationMethodType::kDefault);

    // Set special exception to cause deoptimization.
    self->SetException(Thread::GetDeoptimizationException());
  }

  // No need to restore the args since the method has already been run by the interpreter.
  return NanBoxResultIfNeeded(result.GetJ(), shorty[0]);
}

// Visits arguments on the stack placing them into the args vector; Object* arguments are
// converted to jobjects.
class BuildQuickArgumentVisitor final : public QuickArgumentVisitor {
 public:
  BuildQuickArgumentVisitor(ArtMethod** sp,
                            bool is_static,
                            std::string_view shorty,
                            ScopedObjectAccessUnchecked* soa,
                            std::vector<jvalue>* args)
      : QuickArgumentVisitor(sp, is_static, shorty), soa_(soa), args_(args) {}

  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;

 private:
  ScopedObjectAccessUnchecked* const soa_;
  std::vector<jvalue>* const args_;

  DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
};

void BuildQuickArgumentVisitor::Visit() {
  jvalue val;
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimNot: {
      StackReference<mirror::Object>* stack_ref =
          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
      val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
      break;
    }
    case Primitive::kPrimLong:  // Fall-through.
    case Primitive::kPrimDouble:
      if (IsSplitLongOrDouble()) {
        val.j = ReadSplitLongParam();
      } else {
        val.j = *reinterpret_cast<jlong*>(GetParamAddress());
      }
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:     // Fall-through.
    case Primitive::kPrimChar:     // Fall-through.
    case Primitive::kPrimShort:    // Fall-through.
    case Primitive::kPrimInt:      // Fall-through.
    case Primitive::kPrimFloat:
      val.i = *reinterpret_cast<jint*>(GetParamAddress());
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
  args_->push_back(val);
}

// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
// which is responsible for recording callee save registers. We explicitly place into jobjects the
// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
// field within the proxy object, which will box the primitive arguments and deal with error cases.
extern "C" uint64_t artQuickProxyInvokeHandler(
    ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK(proxy_method->IsProxyMethod()) << proxy_method->PrettyMethod();
  DCHECK(receiver->GetClass()->IsProxyClass()) << proxy_method->PrettyMethod();
  // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
  const char* old_cause =
      self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
  // Register the top of the managed stack, making stack crawlable.
  DCHECK_EQ((*sp), proxy_method) << proxy_method->PrettyMethod();
  self->VerifyStack();
  // Start new JNI local reference state.
  JNIEnvExt* env = self->GetJniEnv();
  ScopedObjectAccessUnchecked soa(env);
  ScopedJniEnvLocalRefState env_state(env);
  // Create local ref. copies of proxy method and the receiver.
  jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);

  // Place the arguments into the args vector and remove the receiver.
  ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
  CHECK(!non_proxy_method->IsStatic()) << proxy_method->PrettyMethod() << " "
                                       << non_proxy_method->PrettyMethod();
  std::vector<jvalue> args;
  uint32_t shorty_len = 0;
  const char* raw_shorty = non_proxy_method->GetShorty(&shorty_len);
  std::string_view shorty(raw_shorty, shorty_len);
  BuildQuickArgumentVisitor local_ref_visitor(sp, /* is_static= */ false, shorty, &soa, &args);

  local_ref_visitor.VisitArguments();
  DCHECK_GT(args.size(), 0U) << proxy_method->PrettyMethod();
  args.erase(args.begin());

  // Convert proxy method into expected interface method.
  ArtMethod* interface_method = proxy_method->FindOverriddenMethod(kRuntimePointerSize);
  DCHECK(interface_method != nullptr) << proxy_method->PrettyMethod();
  DCHECK(!interface_method->IsProxyMethod()) << interface_method->PrettyMethod();
  self->EndAssertNoThreadSuspension(old_cause);
  DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  ObjPtr<mirror::Method> interface_reflect_method =
      mirror::Method::CreateFromArtMethod<kRuntimePointerSize>(soa.Self(), interface_method);
  if (interface_reflect_method == nullptr) {
    soa.Self()->AssertPendingOOMException();
    return 0;
  }
  jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_reflect_method);

  // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
  // that performs allocations or instrumentation events.
  instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
  if (instr->HasMethodEntryListeners()) {
    instr->MethodEnterEvent(soa.Self(), proxy_method);
    if (soa.Self()->IsExceptionPending()) {
      instr->MethodUnwindEvent(self,
                               proxy_method,
                               0);
      return 0;
    }
  }
  JValue result =
      InvokeProxyInvocationHandler(soa, raw_shorty, rcvr_jobj, interface_method_jobj, args);
  if (soa.Self()->IsExceptionPending()) {
    if (instr->HasMethodUnwindListeners()) {
      instr->MethodUnwindEvent(self,
                               proxy_method,
                               0);
    }
  } else if (instr->HasMethodExitListeners()) {
    instr->MethodExitEvent(self,
                           proxy_method,
                           {},
                           result);
  }

  return NanBoxResultIfNeeded(result.GetJ(), shorty[0]);
}

// Visitor returning a reference argument at a given position in a Quick stack frame.
// NOTE: Only used for testing purposes.
class GetQuickReferenceArgumentAtVisitor final : public QuickArgumentVisitor {
 public:
  GetQuickReferenceArgumentAtVisitor(ArtMethod** sp, std::string_view shorty, size_t arg_pos)
      : QuickArgumentVisitor(sp, /* is_static= */ false, shorty),
        cur_pos_(0u),
        arg_pos_(arg_pos),
        ref_arg_(nullptr) {
    CHECK_LT(arg_pos, shorty.length()) << "Argument position greater than the number of arguments";
  }

  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override {
    if (cur_pos_ == arg_pos_) {
      Primitive::Type type = GetParamPrimitiveType();
      CHECK_EQ(type, Primitive::kPrimNot) << "Argument at searched position is not a reference";
      ref_arg_ = reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
    }
    ++cur_pos_;
  }

  StackReference<mirror::Object>* GetReferenceArgument() {
    return ref_arg_;
  }

 private:
  // The position of the currently visited argument.
  size_t cur_pos_;
  // The position of the searched argument.
  const size_t arg_pos_;
  // The reference argument, if found.
  StackReference<mirror::Object>* ref_arg_;

  DISALLOW_COPY_AND_ASSIGN(GetQuickReferenceArgumentAtVisitor);
};

// Returning reference argument at position `arg_pos` in Quick stack frame at address `sp`.
// NOTE: Only used for testing purposes.
EXPORT extern "C" StackReference<mirror::Object>* artQuickGetProxyReferenceArgumentAt(
    size_t arg_pos, ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
  ArtMethod* proxy_method = *sp;
  ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
  CHECK(!non_proxy_method->IsStatic())
      << proxy_method->PrettyMethod() << " " << non_proxy_method->PrettyMethod();
  std::string_view shorty = non_proxy_method->GetShortyView();
  GetQuickReferenceArgumentAtVisitor ref_arg_visitor(sp, shorty, arg_pos);
  ref_arg_visitor.VisitArguments();
  StackReference<mirror::Object>* ref_arg = ref_arg_visitor.GetReferenceArgument();
  return ref_arg;
}

// Visitor returning all the reference arguments in a Quick stack frame.
class GetQuickReferenceArgumentsVisitor final : public QuickArgumentVisitor {
 public:
  GetQuickReferenceArgumentsVisitor(ArtMethod** sp, bool is_static, std::string_view shorty)
      : QuickArgumentVisitor(sp, is_static, shorty) {}

  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override {
    Primitive::Type type = GetParamPrimitiveType();
    if (type == Primitive::kPrimNot) {
      StackReference<mirror::Object>* ref_arg =
          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
      ref_args_.push_back(ref_arg);
    }
  }

  std::vector<StackReference<mirror::Object>*> GetReferenceArguments() {
    return ref_args_;
  }

 private:
  // The reference arguments.
  std::vector<StackReference<mirror::Object>*> ref_args_;

  DISALLOW_COPY_AND_ASSIGN(GetQuickReferenceArgumentsVisitor);
};

// Returning all reference arguments in Quick stack frame at address `sp`.
std::vector<StackReference<mirror::Object>*> GetProxyReferenceArguments(ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ArtMethod* proxy_method = *sp;
  ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
  CHECK(!non_proxy_method->IsStatic())
      << proxy_method->PrettyMethod() << " " << non_proxy_method->PrettyMethod();
  std::string_view shorty = non_proxy_method->GetShortyView();
  GetQuickReferenceArgumentsVisitor ref_args_visitor(sp, /*is_static=*/ false, shorty);
  ref_args_visitor.VisitArguments();
  std::vector<StackReference<mirror::Object>*> ref_args = ref_args_visitor.GetReferenceArguments();
  return ref_args;
}

// Read object references held in arguments from quick frames and place them in JNI local
// references, so they don't get garbage collected.
class RememberForGcArgumentVisitor final : public QuickArgumentVisitor {
 public:
  RememberForGcArgumentVisitor(ArtMethod** sp,
                               bool is_static,
                               std::string_view shorty,
                               ScopedObjectAccessUnchecked* soa)
      : QuickArgumentVisitor(sp, is_static, shorty), soa_(soa) {}

  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;

  void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  ScopedObjectAccessUnchecked* const soa_;
  // References which we must update when exiting in case the GC moved the objects.
  std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_;

  DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor);
};

void RememberForGcArgumentVisitor::Visit() {
  if (IsParamAReference()) {
    StackReference<mirror::Object>* stack_ref =
        reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
    jobject reference =
        soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
    references_.push_back(std::make_pair(reference, stack_ref));
  }
}

void RememberForGcArgumentVisitor::FixupReferences() {
  // Fixup any references which may have changed.
  for (const auto& pair : references_) {
    pair.second->Assign(soa_->Decode<mirror::Object>(pair.first));
    soa_->Env()->DeleteLocalRef(pair.first);
  }
}

static std::string DumpInstruction(ArtMethod* method, uint32_t dex_pc)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (dex_pc == static_cast<uint32_t>(-1)) {
    CHECK(method == WellKnownClasses::java_lang_String_charAt);
    return "<native>";
  } else {
    CodeItemInstructionAccessor accessor = method->DexInstructions();
    CHECK_LT(dex_pc, accessor.InsnsSizeInCodeUnits());
    return accessor.InstructionAt(dex_pc).DumpString(method->GetDexFile());
  }
}

static void DumpB74410240ClassData(ObjPtr<mirror::Class> klass)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  std::string storage;
  const char* descriptor = klass->GetDescriptor(&storage);
  LOG(FATAL_WITHOUT_ABORT) << " " << DescribeLoaders(klass->GetClassLoader(), descriptor);
  const OatDexFile* oat_dex_file = klass->GetDexFile().GetOatDexFile();
  if (oat_dex_file != nullptr) {
    const OatFile* oat_file = oat_dex_file->GetOatFile();
    const char* dex2oat_cmdline =
        oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kDex2OatCmdLineKey);
    LOG(FATAL_WITHOUT_ABORT) << " OatFile: " << oat_file->GetLocation()
        << "; " << (dex2oat_cmdline != nullptr ? dex2oat_cmdline : "<not recorded>");
  }
}

static void DumpB74410240DebugData(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
  // Mimic the search for the caller and dump some data while doing so.
  LOG(FATAL_WITHOUT_ABORT) << "Dumping debugging data, please attach a bugreport to b/74410240.";

  constexpr CalleeSaveType type = CalleeSaveType::kSaveRefsAndArgs;
  CHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(type));

  constexpr size_t callee_frame_size = RuntimeCalleeSaveFrame::GetFrameSize(type);
  auto** caller_sp = reinterpret_cast<ArtMethod**>(
      reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
  constexpr size_t callee_return_pc_offset = RuntimeCalleeSaveFrame::GetReturnPcOffset(type);
  uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(
      (reinterpret_cast<uint8_t*>(sp) + callee_return_pc_offset));
  ArtMethod* outer_method = *caller_sp;

  const OatQuickMethodHeader* current_code = outer_method->GetOatQuickMethodHeader(caller_pc);
  CHECK(current_code != nullptr);
  CHECK(current_code->IsOptimized());
  uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
  CodeInfo code_info(current_code);
  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
  CHECK(stack_map.IsValid());
  uint32_t dex_pc = stack_map.GetDexPc();

  // Log the outer method and its associated dex file and class table pointer which can be used
  // to find out if the inlined methods were defined by other dex file(s) or class loader(s).
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  LOG(FATAL_WITHOUT_ABORT) << "Outer: " << outer_method->PrettyMethod()
      << " native pc: " << caller_pc
      << " dex pc: " << dex_pc
      << " dex file: " << outer_method->GetDexFile()->GetLocation()
      << " class table: " << class_linker->ClassTableForClassLoader(outer_method->GetClassLoader());
  DumpB74410240ClassData(outer_method->GetDeclaringClass());
  LOG(FATAL_WITHOUT_ABORT) << " instruction: " << DumpInstruction(outer_method, dex_pc);

  ArtMethod* caller = outer_method;
  BitTableRange<InlineInfo> inline_infos = code_info.GetInlineInfosOf(stack_map);
  for (InlineInfo inline_info : inline_infos) {
    const char* tag = "";
    dex_pc = inline_info.GetDexPc();
    if (inline_info.EncodesArtMethod()) {
      tag = "encoded ";
      caller = inline_info.GetArtMethod();
    } else {
      uint32_t method_index = code_info.GetMethodIndexOf(inline_info);
      if (dex_pc == static_cast<uint32_t>(-1)) {
        tag = "special ";
        CHECK(inline_info.Equals(inline_infos.back()));
        caller = WellKnownClasses::java_lang_String_charAt;
        CHECK_EQ(caller->GetDexMethodIndex(), method_index);
      } else {
        ObjPtr<mirror::DexCache> dex_cache = caller->GetDexCache();
        ObjPtr<mirror::ClassLoader> class_loader = caller->GetClassLoader();
        caller = class_linker->LookupResolvedMethod(method_index, dex_cache, class_loader);
        CHECK(caller != nullptr);
      }
    }
    LOG(FATAL_WITHOUT_ABORT) << "InlineInfo #" << inline_info.Row()
        << ": " << tag << caller->PrettyMethod()
        << " dex pc: " << dex_pc
        << " dex file: " << caller->GetDexFile()->GetLocation()
        << " class table: "
        << class_linker->ClassTableForClassLoader(caller->GetClassLoader());
    DumpB74410240ClassData(caller->GetDeclaringClass());
    LOG(FATAL_WITHOUT_ABORT) << " instruction: " << DumpInstruction(caller, dex_pc);
  }
}

// Lazily resolve a method for quick. Called by stub code.
extern "C" const void* artQuickResolutionTrampoline(
    ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // The resolution trampoline stashes the resolved method into the callee-save frame to transport
  // it. Thus, when exiting, the stack cannot be verified (as the resolved method most likely
  // does not have the same stack layout as the callee-save method).
  ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false);
  // Start new JNI local reference state.
  JNIEnvExt* env = self->GetJniEnv();
  ScopedObjectAccessUnchecked soa(env);
  ScopedJniEnvLocalRefState env_state(env);
  const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up");

  // Compute details about the called method (avoid GCs).
  ClassLinker* linker = Runtime::Current()->GetClassLinker();
  InvokeType invoke_type;
  MethodReference called_method(nullptr, 0);
  const bool called_method_known_on_entry = !called->IsRuntimeMethod();
  ArtMethod* caller = nullptr;
  if (!called_method_known_on_entry) {
    uint32_t dex_pc;
    caller = QuickArgumentVisitor::GetCallingMethodAndDexPc(sp, &dex_pc);
    called_method.dex_file = caller->GetDexFile();

    {
      CodeItemInstructionAccessor accessor(caller->DexInstructions());
      CHECK_LT(dex_pc, accessor.InsnsSizeInCodeUnits());
      const Instruction& instr = accessor.InstructionAt(dex_pc);
      Instruction::Code instr_code = instr.Opcode();
      bool is_range;
      switch (instr_code) {
        case Instruction::INVOKE_DIRECT:
          invoke_type = kDirect;
          is_range = false;
          break;
        case Instruction::INVOKE_DIRECT_RANGE:
          invoke_type = kDirect;
          is_range = true;
          break;
        case Instruction::INVOKE_STATIC:
          invoke_type = kStatic;
          is_range = false;
          break;
        case Instruction::INVOKE_STATIC_RANGE:
          invoke_type = kStatic;
          is_range = true;
          break;
        case Instruction::INVOKE_SUPER:
          invoke_type = kSuper;
          is_range = false;
          break;
        case Instruction::INVOKE_SUPER_RANGE:
          invoke_type = kSuper;
          is_range = true;
          break;
        case Instruction::INVOKE_VIRTUAL:
          invoke_type = kVirtual;
          is_range = false;
          break;
        case Instruction::INVOKE_VIRTUAL_RANGE:
          invoke_type = kVirtual;
          is_range = true;
          break;
        case Instruction::INVOKE_INTERFACE:
          invoke_type = kInterface;
          is_range = false;
          break;
        case Instruction::INVOKE_INTERFACE_RANGE:
          invoke_type = kInterface;
          is_range = true;
          break;
        default:
          DumpB74410240DebugData(sp);
          LOG(FATAL) << "Unexpected call into trampoline: " << instr.DumpString(nullptr);
          UNREACHABLE();
      }
      called_method.index = (is_range) ? instr.VRegB_3rc() : instr.VRegB_35c();
      VLOG(dex) << "Accessed dex file for invoke " << invoke_type << " "
                << called_method.index;
    }
  } else {
    invoke_type = kStatic;
    called_method.dex_file = called->GetDexFile();
    called_method.index = called->GetDexMethodIndex();
  }
  std::string_view shorty =
      called_method.dex_file->GetMethodShortyView(called_method.GetMethodId());
  RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, &soa);
  visitor.VisitArguments();
  self->EndAssertNoThreadSuspension(old_cause);
  const bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
  // Resolve method filling in dex cache.
  if (!called_method_known_on_entry) {
    StackHandleScope<1> hs(self);
    mirror::Object* fake_receiver = nullptr;
    HandleWrapper<mirror::Object> h_receiver(
        hs.NewHandleWrapper(virtual_or_interface ? &receiver : &fake_receiver));
    DCHECK_EQ(caller->GetDexFile(), called_method.dex_file);
    called = linker->ResolveMethodWithChecks(called_method.index, caller, invoke_type);
  }
  const void* code = nullptr;
  if (LIKELY(!self->IsExceptionPending())) {
    // Incompatible class change should have been handled in resolve method.
    CHECK(!called->CheckIncompatibleClassChange(invoke_type))
        << called->PrettyMethod() << " " << invoke_type;
    if (virtual_or_interface || invoke_type == kSuper) {
      // Refine called method based on receiver for kVirtual/kInterface, and
      // caller for kSuper.
      ArtMethod* orig_called = called;
      if (invoke_type == kVirtual) {
        CHECK(receiver != nullptr) << invoke_type;
        called = receiver->GetClass()->FindVirtualMethodForVirtual(called, kRuntimePointerSize);
      } else if (invoke_type == kInterface) {
        CHECK(receiver != nullptr) << invoke_type;
        called = receiver->GetClass()->FindVirtualMethodForInterface(called, kRuntimePointerSize);
      } else {
        DCHECK_EQ(invoke_type, kSuper);
        CHECK(caller != nullptr) << invoke_type;
        ObjPtr<mirror::Class> ref_class = linker->LookupResolvedType(
            caller->GetDexFile()->GetMethodId(called_method.index).class_idx_, caller);
        if (ref_class->IsInterface()) {
          called = ref_class->FindVirtualMethodForInterfaceSuper(called, kRuntimePointerSize);
        } else {
          called = caller->GetDeclaringClass()->GetSuperClass()->GetVTableEntry(
              called->GetMethodIndex(), kRuntimePointerSize);
        }
      }

      CHECK(called != nullptr) << orig_called->PrettyMethod() << " "
                               << mirror::Object::PrettyTypeOf(receiver) << " "
                               << invoke_type << " " << orig_called->GetVtableIndex();
    }
    // Now that we know the actual target, update .bss entry in oat file, if
    // any.
    if (!called_method_known_on_entry) {
      // We only put non copied methods in the BSS. Putting a copy can lead to an
      // odd situation where the ArtMethod being executed is unrelated to the
      // receiver of the method.
      called = called->GetCanonicalMethod();
      if (invoke_type == kSuper || invoke_type == kInterface || invoke_type == kVirtual) {
        if (called->GetDexFile() == called_method.dex_file) {
          called_method.index = called->GetDexMethodIndex();
        } else {
          called_method.index = called->FindDexMethodIndexInOtherDexFile(
              *called_method.dex_file, called_method.index);
          DCHECK_NE(called_method.index, dex::kDexNoIndex);
        }
      }
      ArtMethod* outer_method = QuickArgumentVisitor::GetOuterMethod(sp);
      MaybeUpdateBssMethodEntry(called, called_method, outer_method);
    }

    // Static invokes need class initialization check but instance invokes can proceed even if
    // the class is erroneous, i.e. in the edge case of escaping instances of erroneous classes.
    bool success = true;
    if (called->StillNeedsClinitCheck()) {
      // Ensure that the called method's class is initialized.
      StackHandleScope<1> hs(soa.Self());
      Handle<mirror::Class> h_called_class = hs.NewHandle(called->GetDeclaringClass());
      success = linker->EnsureInitialized(soa.Self(), h_called_class, true, true);
    }
    if (success) {
      // When the clinit check is at entry of the AOT/nterp code, we do the clinit check
      // before doing the suspend check. To ensure the code sees the latest
      // version of the class (the code doesn't do a read barrier to reduce
      // size), do a suspend check now.
      self->CheckSuspend();
      instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
      // Check if we need instrumented code here. Since resolution stubs could suspend, it is
      // possible that we instrumented the entry points after we started executing the resolution
      // stub.
      code = instrumentation->GetMaybeInstrumentedCodeForInvoke(called);
    } else {
      DCHECK(called->GetDeclaringClass()->IsErroneous());
      DCHECK(self->IsExceptionPending());
    }
  }
  CHECK_EQ(code == nullptr, self->IsExceptionPending());
1381 // Fix up any locally saved objects that may have moved during a GC.
1382 visitor.FixupReferences();
1383 // Place called method in callee-save frame to be placed as first argument to quick method.
1384 *sp = called;
1385
1386 return code;
1387 }
1388
1389 /*
1390 * This class uses a couple of observations to unite the different calling conventions through
1391 * a few constants.
1392 *
1393 * 1) The number of registers used for argument passing is normally even, so counting down has
1394 * no penalty with respect to possible alignment.
1395 * 2) Known 64-bit architectures store 8-byte units on the stack, both for integral and
1396 * floating point types, so using uintptr_t is OK. This also means that we can use
1397 * kRegistersNeededX to denote when we have to split values.
1398 * 3) The only soft-float ABI, Arm, is 32-bit, so no widening needs to be taken into account
1399 * for floats and we can use the Int handling directly.
1400 * 4) Only 64-bit architectures widen, and their stack is 8-byte aligned anyway, so no padding
1401 * code is necessary when widening. Also, widening of Ints takes place implicitly, and the
1402 * extension should be compatible with Aarch64, which mandates copying the available bits
1403 * into the LSBs and leaving the rest unspecified.
1404 * 5) Aligning longs and doubles is necessary only on arm, and it is the same in registers
1405 * and on the stack.
1406 * 6) All supported architectures are little-endian.
1407 *
1408 *
1409 * Actual work is supposed to be done in a delegate of the template type. The interface is as
1410 * follows:
1411 *
1412 * void PushGpr(uintptr_t): Add a value for the next GPR
1413 *
1414 * void PushFpr4(float): Add a value for the next 32-bit FPR slot. It is only called when
1415 * padding is needed, i.e. on a 32-bit architecture that aligns 64-bit values.
1416 *
1417 * void PushFpr8(uint64_t): Push a double. We _will_ call this on 32-bit architectures; it is
1418 * the delegate's job to split the value if necessary. The current
1419 * state will already have been aligned, if necessary.
1420 *
1421 * void PushStack(uintptr_t): Push a value to the stack.
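*
* For illustration only (this is not part of the runtime), a delegate satisfying the interface
* could look like the sketch below; only the four Push* member names are required by the
* template, everything else here is hypothetical:
*
*   #include <cinttypes>
*   #include <cstdio>
*
*   struct PrintingDelegate {
*     void PushGpr(uintptr_t val) { std::printf("GPR   <- %" PRIxPTR "\n", val); }
*     void PushFpr4(float val) { std::printf("FPR4  <- %f\n", val); }
*     void PushFpr8(uint64_t val) { std::printf("FPR8  <- %" PRIx64 "\n", val); }
*     void PushStack(uintptr_t val) { std::printf("STACK <- %" PRIxPTR "\n", val); }
*   };
*
*   PrintingDelegate logger;
*   BuildNativeCallFrameStateMachine<PrintingDelegate> sm(&logger);
*   sm.AdvancePointer(nullptr);  // Goes to a GPR while one is free, otherwise to the stack.
*   sm.AdvanceDouble(0);         // Goes to an FPR, to GPRs or to the stack, depending on the ABI.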
1422 */
1423 template<class T> class BuildNativeCallFrameStateMachine {
1424 public:
1425 static constexpr bool kNaNBoxing = QuickArgumentVisitor::NaNBoxing();
1426 #if defined(__arm__)
1427 static constexpr bool kNativeSoftFloatAbi = true;
1428 static constexpr bool kNativeSoftFloatAfterHardFloat = false;
1429 static constexpr size_t kNumNativeGprArgs = 4; // 4 arguments passed in GPRs, r0-r3
1430 static constexpr size_t kNumNativeFprArgs = 0; // 0 arguments passed in FPRs.
1431
1432 static constexpr size_t kRegistersNeededForLong = 2;
1433 static constexpr size_t kRegistersNeededForDouble = 2;
1434 static constexpr bool kMultiRegistersAligned = true;
1435 static constexpr bool kMultiGPRegistersWidened = false;
1436 static constexpr bool kAlignLongOnStack = true;
1437 static constexpr bool kAlignDoubleOnStack = true;
1438 #elif defined(__aarch64__)
1439 static constexpr bool kNativeSoftFloatAbi = false; // This is a hard float ABI.
1440 static constexpr bool kNativeSoftFloatAfterHardFloat = false;
1441 static constexpr size_t kNumNativeGprArgs = 8; // 8 arguments passed in GPRs.
1442 static constexpr size_t kNumNativeFprArgs = 8; // 8 arguments passed in FPRs.
1443
1444 static constexpr size_t kRegistersNeededForLong = 1;
1445 static constexpr size_t kRegistersNeededForDouble = 1;
1446 static constexpr bool kMultiRegistersAligned = false;
1447 static constexpr bool kMultiGPRegistersWidened = false;
1448 static constexpr bool kAlignLongOnStack = false;
1449 static constexpr bool kAlignDoubleOnStack = false;
1450 #elif defined(__riscv)
1451 static constexpr bool kNativeSoftFloatAbi = false;
1452 static constexpr bool kNativeSoftFloatAfterHardFloat = true;
1453 static constexpr size_t kNumNativeGprArgs = 8;
1454 static constexpr size_t kNumNativeFprArgs = 8;
1455
1456 static constexpr size_t kRegistersNeededForLong = 1;
1457 static constexpr size_t kRegistersNeededForDouble = 1;
1458 static constexpr bool kMultiRegistersAligned = false;
1459 static constexpr bool kMultiGPRegistersWidened = true;
1460 static constexpr bool kAlignLongOnStack = false;
1461 static constexpr bool kAlignDoubleOnStack = false;
1462 #elif defined(__i386__)
1463 static constexpr bool kNativeSoftFloatAbi = false; // Not using int registers for fp
1464 static constexpr bool kNativeSoftFloatAfterHardFloat = false;
1465 static constexpr size_t kNumNativeGprArgs = 0; // 0 arguments passed in GPRs.
1466 static constexpr size_t kNumNativeFprArgs = 0; // 0 arguments passed in FPRs.
1467
1468 static constexpr size_t kRegistersNeededForLong = 2;
1469 static constexpr size_t kRegistersNeededForDouble = 2;
1470 static constexpr bool kMultiRegistersAligned = false;  // x86 is not using regs anyway.
1471 static constexpr bool kMultiGPRegistersWidened = false;
1472 static constexpr bool kAlignLongOnStack = false;
1473 static constexpr bool kAlignDoubleOnStack = false;
1474 #elif defined(__x86_64__)
1475 static constexpr bool kNativeSoftFloatAbi = false; // This is a hard float ABI.
1476 static constexpr bool kNativeSoftFloatAfterHardFloat = false;
1477 static constexpr size_t kNumNativeGprArgs = 6; // 6 arguments passed in GPRs.
1478 static constexpr size_t kNumNativeFprArgs = 8; // 8 arguments passed in FPRs.
1479
1480 static constexpr size_t kRegistersNeededForLong = 1;
1481 static constexpr size_t kRegistersNeededForDouble = 1;
1482 static constexpr bool kMultiRegistersAligned = false;
1483 static constexpr bool kMultiGPRegistersWidened = false;
1484 static constexpr bool kAlignLongOnStack = false;
1485 static constexpr bool kAlignDoubleOnStack = false;
1486 #else
1487 #error "Unsupported architecture"
1488 #endif
1489
1490 public:
1491 explicit BuildNativeCallFrameStateMachine(T* delegate)
1492 : gpr_index_(kNumNativeGprArgs),
1493 fpr_index_(kNumNativeFprArgs),
1494 stack_entries_(0),
1495 delegate_(delegate) {
1496 // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even iff
1497 // the next register is even; counting down is just to make the compiler happy...
1498 static_assert(kNumNativeGprArgs % 2 == 0U, "Number of native GPR arguments not even");
1499 static_assert(kNumNativeFprArgs % 2 == 0U, "Number of native FPR arguments not even");
1500 }
1501
1502 virtual ~BuildNativeCallFrameStateMachine() {}
1503
1504 bool HavePointerGpr() const {
1505 return gpr_index_ > 0;
1506 }
1507
1508 void AdvancePointer(const void* val) {
1509 if (HavePointerGpr()) {
1510 gpr_index_--;
1511 PushGpr(reinterpret_cast<uintptr_t>(val));
1512 } else {
1513 stack_entries_++; // TODO: have a field for pointer length as multiple of 32b
1514 PushStack(reinterpret_cast<uintptr_t>(val));
1515 gpr_index_ = 0;
1516 }
1517 }
1518
1519 bool HaveIntGpr() const {
1520 return gpr_index_ > 0;
1521 }
1522
1523 void AdvanceInt(uint32_t val) {
1524 if (HaveIntGpr()) {
1525 gpr_index_--;
1526 if (kMultiGPRegistersWidened) {
1527 DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t));
1528 PushGpr(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val)));
1529 } else {
1530 PushGpr(val);
1531 }
1532 } else {
1533 stack_entries_++;
1534 if (kMultiGPRegistersWidened) {
1535 DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t));
1536 PushStack(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val)));
1537 } else {
1538 PushStack(val);
1539 }
1540 gpr_index_ = 0;
1541 }
1542 }
1543
1544 bool HaveLongGpr() const {
1545 return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 1 : 0);
1546 }
1547
1548 bool LongGprNeedsPadding() const {
1549 return kRegistersNeededForLong > 1 && // only pad when using multiple registers
1550 kAlignLongOnStack && // and when it needs alignment
1551 (gpr_index_ & 1) == 1; // counter is odd, see constructor
1552 }
1553
1554 bool LongStackNeedsPadding() const {
1555 return kRegistersNeededForLong > 1 && // only pad when using multiple registers
1556 kAlignLongOnStack && // and when it needs 8B alignment
1557 (stack_entries_ & 1) == 1; // counter is odd
1558 }
1559
1560 void AdvanceLong(uint64_t val) {
1561 if (HaveLongGpr()) {
1562 if (LongGprNeedsPadding()) {
1563 PushGpr(0);
1564 gpr_index_--;
1565 }
1566 if (kRegistersNeededForLong == 1) {
1567 PushGpr(static_cast<uintptr_t>(val));
1568 } else {
1569 PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF));
1570 PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
1571 }
1572 gpr_index_ -= kRegistersNeededForLong;
1573 } else {
1574 if (LongStackNeedsPadding()) {
1575 PushStack(0);
1576 stack_entries_++;
1577 }
1578 if (kRegistersNeededForLong == 1) {
1579 PushStack(static_cast<uintptr_t>(val));
1580 stack_entries_++;
1581 } else {
1582 PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
1583 PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
1584 stack_entries_ += 2;
1585 }
1586 gpr_index_ = 0;
1587 }
1588 }
1589
1590 bool HaveFloatFpr() const {
1591 return fpr_index_ > 0;
1592 }
1593
1594 void AdvanceFloat(uint32_t val) {
1595 if (kNativeSoftFloatAbi) {
1596 AdvanceInt(val);
1597 } else if (HaveFloatFpr()) {
1598 fpr_index_--;
1599 if (kRegistersNeededForDouble == 1) {
1600 if (kNaNBoxing) {
1601 // NaN boxing: no widening, just use the bits, but reset upper bits to 1s.
1602 // See e.g. RISC-V manual, D extension, section "NaN Boxing of Narrower Values".
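// For example (illustrative): 1.0f has bits 0x3F800000 and is passed as 0xFFFFFFFF'3F800000.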
1603 PushFpr8(UINT64_C(0xFFFFFFFF00000000) | static_cast<uint64_t>(val));
1604 } else {
1605 // No widening, just use the bits.
1606 PushFpr8(static_cast<uint64_t>(val));
1607 }
1608 } else {
1609 PushFpr4(val);
1610 }
1611 } else if (kNativeSoftFloatAfterHardFloat) {
1612 // After using FP arg registers, pass FP args in general purpose registers or on the stack.
1613 AdvanceInt(val);
1614 } else {
1615 stack_entries_++;
1616 PushStack(static_cast<uintptr_t>(val));
1617 fpr_index_ = 0;
1618 }
1619 }
1620
1621 bool HaveDoubleFpr() const {
1622 return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 1 : 0);
1623 }
1624
1625 bool DoubleFprNeedsPadding() const {
1626 return kRegistersNeededForDouble > 1 && // only pad when using multiple registers
1627 kAlignDoubleOnStack && // and when it needs alignment
1628 (fpr_index_ & 1) == 1; // counter is odd, see constructor
1629 }
1630
1631 bool DoubleStackNeedsPadding() const {
1632 return kRegistersNeededForDouble > 1 && // only pad when using multiple registers
1633 kAlignDoubleOnStack && // and when it needs 8B alignment
1634 (stack_entries_ & 1) == 1; // counter is odd
1635 }
1636
1637 void AdvanceDouble(uint64_t val) {
1638 if (kNativeSoftFloatAbi) {
1639 AdvanceLong(val);
1640 } else if (HaveDoubleFpr()) {
1641 if (DoubleFprNeedsPadding()) {
1642 PushFpr4(0);
1643 fpr_index_--;
1644 }
1645 PushFpr8(val);
1646 fpr_index_ -= kRegistersNeededForDouble;
1647 } else if (kNativeSoftFloatAfterHardFloat) {
1648 // After using FP arg registers, pass FP args in general purpose registers or on the stack.
1649 AdvanceLong(val);
1650 } else {
1651 if (DoubleStackNeedsPadding()) {
1652 PushStack(0);
1653 stack_entries_++;
1654 }
1655 if (kRegistersNeededForDouble == 1) {
1656 PushStack(static_cast<uintptr_t>(val));
1657 stack_entries_++;
1658 } else {
1659 PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
1660 PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
1661 stack_entries_ += 2;
1662 }
1663 fpr_index_ = 0;
1664 }
1665 }
1666
1667 uint32_t GetStackEntries() const {
1668 return stack_entries_;
1669 }
1670
1671 uint32_t GetNumberOfUsedGprs() const {
1672 return kNumNativeGprArgs - gpr_index_;
1673 }
1674
1675 uint32_t GetNumberOfUsedFprs() const {
1676 return kNumNativeFprArgs - fpr_index_;
1677 }
1678
1679 private:
1680 void PushGpr(uintptr_t val) {
1681 delegate_->PushGpr(val);
1682 }
1683 void PushFpr4(float val) {
1684 delegate_->PushFpr4(val);
1685 }
1686 void PushFpr8(uint64_t val) {
1687 delegate_->PushFpr8(val);
1688 }
1689 void PushStack(uintptr_t val) {
1690 delegate_->PushStack(val);
1691 }
1692
1693 uint32_t gpr_index_; // Number of free GPRs
1694 uint32_t fpr_index_; // Number of free FPRs
1695 uint32_t stack_entries_; // Stack entries are in multiples of 32b, as floats are usually not
1696 // extended
1697 T* const delegate_; // What Push implementation gets called
1698 };
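
// Worked example (illustrative only, ignoring the JNIEnv*/jclass arguments that WalkHeader adds
// for normal JNI calls): on 32-bit arm, kRegistersNeededForLong == 2 and kAlignLongOnStack is
// true, so starting from gpr_index_ == 4 the sequence
//   AdvanceInt(i);   // i goes to the first GPR slot (r0); gpr_index_ becomes 3 (odd).
//   AdvanceLong(j);  // LongGprNeedsPadding() is true: r1 is padded, j occupies r2/r3.
// consumes all four argument GPRs; any further argument would be pushed onto the stack.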
1699
1700 // Computes the sizes of register stacks and call stack area. Handling of references can be extended
1701 // in subclasses.
1702 //
1703 // To handle native pointers, use "L" in the shorty for an object reference, which simulates
1704 // them with handles.
1705 class ComputeNativeCallFrameSize {
1706 public:
1707 ComputeNativeCallFrameSize() : num_stack_entries_(0) {}
1708
1709 virtual ~ComputeNativeCallFrameSize() {}
1710
1711 uint32_t GetStackSize() const {
1712 return num_stack_entries_ * sizeof(uintptr_t);
1713 }
1714
1715 uint8_t* LayoutStackArgs(uint8_t* sp8) const {
1716 sp8 -= GetStackSize();
1717 // Align by kStackAlignment; it is at least as strict as native stack alignment.
1718 sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
1719 return sp8;
1720 }
1721
1722 virtual void WalkHeader(
1723 [[maybe_unused]] BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm)
1724 REQUIRES_SHARED(Locks::mutator_lock_) {}
1725
1726 void Walk(std::string_view shorty) REQUIRES_SHARED(Locks::mutator_lock_) {
1727 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this);
1728
1729 WalkHeader(&sm);
1730
1731 for (char c : shorty.substr(1u)) {
1732 Primitive::Type cur_type_ = Primitive::GetType(c);
1733 switch (cur_type_) {
1734 case Primitive::kPrimNot:
1735 sm.AdvancePointer(nullptr);
1736 break;
1737 case Primitive::kPrimBoolean:
1738 case Primitive::kPrimByte:
1739 case Primitive::kPrimChar:
1740 case Primitive::kPrimShort:
1741 case Primitive::kPrimInt:
1742 sm.AdvanceInt(0);
1743 break;
1744 case Primitive::kPrimFloat:
1745 sm.AdvanceFloat(0);
1746 break;
1747 case Primitive::kPrimDouble:
1748 sm.AdvanceDouble(0);
1749 break;
1750 case Primitive::kPrimLong:
1751 sm.AdvanceLong(0);
1752 break;
1753 default:
1754 LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty;
1755 UNREACHABLE();
1756 }
1757 }
1758
1759 num_stack_entries_ = sm.GetStackEntries();
1760 }
1761
1762 void PushGpr(uintptr_t /* val */) {
1763 // not optimizing registers, yet
1764 }
1765
1766 void PushFpr4(float /* val */) {
1767 // not optimizing registers, yet
1768 }
1769
1770 void PushFpr8(uint64_t /* val */) {
1771 // not optimizing registers, yet
1772 }
1773
1774 void PushStack(uintptr_t /* val */) {
1775 // counting is already done in the superclass
1776 }
1777
1778 protected:
1779 uint32_t num_stack_entries_;
1780 };
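
// Usage sketch (illustrative only; the real caller is ComputeGenericJniFrameSize below): compute
// the out-of-register stack space needed for a method with shorty "VIJD", i.e. void f(int, long,
// double):
//
//   ComputeNativeCallFrameSize fsc;
//   fsc.Walk("VIJD");                   // The return type character is skipped by Walk().
//   size_t bytes = fsc.GetStackSize();  // 0 on arm64/riscv64/x86-64 (all args fit in registers),
//                                       // 20 on x86 (no argument registers are modeled there).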
1781
1782 class ComputeGenericJniFrameSize final : public ComputeNativeCallFrameSize {
1783 public:
1784 explicit ComputeGenericJniFrameSize(bool critical_native)
1785 : critical_native_(critical_native) {}
1786
1787 uintptr_t* ComputeLayout(ArtMethod** managed_sp, std::string_view shorty)
1788 REQUIRES_SHARED(Locks::mutator_lock_) {
1789 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
1790
1791 Walk(shorty);
1792
1793 // Add space for cookie.
1794 DCHECK_ALIGNED(managed_sp, sizeof(uintptr_t));
1795 static_assert(sizeof(uintptr_t) >= sizeof(jni::LRTSegmentState));
1796 uint8_t* sp8 = reinterpret_cast<uint8_t*>(managed_sp) - sizeof(uintptr_t);
1797
1798 // Layout stack arguments.
1799 sp8 = LayoutStackArgs(sp8);
1800
1801 // Return the new bottom.
1802 DCHECK_ALIGNED(sp8, sizeof(uintptr_t));
1803 return reinterpret_cast<uintptr_t*>(sp8);
1804 }
1805
1806 static uintptr_t* GetStartGprRegs(uintptr_t* reserved_area) {
1807 return reserved_area;
1808 }
1809
1810 static uint32_t* GetStartFprRegs(uintptr_t* reserved_area) {
1811 constexpr size_t num_gprs =
1812 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeGprArgs;
1813 return reinterpret_cast<uint32_t*>(GetStartGprRegs(reserved_area) + num_gprs);
1814 }
1815
1816 static uintptr_t* GetHiddenArgSlot(uintptr_t* reserved_area) {
1817 // Note: `num_fprs` is 0 on architectures where sizeof(uintptr_t) does not match the
1818 // FP register size (it is actually 0 on all supported 32-bit architectures).
1819 constexpr size_t num_fprs =
1820 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeFprArgs;
1821 return reinterpret_cast<uintptr_t*>(GetStartFprRegs(reserved_area)) + num_fprs;
1822 }
1823
1824 static uintptr_t* GetOutArgsSpSlot(uintptr_t* reserved_area) {
1825 return GetHiddenArgSlot(reserved_area) + 1;
1826 }
1827
1828 // Add JNIEnv* and jobj/jclass before the shorty-derived elements.
1829 void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) override
1830 REQUIRES_SHARED(Locks::mutator_lock_);
1831
1832 private:
1833 const bool critical_native_;
1834 };
1835
1836 void ComputeGenericJniFrameSize::WalkHeader(
1837 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) {
1838 // First 2 parameters are always excluded for @CriticalNative.
1839 if (UNLIKELY(critical_native_)) {
1840 return;
1841 }
1842
1843 // JNIEnv
1844 sm->AdvancePointer(nullptr);
1845
1846 // Class object or this as first argument
1847 sm->AdvancePointer(nullptr);
1848 }
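
// Illustrative layout of the reserved area consumed by the getters above, with example offsets
// for arm64 (kNumNativeGprArgs == 8, kNumNativeFprArgs == 8; other architectures differ):
//
//   reserved_area + 0    : 8 GPR argument slots, 8 bytes each   (GetStartGprRegs)
//   reserved_area + 64   : 8 FPR argument slots, 8 bytes each   (GetStartFprRegs)
//   reserved_area + 128  : hidden ArtMethod* slot, used only for @CriticalNative
//                          (GetHiddenArgSlot)
//   reserved_area + 136  : slot holding the SP for the native call (GetOutArgsSpSlot)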
1849
1850 // Class to push values to three separate regions. Used to fill the native call part. Adheres to
1851 // the template requirements of BuildNativeCallFrameStateMachine.
1852 class FillNativeCall {
1853 public:
1854 FillNativeCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) :
1855 cur_gpr_reg_(gpr_regs), cur_fpr_reg_(fpr_regs), cur_stack_arg_(stack_args) {}
1856
1857 virtual ~FillNativeCall() {}
1858
1859 void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) {
1860 cur_gpr_reg_ = gpr_regs;
1861 cur_fpr_reg_ = fpr_regs;
1862 cur_stack_arg_ = stack_args;
1863 }
1864
1865 void PushGpr(uintptr_t val) {
1866 *cur_gpr_reg_ = val;
1867 cur_gpr_reg_++;
1868 }
1869
1870 void PushFpr4(float val) {
1871 *cur_fpr_reg_ = val;
1872 cur_fpr_reg_++;
1873 }
1874
1875 void PushFpr8(uint64_t val) {
1876 uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_);
1877 *tmp = val;
1878 cur_fpr_reg_ += 2;
1879 }
1880
1881 void PushStack(uintptr_t val) {
1882 *cur_stack_arg_ = val;
1883 cur_stack_arg_++;
1884 }
1885
1886 private:
1887 uintptr_t* cur_gpr_reg_;
1888 uint32_t* cur_fpr_reg_;
1889 uintptr_t* cur_stack_arg_;
1890 };
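
// Usage sketch (illustrative only; BuildGenericJniFrameVisitor below does this for real, using
// the regions of the reserved area computed by ComputeGenericJniFrameSize). The local variables
// here are hypothetical:
//
//   uintptr_t gprs[8];      // GPR argument slots.
//   uint32_t fprs[16];      // FPR argument slots (uint32_t units, doubles take two).
//   uintptr_t stack[16];    // Out-of-register arguments.
//   FillNativeCall filler(gprs, fprs, stack);
//   BuildNativeCallFrameStateMachine<FillNativeCall> sm(&filler);
//   sm.AdvancePointer(env);              // JNIEnv* lands in the first GPR slot.
//   sm.AdvancePointer(jclass_or_this);   // Second pointer argument.
//   sm.AdvanceInt(42);                   // Remaining arguments follow the shorty.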
1891
1892 // Visits arguments on the stack placing them into a region lower down the stack for the benefit
1893 // of transitioning into native code.
1894 class BuildGenericJniFrameVisitor final : public QuickArgumentVisitor {
1895 public:
1896 BuildGenericJniFrameVisitor(Thread* self,
1897 bool is_static,
1898 bool critical_native,
1899 std::string_view shorty,
1900 ArtMethod** managed_sp,
1901 uintptr_t* reserved_area)
1902 : QuickArgumentVisitor(managed_sp, is_static, shorty),
1903 jni_call_(nullptr, nullptr, nullptr),
1904 sm_(&jni_call_),
1905 current_vreg_(nullptr) {
1906 DCHECK_ALIGNED(managed_sp, kStackAlignment);
1907 DCHECK_ALIGNED(reserved_area, sizeof(uintptr_t));
1908
1909 ComputeGenericJniFrameSize fsc(critical_native);
1910 uintptr_t* out_args_sp = fsc.ComputeLayout(managed_sp, shorty);
1911
1912 // Store hidden argument for @CriticalNative.
1913 uintptr_t* hidden_arg_slot = fsc.GetHiddenArgSlot(reserved_area);
1914 constexpr uintptr_t kGenericJniTag = 1u;
1915 ArtMethod* method = *managed_sp;
1916 *hidden_arg_slot = critical_native ? (reinterpret_cast<uintptr_t>(method) | kGenericJniTag)
1917 : 0xebad6a89u; // Bad value.
1918
1919 // Set out args SP.
1920 uintptr_t* out_args_sp_slot = fsc.GetOutArgsSpSlot(reserved_area);
1921 *out_args_sp_slot = reinterpret_cast<uintptr_t>(out_args_sp);
1922
1923 // Prepare vreg pointer for spilling references.
1924 static constexpr size_t frame_size =
1925 RuntimeCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsAndArgs);
1926 current_vreg_ = reinterpret_cast<uint32_t*>(
1927 reinterpret_cast<uint8_t*>(managed_sp) + frame_size + sizeof(ArtMethod*));
1928
1929 jni_call_.Reset(fsc.GetStartGprRegs(reserved_area),
1930 fsc.GetStartFprRegs(reserved_area),
1931 out_args_sp);
1932
1933 bool uses_critical_args = critical_native;
1934
1935 #ifdef ART_USE_RESTRICTED_MODE
1936 // IsCriticalNative() always returns false, so check whether the method is actually a critical
1937 // native method. If it is, it won't need the JNIEnv or jclass arguments.
1938 constexpr uint32_t mask = kAccCriticalNative | kAccNative;
1939 uses_critical_args = (method->GetAccessFlags() & mask) == mask;
1940 #endif
1941
1942 // First 2 parameters are always excluded for CriticalNative methods.
1943 if (LIKELY(!uses_critical_args)) {
1944 // The JNI environment is always the first argument.
1945 sm_.AdvancePointer(self->GetJniEnv());
1946
1947 if (is_static) {
1948 // The `jclass` is a pointer to the method's declaring class.
1949 // The declaring class must be marked.
1950 auto* declaring_class = reinterpret_cast<mirror::CompressedReference<mirror::Class>*>(
1951 method->GetDeclaringClassAddressWithoutBarrier());
1952 if (gUseReadBarrier) {
1953 artJniReadBarrier(method);
1954 }
1955 sm_.AdvancePointer(declaring_class);
1956 } // else "this" reference is already handled by QuickArgumentVisitor.
1957 }
1958 }
1959
1960 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
1961
1962 private:
1963 FillNativeCall jni_call_;
1964 BuildNativeCallFrameStateMachine<FillNativeCall> sm_;
1965
1966 // Pointer to the current vreg in caller's reserved out vreg area.
1967 // Used for spilling reference arguments.
1968 uint32_t* current_vreg_;
1969
1970 DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor);
1971 };
1972
1973 void BuildGenericJniFrameVisitor::Visit() {
1974 Primitive::Type type = GetParamPrimitiveType();
1975 switch (type) {
1976 case Primitive::kPrimLong: {
1977 jlong long_arg;
1978 if (IsSplitLongOrDouble()) {
1979 long_arg = ReadSplitLongParam();
1980 } else {
1981 long_arg = *reinterpret_cast<jlong*>(GetParamAddress());
1982 }
1983 sm_.AdvanceLong(long_arg);
1984 current_vreg_ += 2u;
1985 break;
1986 }
1987 case Primitive::kPrimDouble: {
1988 uint64_t double_arg;
1989 if (IsSplitLongOrDouble()) {
1990 // Read the raw bits so that we don't cast the value to a double.
1991 double_arg = ReadSplitLongParam();
1992 } else {
1993 double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
1994 }
1995 sm_.AdvanceDouble(double_arg);
1996 current_vreg_ += 2u;
1997 break;
1998 }
1999 case Primitive::kPrimNot: {
2000 mirror::Object* obj =
2001 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress())->AsMirrorPtr();
2002 StackReference<mirror::Object>* spill_ref =
2003 reinterpret_cast<StackReference<mirror::Object>*>(current_vreg_);
2004 spill_ref->Assign(obj);
2005 sm_.AdvancePointer(obj != nullptr ? spill_ref : nullptr);
2006 current_vreg_ += 1u;
2007 break;
2008 }
2009 case Primitive::kPrimFloat:
2010 sm_.AdvanceFloat(*reinterpret_cast<uint32_t*>(GetParamAddress()));
2011 current_vreg_ += 1u;
2012 break;
2013 case Primitive::kPrimBoolean: // Fall-through.
2014 case Primitive::kPrimByte: // Fall-through.
2015 case Primitive::kPrimChar: // Fall-through.
2016 case Primitive::kPrimShort: // Fall-through.
2017 case Primitive::kPrimInt: // Fall-through.
2018 sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
2019 current_vreg_ += 1u;
2020 break;
2021 case Primitive::kPrimVoid:
2022 LOG(FATAL) << "UNREACHABLE";
2023 UNREACHABLE();
2024 }
2025 }
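
// Example (illustrative): for a reference argument, Visit() spills the object into the caller's
// reserved out vreg area and passes the native code the address of that spill slot as the
// jobject; a null reference is passed as a null jobject instead, as JNI requires.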
2026
2027 /*
2028 * Initializes the reserved area assumed to be directly below `managed_sp` for a native call:
2029 *
2030 * On entry, the stack has a standard callee-save frame above `managed_sp`,
2031 * and the reserved area below it. Starting below `managed_sp`, we reserve space
2032 * for local reference cookie (not present for @CriticalNative), HandleScope
2033 * (not present for @CriticalNative) and stack args (if args do not fit into
2034 * registers). At the bottom of the reserved area, there is space for register
2035 * arguments, hidden arg (for @CriticalNative) and the SP for the native call
2036 * (i.e. pointer to the stack args area), which the calling stub shall load
2037 * to perform the native call. We fill all these fields, perform class init
2038 * check (for static methods) and/or locking (for synchronized methods) if
2039 * needed and return to the stub.
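*
* An illustrative picture (example only; sizes not to scale, and there may be unused space
* between the stack args and the register-argument slots; higher addresses towards the top):
*
*   managed_sp    ->  callee-save frame (SaveRefsAndArgs) lives above this point
*                     space for the local reference cookie
*                     stack arguments for the native call, if any
*   out_args_sp   ->  bottom of the stack args, aligned to kStackAlignment
*   reserved_area ->  GPR arg slots | FPR arg slots | hidden arg slot | out-args SP slot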
2040 *
2041 * The return value is the pointer to the native code, null on failure.
2042 *
2043 * NO_THREAD_SAFETY_ANALYSIS: Depending on the use case, the trampoline may
2044 * or may not lock a synchronization object and transition out of Runnable.
2045 */
2046 extern "C" const void* artQuickGenericJniTrampoline(Thread* self,
2047 ArtMethod** managed_sp,
2048 uintptr_t* reserved_area)
2049 REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS {
2050 // Note: We cannot walk the stack properly until fixed up below.
2051 ArtMethod* called = *managed_sp;
2052 DCHECK(called->IsNative()) << called->PrettyMethod(true);
2053 Runtime* runtime = Runtime::Current();
2054 std::string_view shorty = called->GetShortyView();
2055 bool critical_native = called->IsCriticalNative();
2056 bool fast_native = called->IsFastNative();
2057 bool normal_native = !critical_native && !fast_native;
2058
2059 // Run the visitor and update sp.
2060 BuildGenericJniFrameVisitor visitor(self,
2061 called->IsStatic(),
2062 critical_native,
2063 shorty,
2064 managed_sp,
2065 reserved_area);
2066 {
2067 ScopedAssertNoThreadSuspension sants(__FUNCTION__);
2068 visitor.VisitArguments();
2069 }
2070
2071 // Fix up managed-stack things in Thread. After this we can walk the stack.
2072 self->SetTopOfStackGenericJniTagged(managed_sp);
2073
2074 self->VerifyStack();
2075
2076 // We can now walk the stack if needed by JIT GC from MethodEntered() for JIT-on-first-use.
2077 jit::Jit* jit = runtime->GetJit();
2078 if (jit != nullptr) {
2079 jit->MethodEntered(self, called);
2080 }
2081
2082 // We can set the entrypoint of a native method to generic JNI even when the
2083 // class hasn't been initialized, so we need to do the initialization check
2084 // before invoking the native code.
2085 if (called->StillNeedsClinitCheck()) {
2086 // Ensure static method's class is initialized.
2087 StackHandleScope<1> hs(self);
2088 Handle<mirror::Class> h_class = hs.NewHandle(called->GetDeclaringClass());
2089 if (!runtime->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
2090 DCHECK(Thread::Current()->IsExceptionPending()) << called->PrettyMethod();
2091 return nullptr; // Report error.
2092 }
2093 }
2094
2095 instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
2096 if (UNLIKELY(instr->HasMethodEntryListeners())) {
2097 instr->MethodEnterEvent(self, called);
2098 if (self->IsExceptionPending()) {
2099 return nullptr;
2100 }
2101 }
2102
2103 // Skip calling `artJniMethodStart()` for @CriticalNative and @FastNative.
2104 if (LIKELY(normal_native)) {
2105 // Start JNI.
2106 if (called->IsSynchronized()) {
2107 ObjPtr<mirror::Object> lock = GetGenericJniSynchronizationObject(self, called);
2108 DCHECK(lock != nullptr);
2109 lock->MonitorEnter(self);
2110 if (self->IsExceptionPending()) {
2111 return nullptr; // Report error.
2112 }
2113 }
2114 if (UNLIKELY(self->ReadFlag(ThreadFlag::kMonitorJniEntryExit, std::memory_order_relaxed))) {
2115 artJniMonitoredMethodStart(self);
2116 } else {
2117 artJniMethodStart(self);
2118 }
2119 } else {
2120 DCHECK(!called->IsSynchronized())
2121 << "@FastNative/@CriticalNative methods cannot be synchronized";
2122 }
2123
2124 // Skip pushing LRT frame for @CriticalNative.
2125 if (LIKELY(!critical_native)) {
2126 // Push local reference frame.
2127 JNIEnvExt* env = self->GetJniEnv();
2128 DCHECK(env != nullptr);
2129 uint32_t cookie = bit_cast<uint32_t>(env->PushLocalReferenceFrame());
2130
2131 // Save the cookie on the stack.
2132 uint32_t* sp32 = reinterpret_cast<uint32_t*>(managed_sp);
2133 *(sp32 - 1) = cookie;
2134 }
2135
2136 // Retrieve the stored native code.
2137 // Note that it may point to the lookup stub or trampoline.
2138 // FIXME: This is broken for @CriticalNative as the art_jni_dlsym_lookup_stub
2139 // does not handle that case. Calls from compiled stubs are also broken.
2140 void const* nativeCode = called->GetEntryPointFromJni();
2141
2142 VLOG(third_party_jni) << "GenericJNI: "
2143 << called->PrettyMethod()
2144 << " -> "
2145 << std::hex << reinterpret_cast<uintptr_t>(nativeCode);
2146
2147 // Return native code.
2148 return nativeCode;
2149 }
2150
2151 // Defined in quick_jni_entrypoints.cc.
2152 extern uint64_t GenericJniMethodEnd(Thread* self,
2153 uint32_t saved_local_ref_cookie,
2154 jvalue result,
2155 uint64_t result_f,
2156 ArtMethod* called);
2157
2158 /*
2159 * Is called after the native JNI code. Responsible for cleanup (handle scope, saved state) and
2160 * unlocking.
2161 */
2162 extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self,
2163 jvalue result,
2164 uint64_t result_f) {
2165 // We have just returned from a native call. We do not hold the shared mutator lock at this
2166 // point, and will not until GoToRunnable() is called later in GenericJniMethodEnd(). Accessing
2167 // objects or doing anything else that requires the mutator lock before then would cause
2168 // problems, as the GC may hold the exclusive mutator lock and may be moving objects.
2169 ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
2170 DCHECK(self->GetManagedStack()->GetTopQuickFrameGenericJniTag());
2171 uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
2172 ArtMethod* called = *sp;
2173 uint32_t cookie = *(sp32 - 1);
2174 return GenericJniMethodEnd(self, cookie, result, result_f, called);
2175 }
2176
2177 // We use TwoWordReturn to optimize scalar returns. We use the hi value for code, and the lo value
2178 // for the method pointer.
2179 //
2180 // It is valid to use this, as at the usage points here (returns from C functions) we are assumed
2181 // to hold the mutator lock (see the REQUIRES_SHARED(Locks::mutator_lock_) annotations).
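// For example (illustrative only): on a 32-bit target, TwoWordReturn is a 64-bit value and
// GetTwoWordSuccessValue(code, method) conceptually packs it as (uint64_t{code} << 32) | method,
// so the assembly stub can move the method pointer into its argument register and branch to
// `code`; the stubs detect failure by checking the returned code pointer against null.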
2182
2183 template <InvokeType type>
2184 static TwoWordReturn artInvokeCommon(uint32_t method_idx,
2185 ObjPtr<mirror::Object> this_object,
2186 Thread* self,
2187 ArtMethod** sp) {
2188 ScopedQuickEntrypointChecks sqec(self);
2189 DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));
2190 uint32_t dex_pc;
2191 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethodAndDexPc(sp, &dex_pc);
2192 CodeItemInstructionAccessor accessor(caller_method->DexInstructions());
2193 DCHECK_LT(dex_pc, accessor.InsnsSizeInCodeUnits());
2194 const Instruction& instr = accessor.InstructionAt(dex_pc);
2195 bool string_init = false;
2196 ArtMethod* method = FindMethodToCall<type>(
2197 self, caller_method, &this_object, instr, /* only_lookup_tls_cache= */ true, &string_init);
2198
2199 if (UNLIKELY(method == nullptr)) {
2200 if (self->IsExceptionPending()) {
2201 // Return a failure if the first lookup threw an exception.
2202 return GetTwoWordFailureValue(); // Failure.
2203 }
2204 const DexFile* dex_file = caller_method->GetDexFile();
2205 std::string_view shorty =
2206 dex_file->GetMethodShortyView(dex_file->GetMethodId(method_idx));
2207 {
2208 // Remember the args in case a GC happens in FindMethodToCall.
2209 ScopedObjectAccessUnchecked soa(self->GetJniEnv());
2210 RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, &soa);
2211 visitor.VisitArguments();
2212
2213 method = FindMethodToCall<type>(self,
2214 caller_method,
2215 &this_object,
2216 instr,
2217 /* only_lookup_tls_cache= */ false,
2218 &string_init);
2219
2220 visitor.FixupReferences();
2221 }
2222
2223 if (UNLIKELY(method == nullptr)) {
2224 CHECK(self->IsExceptionPending());
2225 return GetTwoWordFailureValue(); // Failure.
2226 }
2227 }
2228 DCHECK(!self->IsExceptionPending());
2229 const void* code = method->GetEntryPointFromQuickCompiledCode();
2230
2231 // When we return, the caller will branch to this address, so it had better not be 0!
2232 DCHECK(code != nullptr) << "Code was null in method: " << method->PrettyMethod()
2233 << " location: "
2234 << method->GetDexFile()->GetLocation();
2235
2236 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
2237 reinterpret_cast<uintptr_t>(method));
2238 }
2239
2240 // Explicit artInvokeCommon template function declarations to please analysis tool.
2241 #define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type) \
2242 template REQUIRES_SHARED(Locks::mutator_lock_) \
2243 TwoWordReturn artInvokeCommon<type>( \
2244 uint32_t method_idx, ObjPtr<mirror::Object> this_object, Thread* self, ArtMethod** sp)
2245
2246 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual);
2247 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface);
2248 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect);
2249 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic);
2250 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper);
2251 #undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL
2252
2253 // See comments in runtime_support_asm.S
2254 extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck(
2255 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2256 REQUIRES_SHARED(Locks::mutator_lock_) {
2257 return artInvokeCommon<kInterface>(method_idx, this_object, self, sp);
2258 }
2259
2260 extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck(
2261 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2262 REQUIRES_SHARED(Locks::mutator_lock_) {
2263 return artInvokeCommon<kDirect>(method_idx, this_object, self, sp);
2264 }
2265
2266 extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
2267 uint32_t method_idx, [[maybe_unused]] mirror::Object* this_object, Thread* self, ArtMethod** sp)
2268 REQUIRES_SHARED(Locks::mutator_lock_) {
2269 // For static calls, this_object is not required and may be random garbage. Don't pass it down,
2270 // so that it doesn't trip the ObjPtr alignment check.
2271 return artInvokeCommon<kStatic>(method_idx, nullptr, self, sp);
2272 }
2273
2274 extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(
2275 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2276 REQUIRES_SHARED(Locks::mutator_lock_) {
2277 return artInvokeCommon<kSuper>(method_idx, this_object, self, sp);
2278 }
2279
2280 extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(
2281 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2282 REQUIRES_SHARED(Locks::mutator_lock_) {
2283 return artInvokeCommon<kVirtual>(method_idx, this_object, self, sp);
2284 }
2285
2286 // Determine target of interface dispatch. The interface method and this object are known non-null.
2287 // The interface method is the method returned by the dex cache in the conflict trampoline.
2288 extern "C" TwoWordReturn artInvokeInterfaceTrampoline(ArtMethod* interface_method,
2289 mirror::Object* raw_this_object,
2290 Thread* self,
2291 ArtMethod** sp)
2292 REQUIRES_SHARED(Locks::mutator_lock_) {
2293 ScopedQuickEntrypointChecks sqec(self);
2294
2295 Runtime* runtime = Runtime::Current();
2296 bool resolve_method = ((interface_method == nullptr) || interface_method->IsRuntimeMethod());
2297 if (UNLIKELY(resolve_method)) {
2298 // The interface method is unresolved, so resolve it in the dex file of the caller.
2299 // Fetch the dex_method_idx of the target interface method from the caller.
2300 StackHandleScope<1> hs(self);
2301 Handle<mirror::Object> this_object = hs.NewHandle(raw_this_object);
2302 uint32_t dex_pc;
2303 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethodAndDexPc(sp, &dex_pc);
2304 uint32_t dex_method_idx;
2305 const Instruction& instr = caller_method->DexInstructions().InstructionAt(dex_pc);
2306 Instruction::Code instr_code = instr.Opcode();
2307 DCHECK(instr_code == Instruction::INVOKE_INTERFACE ||
2308 instr_code == Instruction::INVOKE_INTERFACE_RANGE)
2309 << "Unexpected call into interface trampoline: " << instr.DumpString(nullptr);
2310 if (instr_code == Instruction::INVOKE_INTERFACE) {
2311 dex_method_idx = instr.VRegB_35c();
2312 } else {
2313 DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
2314 dex_method_idx = instr.VRegB_3rc();
2315 }
2316
2317 const DexFile& dex_file = *caller_method->GetDexFile();
2318 std::string_view shorty =
2319 dex_file.GetMethodShortyView(dex_file.GetMethodId(dex_method_idx));
2320 {
2321 // Remember the args in case a GC happens in ClassLinker::ResolveMethod().
2322 ScopedObjectAccessUnchecked soa(self->GetJniEnv());
2323 RememberForGcArgumentVisitor visitor(sp, false, shorty, &soa);
2324 visitor.VisitArguments();
2325 ClassLinker* class_linker = runtime->GetClassLinker();
2326 interface_method = class_linker->ResolveMethodId(dex_method_idx, caller_method);
2327 visitor.FixupReferences();
2328 }
2329
2330 if (UNLIKELY(interface_method == nullptr)) {
2331 CHECK(self->IsExceptionPending());
2332 return GetTwoWordFailureValue(); // Failure.
2333 }
2334 ArtMethod* outer_method = QuickArgumentVisitor::GetOuterMethod(sp);
2335 MaybeUpdateBssMethodEntry(
2336 interface_method, MethodReference(&dex_file, dex_method_idx), outer_method);
2337
2338 // Refresh `raw_this_object` which may have changed after resolution.
2339 raw_this_object = this_object.Get();
2340 }
2341
2342 // The compiler and interpreter make sure the conflict trampoline is never
2343 // called on a method that resolves to j.l.Object.
2344 DCHECK(!interface_method->GetDeclaringClass()->IsObjectClass());
2345 DCHECK(interface_method->GetDeclaringClass()->IsInterface());
2346 DCHECK(!interface_method->IsRuntimeMethod());
2347 DCHECK(!interface_method->IsCopied());
2348
2349 ObjPtr<mirror::Object> obj_this = raw_this_object;
2350 ObjPtr<mirror::Class> cls = obj_this->GetClass();
2351 uint32_t imt_index = interface_method->GetImtIndex();
2352 ImTable* imt = cls->GetImt(kRuntimePointerSize);
2353 ArtMethod* conflict_method = imt->Get(imt_index, kRuntimePointerSize);
2354 DCHECK(conflict_method->IsRuntimeMethod());
2355
2356 if (UNLIKELY(resolve_method)) {
2357 // Now that we know the interface method, look it up in the conflict table.
2358 ImtConflictTable* current_table = conflict_method->GetImtConflictTable(kRuntimePointerSize);
2359 DCHECK(current_table != nullptr);
2360 ArtMethod* method = current_table->Lookup(interface_method, kRuntimePointerSize);
2361 if (method != nullptr) {
2362 return GetTwoWordSuccessValue(
2363 reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode()),
2364 reinterpret_cast<uintptr_t>(method));
2365 }
2366 // Interface method is not in the conflict table. Continue looking up in the
2367 // iftable.
2368 }
2369
2370 ArtMethod* method = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize);
2371 if (UNLIKELY(method == nullptr)) {
2372 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
2373 ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(
2374 interface_method, obj_this.Ptr(), caller_method);
2375 return GetTwoWordFailureValue();
2376 }
2377
2378 // We arrive here if we have found an implementation, and it is not in the ImtConflictTable.
2379 // We create a new table with the new pair { interface_method, method }.
2380
2381 // Classes in the boot image should never need to update conflict methods in
2382 // their IMT.
2383 CHECK(!runtime->GetHeap()->ObjectIsInBootImageSpace(cls.Ptr())) << cls->PrettyClass();
2384 ArtMethod* new_conflict_method = runtime->GetClassLinker()->AddMethodToConflictTable(
2385 cls.Ptr(),
2386 conflict_method,
2387 interface_method,
2388 method);
2389 if (new_conflict_method != conflict_method) {
2390 // Update the IMT if we create a new conflict method. No fence needed here, as the
2391 // data is consistent.
2392 imt->Set(imt_index,
2393 new_conflict_method,
2394 kRuntimePointerSize);
2395 }
2396
2397 const void* code = method->GetEntryPointFromQuickCompiledCode();
2398
2399 // When we return, the caller will branch to this address, so it had better not be 0!
2400 DCHECK(code != nullptr) << "Code was null in method: " << method->PrettyMethod()
2401 << " location: " << method->GetDexFile()->GetLocation();
2402
2403 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
2404 reinterpret_cast<uintptr_t>(method));
2405 }
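
// Conceptually (illustrative sketch only, with a hypothetical Entry type; the real data structure
// is ImtConflictTable in imt_conflict_table.h), the conflict table filled in above lets the
// conflict trampoline resolve later calls without re-entering this runtime entrypoint:
//
//   struct Entry { ArtMethod* interface_method; ArtMethod* implementation; };
//   ArtMethod* Lookup(const Entry* entries, ArtMethod* interface_method) {
//     for (const Entry* e = entries; e->interface_method != nullptr; ++e) {
//       if (e->interface_method == interface_method) {
//         return e->implementation;
//       }
//     }
//     return nullptr;  // Miss: the trampoline falls back to artInvokeInterfaceTrampoline.
//   }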
2406
2407 // Returns uint64_t representing raw bits from JValue.
2408 extern "C" uint64_t artInvokePolymorphic(mirror::Object* raw_receiver, Thread* self, ArtMethod** sp)
2409 REQUIRES_SHARED(Locks::mutator_lock_) {
2410 ScopedQuickEntrypointChecks sqec(self);
2411 DCHECK(raw_receiver != nullptr);
2412 DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));
2413
2414 // Start new JNI local reference state
2415 JNIEnvExt* env = self->GetJniEnv();
2416 ScopedObjectAccessUnchecked soa(env);
2417 ScopedJniEnvLocalRefState env_state(env);
2418 const char* old_cause = self->StartAssertNoThreadSuspension("Making stack arguments safe.");
2419
2420 // From the instruction, get the |callsite_shorty| and expose arguments on the stack to the GC.
2421 uint32_t dex_pc;
2422 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethodAndDexPc(sp, &dex_pc);
2423 const Instruction& inst = caller_method->DexInstructions().InstructionAt(dex_pc);
2424 DCHECK(inst.Opcode() == Instruction::INVOKE_POLYMORPHIC ||
2425 inst.Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE);
2426 const dex::ProtoIndex proto_idx(inst.VRegH());
2427 std::string_view shorty = caller_method->GetDexFile()->GetShortyView(proto_idx);
2428 static const bool kMethodIsStatic = false; // invoke() and invokeExact() are not static.
2429 RememberForGcArgumentVisitor gc_visitor(sp, kMethodIsStatic, shorty, &soa);
2430 gc_visitor.VisitArguments();
2431
2432 // Wrap raw_receiver in a Handle for safety.
2433 StackHandleScope<3> hs(self);
2434 Handle<mirror::Object> receiver_handle(hs.NewHandle(raw_receiver));
2435 raw_receiver = nullptr;
2436 self->EndAssertNoThreadSuspension(old_cause);
2437
2438 // Resolve method.
2439 ClassLinker* linker = Runtime::Current()->GetClassLinker();
2440 ArtMethod* resolved_method = linker->ResolveMethodWithChecks(
2441 inst.VRegB(), caller_method, kVirtual);
2442
2443 DCHECK_EQ(ArtMethod::NumArgRegisters(shorty) + 1u, static_cast<uint32_t>(inst.VRegA()));
2444 DCHECK_EQ(resolved_method->IsStatic(), kMethodIsStatic);
2445
2446 // Fix references before constructing the shadow frame.
2447 gc_visitor.FixupReferences();
2448
2449 // Construct shadow frame placing arguments consecutively from |first_arg|.
2450 const bool is_range = (inst.Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE);
2451 const size_t num_vregs = is_range ? inst.VRegA_4rcc() : inst.VRegA_45cc();
2452 const size_t first_arg = 0;
2453 ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
2454 CREATE_SHADOW_FRAME(num_vregs, resolved_method, dex_pc);
2455 ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
2456 ScopedStackedShadowFramePusher frame_pusher(self, shadow_frame);
2457 BuildQuickShadowFrameVisitor shadow_frame_builder(sp,
2458 kMethodIsStatic,
2459 shorty,
2460 shadow_frame,
2461 first_arg);
2462 shadow_frame_builder.VisitArguments();
2463
2464 // Push a transition back into managed code onto the linked list in thread.
2465 ManagedStack fragment;
2466 self->PushManagedStackFragment(&fragment);
2467
2468 // Call DoInvokePolymorphic with |is_range| = true, as shadow frame has argument registers in
2469 // consecutive order.
2470 RangeInstructionOperands operands(first_arg + 1, num_vregs - 1);
2471 Intrinsics intrinsic = resolved_method->GetIntrinsic();
2472 JValue result;
2473 bool success = false;
2474 if (resolved_method->GetDeclaringClass() == GetClassRoot<mirror::MethodHandle>(linker)) {
2475 Handle<mirror::MethodType> method_type(
2476 hs.NewHandle(linker->ResolveMethodType(self, proto_idx, caller_method)));
2477 if (UNLIKELY(method_type.IsNull())) {
2478 // This implies we couldn't resolve one or more types in this method handle.
2479 CHECK(self->IsExceptionPending());
2480 return 0UL;
2481 }
2482
2483 Handle<mirror::MethodHandle> method_handle(hs.NewHandle(
2484 ObjPtr<mirror::MethodHandle>::DownCast(receiver_handle.Get())));
2485 if (intrinsic == Intrinsics::kMethodHandleInvokeExact) {
2486 success = MethodHandleInvokeExact(self,
2487 *shadow_frame,
2488 method_handle,
2489 method_type,
2490 &operands,
2491 &result);
2492 } else {
2493 DCHECK_EQ(static_cast<uint32_t>(intrinsic),
2494 static_cast<uint32_t>(Intrinsics::kMethodHandleInvoke));
2495 success = MethodHandleInvoke(self,
2496 *shadow_frame,
2497 method_handle,
2498 method_type,
2499 &operands,
2500 &result);
2501 }
2502 } else {
2503 DCHECK_EQ(GetClassRoot<mirror::VarHandle>(linker), resolved_method->GetDeclaringClass());
2504 Handle<mirror::VarHandle> var_handle(hs.NewHandle(
2505 ObjPtr<mirror::VarHandle>::DownCast(receiver_handle.Get())));
2506 mirror::VarHandle::AccessMode access_mode =
2507 mirror::VarHandle::GetAccessModeByIntrinsic(intrinsic);
2508
2509 success = VarHandleInvokeAccessor(self,
2510 *shadow_frame,
2511 var_handle,
2512 caller_method,
2513 proto_idx,
2514 access_mode,
2515 &operands,
2516 &result);
2517 }
2518
2519 DCHECK(success || self->IsExceptionPending());
2520
2521 // Pop transition record.
2522 self->PopManagedStackFragment(fragment);
2523
2524 bool is_ref = (shorty[0] == 'L');
2525 Runtime::Current()->GetInstrumentation()->PushDeoptContextIfNeeded(
2526 self, DeoptimizationMethodType::kDefault, is_ref, result);
2527
2528 return NanBoxResultIfNeeded(result.GetJ(), shorty[0]);
2529 }
2530
2531 extern "C" uint64_t artInvokePolymorphicWithHiddenReceiver(mirror::Object* raw_receiver,
2532 Thread* self,
2533 ArtMethod** sp)
2534 REQUIRES_SHARED(Locks::mutator_lock_) {
2535 ScopedQuickEntrypointChecks sqec(self);
2536 DCHECK(raw_receiver != nullptr);
2537 DCHECK(raw_receiver->InstanceOf(WellKnownClasses::java_lang_invoke_MethodHandle.Get()));
2538 DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));
2539
2540 JNIEnvExt* env = self->GetJniEnv();
2541 ScopedObjectAccessUnchecked soa(env);
2542 ScopedJniEnvLocalRefState env_state(env);
2543 const char* old_cause = self->StartAssertNoThreadSuspension("Making stack arguments safe.");
2544
2545 // From the instruction, get the |callsite_shorty| and expose arguments on the stack to the GC.
2546 uint32_t dex_pc;
2547 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethodAndDexPc(sp, &dex_pc);
2548 const Instruction& inst = caller_method->DexInstructions().InstructionAt(dex_pc);
2549 DCHECK(inst.Opcode() == Instruction::INVOKE_POLYMORPHIC ||
2550 inst.Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE);
2551 const dex::ProtoIndex proto_idx(inst.VRegH());
2552 std::string_view shorty = caller_method->GetDexFile()->GetShortyView(proto_idx);
2553
2554 // invokeExact is not a static method, but we use a custom calling convention here: the receiver
2555 // (a MethodHandle) is not passed as the first argument but through different means, so the
2556 // shorty and the argument allocation look as if invokeExact were static.
2557 RememberForGcArgumentVisitor gc_visitor(sp, /* is_static= */ true, shorty, &soa);
2558 gc_visitor.VisitArguments();
2559
2560 // Wrap raw_receiver in a Handle for safety.
2561 StackHandleScope<2> hs(self);
2562 Handle<mirror::MethodHandle> method_handle(
2563 hs.NewHandle(down_cast<mirror::MethodHandle*>(raw_receiver)));
2564
2565 self->EndAssertNoThreadSuspension(old_cause);
2566
2567 ClassLinker* linker = Runtime::Current()->GetClassLinker();
2568 ArtMethod* invoke_exact = WellKnownClasses::java_lang_invoke_MethodHandle_invokeExact;
2569 if (kIsDebugBuild) {
2570 ArtMethod* resolved_method = linker->ResolveMethodWithChecks(
2571 inst.VRegB(), caller_method, kVirtual);
2572 CHECK_EQ(resolved_method, invoke_exact);
2573 }
2574
2575 Handle<mirror::MethodType> method_type(
2576 hs.NewHandle(linker->ResolveMethodType(self, proto_idx, caller_method)));
2577 if (UNLIKELY(method_type.IsNull())) {
2578 // This implies we couldn't resolve one or more types in this method handle.
2579 CHECK(self->IsExceptionPending());
2580 return 0UL;
2581 }
2582
2583 DCHECK_EQ(ArtMethod::NumArgRegisters(shorty) + 1u, static_cast<uint32_t>(inst.VRegA()));
2584
2585 // Fix references before constructing the shadow frame.
2586 gc_visitor.FixupReferences();
2587
2588 // Construct shadow frame placing arguments consecutively from |first_arg|.
2589 const bool is_range = inst.Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE;
2590 const size_t num_vregs = is_range ? inst.VRegA_4rcc() : inst.VRegA_45cc();
2591 const size_t first_arg = 0;
2592 ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
2593 CREATE_SHADOW_FRAME(num_vregs, invoke_exact, dex_pc);
2594 ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
2595 ScopedStackedShadowFramePusher frame_pusher(self, shadow_frame);
2596 // Pretend the method is static, see the gc_visitor comment above.
2597 BuildQuickShadowFrameVisitor shadow_frame_builder(sp,
2598 /* is_static= */ true,
2599 shorty,
2600 shadow_frame,
2601 first_arg);
2602 // The receiver is not passed as a regular argument, so add it to the ShadowFrame manually.
2603 shadow_frame_builder.SetReceiver(method_handle.Get());
2604 shadow_frame_builder.VisitArguments();
2605
2606 // Push a transition back into managed code onto the linked list in thread.
2607 ManagedStack fragment;
2608 self->PushManagedStackFragment(&fragment);
2609
2610 RangeInstructionOperands operands(first_arg + 1, num_vregs - 1);
2611 JValue result;
2612 bool success = MethodHandleInvokeExact(self,
2613 *shadow_frame,
2614 method_handle,
2615 method_type,
2616 &operands,
2617 &result);
2618
2619 DCHECK(success || self->IsExceptionPending());
2620
2621 // Pop transition record.
2622 self->PopManagedStackFragment(fragment);
2623
2624 bool is_ref = shorty[0] == 'L';
2625 Runtime::Current()->GetInstrumentation()->PushDeoptContextIfNeeded(
2626 self, DeoptimizationMethodType::kDefault, is_ref, result);
2627
2628 return NanBoxResultIfNeeded(result.GetJ(), shorty[0]);
2629 }
2630
2631 // Returns uint64_t representing raw bits from JValue.
2632 extern "C" uint64_t artInvokeCustom(uint32_t call_site_idx, Thread* self, ArtMethod** sp)
2633 REQUIRES_SHARED(Locks::mutator_lock_) {
2634 ScopedQuickEntrypointChecks sqec(self);
2635 DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));
2636
2637 // invoke-custom is effectively a static call (no receiver).
2638 static constexpr bool kMethodIsStatic = true;
2639
2640 // Start new JNI local reference state
2641 JNIEnvExt* env = self->GetJniEnv();
2642 ScopedObjectAccessUnchecked soa(env);
2643 ScopedJniEnvLocalRefState env_state(env);
2644
2645 const char* old_cause = self->StartAssertNoThreadSuspension("Making stack arguments safe.");
2646
2647 // From the instruction, get the |callsite_shorty| and expose arguments on the stack to the GC.
2648 uint32_t dex_pc;
2649 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethodAndDexPc(sp, &dex_pc);
2650 const DexFile* dex_file = caller_method->GetDexFile();
2651 const dex::ProtoIndex proto_idx(dex_file->GetProtoIndexForCallSite(call_site_idx));
2652 std::string_view shorty = caller_method->GetDexFile()->GetShortyView(proto_idx);
2653
2654 // Construct the shadow frame placing arguments consecutively from |first_arg|.
2655 const size_t first_arg = 0;
2656 const size_t num_vregs = ArtMethod::NumArgRegisters(shorty);
2657 ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
2658 CREATE_SHADOW_FRAME(num_vregs, caller_method, dex_pc);
2659 ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
2660 ScopedStackedShadowFramePusher frame_pusher(self, shadow_frame);
2661 BuildQuickShadowFrameVisitor shadow_frame_builder(sp,
2662 kMethodIsStatic,
2663 shorty,
2664 shadow_frame,
2665 first_arg);
2666 shadow_frame_builder.VisitArguments();
2667
2668 // Push a transition back into managed code onto the linked list in thread.
2669 ManagedStack fragment;
2670 self->PushManagedStackFragment(&fragment);
2671 self->EndAssertNoThreadSuspension(old_cause);
2672
2673 // Perform the invoke-custom operation.
2674 RangeInstructionOperands operands(first_arg, num_vregs);
2675 JValue result;
2676 bool success =
2677 interpreter::DoInvokeCustom(self, *shadow_frame, call_site_idx, &operands, &result);
2678 DCHECK(success || self->IsExceptionPending());
2679
2680 // Pop transition record.
2681 self->PopManagedStackFragment(fragment);
2682
2683 bool is_ref = (shorty[0] == 'L');
2684 Runtime::Current()->GetInstrumentation()->PushDeoptContextIfNeeded(
2685 self, DeoptimizationMethodType::kDefault, is_ref, result);
2686
2687 return NanBoxResultIfNeeded(result.GetJ(), shorty[0]);
2688 }
2689
2690 extern "C" void artJniMethodEntryHook(Thread* self)
2691 REQUIRES_SHARED(Locks::mutator_lock_) {
2692 instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
2693 ArtMethod* method = *self->GetManagedStack()->GetTopQuickFrame();
2694 instr->MethodEnterEvent(self, method);
2695 }
2696
2697 extern "C" Context* artMethodEntryHook(ArtMethod* method, Thread* self, ArtMethod** sp)
2698 REQUIRES_SHARED(Locks::mutator_lock_) {
2699 ScopedQuickEntrypointChecks sqec(self);
2700 instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
2701 if (instr->HasFastMethodEntryListenersOnly()) {
2702 instr->MethodEnterEvent(self, method);
2703 // No exception or deoptimization.
2704 return nullptr;
2705 }
2706
2707 if (instr->HasMethodEntryListeners()) {
2708 instr->MethodEnterEvent(self, method);
2709 // MethodEnter callback could have requested a deopt for ex: by setting a breakpoint, so
2710 // check if we need a deopt here.
2711 if (instr->ShouldDeoptimizeCaller(self, sp) || instr->IsDeoptimized(method)) {
2712 // Instrumentation can request deoptimizing only a particular method (for ex: when
2713 // there are break points on the method). In such cases deoptimize only this method.
2714 // FullFrame deoptimizations are handled on method exits.
2715 return artDeoptimizeFromCompiledCode(DeoptimizationKind::kDebugging, self);
2716 }
2717 } else {
2718 DCHECK(!instr->IsDeoptimized(method));
2719 }
2720 // No exception or deoptimization.
2721 return nullptr;
2722 }
2723
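// Method exit hook for compiled managed code. Reports the method-exit event, delivers any
// exception thrown by the exit callbacks, and returns a deoptimization Context if the caller
// or the current frame needs to be deoptimized; returns nullptr if execution should continue
// normally.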
extern "C" Context* artMethodExitHook(Thread* self,
                                      ArtMethod** sp,
                                      uint64_t* gpr_result,
                                      uint64_t* fpr_result,
                                      uint32_t frame_size)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedQuickEntrypointChecks sqec(self);
  DCHECK_EQ(reinterpret_cast<uintptr_t>(self), reinterpret_cast<uintptr_t>(Thread::Current()));
  // The instrumentation exit stub must not be entered with a pending exception.
  CHECK(!self->IsExceptionPending())
      << "Enter instrumentation exit stub with pending exception " << self->GetException()->Dump();

  instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
  DCHECK(instr->RunExitHooks());

  ArtMethod* method = *sp;
  if (instr->HasFastMethodExitListenersOnly()) {
    // Fast method listeners are only used for tracing, which doesn't need any deoptimization
    // checks or a return value.
    JValue return_value;
    instr->MethodExitEvent(self, method, /* frame= */ {}, return_value);
    // No exception or deoptimization.
    return nullptr;
  }

  bool is_ref = false;
  if (instr->HasMethodExitListeners()) {
    StackHandleScope<1> hs(self);

    CHECK(gpr_result != nullptr);
    CHECK(fpr_result != nullptr);

    JValue return_value = instr->GetReturnValue(method, &is_ref, gpr_result, fpr_result);
    MutableHandle<mirror::Object> res(hs.NewHandle<mirror::Object>(nullptr));
    if (is_ref) {
      // Take a handle to the return value so we won't lose it if we suspend.
      res.Assign(return_value.GetL());
    }
    DCHECK(!method->IsRuntimeMethod());

    // If we need a deoptimization, MethodExitEvent will be called by the interpreter when it
    // re-executes the return instruction. For native methods we have to process method exit
    // events here since deoptimization just removes the native frame.
    instr->MethodExitEvent(self, method, /* frame= */ {}, return_value);

    if (is_ref) {
      // Restore the return value if it's a reference, since it might have moved.
      *reinterpret_cast<mirror::Object**>(gpr_result) = res.Get();
      return_value.SetL(res.Get());
    }
  }

  if (self->IsExceptionPending() || self->ObserveAsyncException()) {
    // The exception was thrown from the method exit callback. We should not call method unwind
    // callbacks for this case.
    std::unique_ptr<Context> context =
        self->QuickDeliverException(/* is_method_exit_exception= */ true);
    DCHECK(context != nullptr);
    return context.release();
  }

  // We should deoptimize here if the caller requires a deoptimization or if the current method
  // needs a deoptimization. We may need to deoptimize the current method if method exit hooks
  // requested this frame to be popped; IsForcedInterpreterNeededForUpcall checks for that.
  const bool deoptimize = instr->ShouldDeoptimizeCaller(self, sp, frame_size) ||
                          Dbg::IsForcedInterpreterNeededForUpcall(self, method);
  if (deoptimize) {
    JValue ret_val = instr->GetReturnValue(method, &is_ref, gpr_result, fpr_result);
    DeoptimizationMethodType deopt_method_type = instr->GetDeoptimizationMethodType(method);
    self->PushDeoptimizationContext(
        ret_val, is_ref, self->GetException(), false, deopt_method_type);
    // The method exit callback has already been run for this method, so tell the deoptimizer to
    // skip callbacks for this frame.
    std::unique_ptr<Context> context = self->Deoptimize(DeoptimizationKind::kFullFrame,
                                                        /* single_frame= */ false,
                                                        /* skip_method_exit_callbacks= */ true);
    DCHECK(context != nullptr);
    return context.release();
  }

  // No exception or deoptimization.
  return nullptr;
}

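// Records an entry or exit trace event for |method| on behalf of the TraceProfiler used for
// long-running-method tracing, flushing the profiler's buffer as needed.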
extern "C" void artRecordLongRunningMethodTraceEvent(ArtMethod* method, Thread* self, bool is_entry)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  TraceProfiler::FlushBufferAndRecordTraceEvent(method, self, is_entry);
}

}  // namespace art