/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cstdio>

#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/callee_save_type.h"
#include "base/pointer_size.h"
#include "class_linker-inl.h"
#include "class_root-inl.h"
#include "common_runtime_test.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "imt_conflict_table.h"
#include "jni/jni_internal.h"
#include "linear_alloc-inl.h"
#include "mirror/class-alloc-inl.h"
#include "mirror/string-inl.h"
#include "mirror/object_array-alloc-inl.h"
#include "scoped_thread_state_change-inl.h"

namespace art HIDDEN {

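// Test fixture for exercising the quick entrypoint stubs directly. The
// Invoke3* helpers below hand-roll the transition into a stub in inline
// assembly for each supported architecture.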
class StubTest : public CommonRuntimeTest {
 protected:
  // We need callee-save methods set up in the Runtime for exceptions.
  void SetUp() override {
    // Do the normal setup.
    CommonRuntimeTest::SetUp();

    {
      // Create callee-save methods
      ScopedObjectAccess soa(Thread::Current());
      runtime_->SetInstructionSet(kRuntimeISA);
      for (uint32_t i = 0; i < static_cast<uint32_t>(CalleeSaveType::kLastCalleeSaveType); ++i) {
        CalleeSaveType type = CalleeSaveType(i);
        if (!runtime_->HasCalleeSaveMethod(type)) {
          runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(), type);
        }
      }
    }
  }

  void SetUpRuntimeOptions(RuntimeOptions *options) override {
    // Use a smaller heap
    for (std::pair<std::string, const void*>& pair : *options) {
      if (pair.first.find("-Xmx") == 0) {
        pair.first = "-Xmx4M";  // Smallest we can go.
      }
    }
    options->push_back(std::make_pair("-Xint", nullptr));
  }

  // Helper function needed since TEST_F makes a new class.
  Thread::tls_ptr_sized_values* GetTlsPtr(Thread* self) {
    return &self->tlsPtr_;
  }

 public:
  size_t Invoke3(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, Thread* self) {
    return Invoke3WithReferrer(arg0, arg1, arg2, code, self, nullptr);
  }

  // TODO: Set up a frame according to referrer's specs.
  size_t Invoke3WithReferrer(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, Thread* self,
                             ArtMethod* referrer) {
    return Invoke3WithReferrerAndHidden(arg0, arg1, arg2, code, self, referrer, 0);
  }

  // TODO: Set up a frame according to referrer's specs.
  size_t Invoke3WithReferrerAndHidden(size_t arg0, size_t arg1, size_t arg2, uintptr_t code,
                                      Thread* self, ArtMethod* referrer, size_t hidden) {
    // Push a transition back into managed code onto the linked list in thread.
    ManagedStack fragment;
    self->PushManagedStackFragment(&fragment);

    size_t result;
    size_t fpr_result = 0;
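    // Each architecture block below spills the registers we promise not to
    // clobber, loads the stub's arguments into the quick ABI registers, calls
    // the stub, and restores the spilled state. Unsupported architectures
    // fall through to the #else at the end.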
#if defined(__i386__)
    // TODO: Set the thread?
#define PUSH(reg) "push " # reg "\n\t .cfi_adjust_cfa_offset 4\n\t"
#define POP(reg) "pop " # reg "\n\t .cfi_adjust_cfa_offset -4\n\t"
    __asm__ __volatile__(
        "movd %[hidden], %%xmm7\n\t"  // This is a memory op, so do this early. If it is off of
                                      // esp, then we won't be able to access it after spilling.

        // Spill 6 registers.
        PUSH(%%ebx)
        PUSH(%%ecx)
        PUSH(%%edx)
        PUSH(%%esi)
        PUSH(%%edi)
        PUSH(%%ebp)

        // Store the inputs to the stack, but keep the referrer up top, less work.
        PUSH(%[referrer])           // Align stack.
        PUSH(%[referrer])           // Store referrer

        PUSH(%[arg0])
        PUSH(%[arg1])
        PUSH(%[arg2])
        PUSH(%[code])
        // Now read them back into the required registers.
        POP(%%edi)
        POP(%%edx)
        POP(%%ecx)
        POP(%%eax)
        // Call is prepared now.

        "call *%%edi\n\t"           // Call the stub
        "addl $8, %%esp\n\t"        // Pop referrer and padding.
        ".cfi_adjust_cfa_offset -8\n\t"

        // Restore 6 registers.
        POP(%%ebp)
        POP(%%edi)
        POP(%%esi)
        POP(%%edx)
        POP(%%ecx)
        POP(%%ebx)

        : "=a" (result)
          // Use the result from eax
        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code),
          [referrer]"r"(referrer), [hidden]"m"(hidden)
          // This places code into edi, arg0 into eax, arg1 into ecx, and arg2 into edx
        : "memory", "xmm7");  // clobber.
#undef PUSH
#undef POP
#elif defined(__arm__)
    __asm__ __volatile__(
        "push {r1-r12, lr}\n\t"     // Save state, 13*4B = 52B
        ".cfi_adjust_cfa_offset 52\n\t"
        "push {r9}\n\t"
        ".cfi_adjust_cfa_offset 4\n\t"
        "mov r9, %[referrer]\n\t"
        "str r9, [sp, #-8]!\n\t"    // Push referrer and 4B padding; sp is 16B aligned again
        ".cfi_adjust_cfa_offset 8\n\t"
        "ldr r9, [sp, #8]\n\t"

        // Push everything on the stack, so we don't rely on the order. What a mess. :-(
        "sub sp, sp, #24\n\t"
        "str %[arg0], [sp]\n\t"
        "str %[arg1], [sp, #4]\n\t"
        "str %[arg2], [sp, #8]\n\t"
        "str %[code], [sp, #12]\n\t"
        "str %[self], [sp, #16]\n\t"
        "str %[hidden], [sp, #20]\n\t"
        "ldr r0, [sp]\n\t"
        "ldr r1, [sp, #4]\n\t"
        "ldr r2, [sp, #8]\n\t"
        "ldr r3, [sp, #12]\n\t"
        "ldr r9, [sp, #16]\n\t"
        "ldr r12, [sp, #20]\n\t"
        "add sp, sp, #24\n\t"

        "blx r3\n\t"                // Call the stub
        "add sp, sp, #12\n\t"       // Pop referrer, padding, and the saved r9
        ".cfi_adjust_cfa_offset -12\n\t"
        "pop {r1-r12, lr}\n\t"      // Restore state
        ".cfi_adjust_cfa_offset -52\n\t"
        "mov %[result], r0\n\t"     // Save the result
        : [result] "=r" (result)
          // Use the result from r0
        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
          [referrer] "r"(referrer), [hidden] "r"(hidden)
        : "r0", "memory");  // clobber.
#elif defined(__aarch64__)
    __asm__ __volatile__(
        // Spill x0-x7 which we say we don't clobber. May contain args.
        "sub sp, sp, #80\n\t"
        ".cfi_adjust_cfa_offset 80\n\t"
        "stp x0, x1, [sp]\n\t"
        "stp x2, x3, [sp, #16]\n\t"
        "stp x4, x5, [sp, #32]\n\t"
        "stp x6, x7, [sp, #48]\n\t"
        // To be extra defensive, store x20, x21. We do this because some of the stubs might make a
        // transition into the runtime via the blr instruction below and *not* save x20.
        "stp x20, x21, [sp, #64]\n\t"

        "sub sp, sp, #16\n\t"          // Reserve stack space, 16B aligned
        ".cfi_adjust_cfa_offset 16\n\t"
        "str %[referrer], [sp]\n\t"    // referrer

        // Push everything on the stack, so we don't rely on the order. What a mess. :-(
        "sub sp, sp, #48\n\t"
        ".cfi_adjust_cfa_offset 48\n\t"
        // All things are "r" constraints, so direct str/stp should work.
        "stp %[arg0], %[arg1], [sp]\n\t"
        "stp %[arg2], %[code], [sp, #16]\n\t"
        "stp %[self], %[hidden], [sp, #32]\n\t"

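        // d8 - d15 are callee-saved FP registers under AAPCS64. Garble them
        // with known values here and compare after the call to check that the
        // stub (and anything it transitively calls) preserves them.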
        // Now we definitely have x0-x3 free, use them to garble d8 - d15.
        "movk x0, #0xfad0\n\t"
        "movk x0, #0xebad, lsl #16\n\t"
        "movk x0, #0xfad0, lsl #32\n\t"
        "movk x0, #0xebad, lsl #48\n\t"
        "fmov d8, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d9, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d10, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d11, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d12, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d13, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d14, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d15, x0\n\t"

        // Load call params into the right registers.
        "ldp x0, x1, [sp]\n\t"
        "ldp x2, x3, [sp, #16]\n\t"
        "ldp x19, x17, [sp, #32]\n\t"
        "add sp, sp, #48\n\t"
        ".cfi_adjust_cfa_offset -48\n\t"

        "blr x3\n\t"              // Call the stub
        "mov x8, x0\n\t"          // Store result
        "add sp, sp, #16\n\t"     // Drop the quick "frame"
        ".cfi_adjust_cfa_offset -16\n\t"

        // Test d8 - d15. We can use x1 and x2.
        "movk x1, #0xfad0\n\t"
        "movk x1, #0xebad, lsl #16\n\t"
        "movk x1, #0xfad0, lsl #32\n\t"
        "movk x1, #0xebad, lsl #48\n\t"
        "fmov x2, d8\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d9\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d10\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d11\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d12\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d13\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d14\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d15\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"

        "mov x9, #0\n\t"              // Use x9 as flag, in clobber list

        // Finish up.
        "2:\n\t"
        "ldp x0, x1, [sp]\n\t"        // Restore stuff not named clobbered, may contain fpr_result
        "ldp x2, x3, [sp, #16]\n\t"
        "ldp x4, x5, [sp, #32]\n\t"
        "ldp x6, x7, [sp, #48]\n\t"
        "ldp x20, x21, [sp, #64]\n\t"
        "add sp, sp, #80\n\t"         // Free stack space, now sp as on entry
        ".cfi_adjust_cfa_offset -80\n\t"

        "str x9, %[fpr_result]\n\t"   // Store the FPR comparison result
        "mov %[result], x8\n\t"       // Store the call result

        "b 3f\n\t"                    // Goto end

        // Failed fpr verification.
        "1:\n\t"
        "mov x9, #1\n\t"
        "b 2b\n\t"                    // Goto finish-up

        // End
        "3:\n\t"
        : [result] "=r" (result)
          // Use the result from x0
        : [arg0] "0"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
          [referrer] "r"(referrer), [hidden] "r"(hidden), [fpr_result] "m" (fpr_result)
          // X18 is a reserved register, cannot be clobbered.
          // Leave one register unclobbered, which is needed for compiling with
          // -fstack-protector-strong. According to AAPCS64 registers x9-x15 are caller-saved,
          // which means we should unclobber one of the callee-saved registers that are unused.
          // Here we use x20.
          // http://b/72613441, Clang 7.0 asks for one more register, so we do not reserve x21.
        : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19",
          "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x30",
          "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
          "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
          "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
          "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
          "memory");
#elif defined(__x86_64__) && !defined(__APPLE__)
#define PUSH(reg) "pushq " # reg "\n\t .cfi_adjust_cfa_offset 8\n\t"
#define POP(reg) "popq " # reg "\n\t .cfi_adjust_cfa_offset -8\n\t"
    // Note: Uses the native convention. We do a callee-save regimen by manually spilling and
    //       restoring almost all registers.
    // TODO: Set the thread?
    __asm__ __volatile__(
        // Spill almost everything (except rax, rsp). 14 registers.
        PUSH(%%rbx)
        PUSH(%%rcx)
        PUSH(%%rdx)
        PUSH(%%rsi)
        PUSH(%%rdi)
        PUSH(%%rbp)
        PUSH(%%r8)
        PUSH(%%r9)
        PUSH(%%r10)
        PUSH(%%r11)
        PUSH(%%r12)
        PUSH(%%r13)
        PUSH(%%r14)
        PUSH(%%r15)

        PUSH(%[referrer])              // Push referrer & 16B alignment padding
        PUSH(%[referrer])

        // Now juggle the input registers.
        PUSH(%[arg0])
        PUSH(%[arg1])
        PUSH(%[arg2])
        PUSH(%[hidden])
        PUSH(%[code])
        POP(%%r8)
        POP(%%rax)
        POP(%%rdx)
        POP(%%rsi)
        POP(%%rdi)

        "call *%%r8\n\t"                  // Call the stub
        "addq $16, %%rsp\n\t"             // Pop referrer and padding
        ".cfi_adjust_cfa_offset -16\n\t"

        POP(%%r15)
        POP(%%r14)
        POP(%%r13)
        POP(%%r12)
        POP(%%r11)
        POP(%%r10)
        POP(%%r9)
        POP(%%r8)
        POP(%%rbp)
        POP(%%rdi)
        POP(%%rsi)
        POP(%%rdx)
        POP(%%rcx)
        POP(%%rbx)

        : "=a" (result)
        // Use the result from rax
        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code),
          [referrer] "r"(referrer), [hidden] "r"(hidden)
        // This places arg0 into rdi, arg1 into rsi, arg2 into rdx, and code into some other
        // register. We can't use "b" (rbx), as ASAN uses this for the frame pointer.
        : "memory");  // We spill and restore (almost) all registers, so only mention memory here.
#undef PUSH
#undef POP
#else
    UNUSED(arg0, arg1, arg2, code, referrer, hidden);
    LOG(WARNING) << "Was asked to invoke for an architecture I do not understand.";
    result = 0;
#endif
    // Pop transition.
    self->PopManagedStackFragment(fragment);

    fp_result = fpr_result;
    EXPECT_EQ(0U, fp_result);

    return result;
  }

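  // Reads the address of the given quick entrypoint out of the thread-local
  // entrypoint table, using the offset recorded for the runtime pointer size.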
  static uintptr_t GetEntrypoint(Thread* self, QuickEntrypointEnum entrypoint) {
    int32_t offset;
    offset = GetThreadOffset<kRuntimePointerSize>(entrypoint).Int32Value();
    return *reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(self) + offset);
  }

 protected:
  size_t fp_result;
};


TEST_F(StubTest, Memcpy) {
#if defined(__i386__) || (defined(__x86_64__) && !defined(__APPLE__))
  Thread* self = Thread::Current();

  uint32_t orig[20];
  uint32_t trg[20];
  for (size_t i = 0; i < 20; ++i) {
    orig[i] = i;
    trg[i] = 0;
  }

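  // Copy 10 uint32_t values starting at index 4 of both arrays; elements
  // outside that window must be left untouched. (orig[0] and trg[0] compare
  // equal only because both start out as zero.)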
  Invoke3(reinterpret_cast<size_t>(&trg[4]), reinterpret_cast<size_t>(&orig[4]),
          10 * sizeof(uint32_t), StubTest::GetEntrypoint(self, kQuickMemcpy), self);

  EXPECT_EQ(orig[0], trg[0]);

  for (size_t i = 1; i < 4; ++i) {
    EXPECT_NE(orig[i], trg[i]);
  }

  for (size_t i = 4; i < 14; ++i) {
    EXPECT_EQ(orig[i], trg[i]);
  }

  for (size_t i = 14; i < 20; ++i) {
    EXPECT_NE(orig[i], trg[i]);
  }

  // TODO: Test overlapping?

#else
  LOG(INFO) << "Skipping memcpy as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping memcpy as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

TEST_F(StubTest, LockObject) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  static constexpr size_t kThinLockLoops = 100;

  Thread* self = Thread::Current();

  const uintptr_t art_quick_lock_object = StubTest::GetEntrypoint(self, kQuickLockObject);

  // Create an object
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  StackHandleScope<2> hs(soa.Self());
  Handle<mirror::String> obj(
      hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
  LockWord lock = obj->GetLockWord(false);
  LockWord::LockState old_state = lock.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);

  Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);

  LockWord lock_after = obj->GetLockWord(false);
  LockWord::LockState new_state = lock_after.GetState();
  EXPECT_EQ(LockWord::LockState::kThinLocked, new_state);
  EXPECT_EQ(lock_after.ThinLockCount(), 0U);  // Thin lock starts count at zero

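  // Lock recursively: the lock must stay thin and the count must rise by one
  // per iteration.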
  for (size_t i = 1; i < kThinLockLoops; ++i) {
    Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);

    // Check we're at lock count i

    LockWord l_inc = obj->GetLockWord(false);
    LockWord::LockState l_inc_state = l_inc.GetState();
    EXPECT_EQ(LockWord::LockState::kThinLocked, l_inc_state);
    EXPECT_EQ(l_inc.ThinLockCount(), i);
  }

  // Force a fat lock by running identity hashcode to fill up lock word.
  Handle<mirror::String> obj2(hs.NewHandle(
      mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));

  obj2->IdentityHashCode();

  Invoke3(reinterpret_cast<size_t>(obj2.Get()), 0U, 0U, art_quick_lock_object, self);

  LockWord lock_after2 = obj2->GetLockWord(false);
  LockWord::LockState new_state2 = lock_after2.GetState();
  EXPECT_EQ(LockWord::LockState::kFatLocked, new_state2);
  EXPECT_NE(lock_after2.FatLockMonitor(), static_cast<Monitor*>(nullptr));

  // Test done.
#else
  LOG(INFO) << "Skipping lock_object as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping lock_object as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

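// Deterministic pseudo-random generator (a MINSTD-style multiplicative step
// plus an additive constant) so that the stress test below is reproducible.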
class RandGen {
 public:
  explicit RandGen(uint32_t seed) : val_(seed) {}

  uint32_t next() {
    val_ = val_ * 48271 % 2147483647 + 13;
    return val_;
  }

  uint32_t val_;
};


// NO_THREAD_SAFETY_ANALYSIS as we do not want to grab exclusive mutator lock for MonitorInfo.
static void TestUnlockObject(StubTest* test) NO_THREAD_SAFETY_ANALYSIS {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  static constexpr size_t kThinLockLoops = 100;

  Thread* self = Thread::Current();

  const uintptr_t art_quick_lock_object = StubTest::GetEntrypoint(self, kQuickLockObject);
  const uintptr_t art_quick_unlock_object = StubTest::GetEntrypoint(self, kQuickUnlockObject);
  // Create an object
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init
  static constexpr size_t kNumberOfLocks = 10;  // Number of objects, one lock each.
  StackHandleScope<kNumberOfLocks + 1> hs(self);
  Handle<mirror::String> obj(
      hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
  LockWord lock = obj->GetLockWord(false);
  LockWord::LockState old_state = lock.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);

  test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_unlock_object, self);
  // This should be an illegal monitor state.
  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

  LockWord lock_after = obj->GetLockWord(false);
  LockWord::LockState new_state = lock_after.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, new_state);

  test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);

  LockWord lock_after2 = obj->GetLockWord(false);
  LockWord::LockState new_state2 = lock_after2.GetState();
  EXPECT_EQ(LockWord::LockState::kThinLocked, new_state2);

  test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_unlock_object, self);

  LockWord lock_after3 = obj->GetLockWord(false);
  LockWord::LockState new_state3 = lock_after3.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, new_state3);

  // Stress test:
  // Keep a number of objects and their locks in flight. Randomly lock or unlock one of them in
  // each step.

  RandGen r(0x1234);

  constexpr size_t kIterations = 10000;  // Number of iterations
  constexpr size_t kMoveToFat = 1000;    // Chance of 1:kMoveToFat to make a lock fat.

  size_t counts[kNumberOfLocks];
  bool fat[kNumberOfLocks];  // Whether a lock should be thin or fat.
  Handle<mirror::String> objects[kNumberOfLocks];

  // Initialize = allocate.
  for (size_t i = 0; i < kNumberOfLocks; ++i) {
    counts[i] = 0;
    fat[i] = false;
    objects[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), ""));
  }

  for (size_t i = 0; i < kIterations; ++i) {
    // Select which lock to update.
    size_t index = r.next() % kNumberOfLocks;

    // Make lock fat?
    if (!fat[index] && (r.next() % kMoveToFat == 0)) {
      fat[index] = true;
      objects[index]->IdentityHashCode();

      LockWord lock_iter = objects[index]->GetLockWord(false);
      LockWord::LockState iter_state = lock_iter.GetState();
      if (counts[index] == 0) {
        EXPECT_EQ(LockWord::LockState::kHashCode, iter_state);
      } else {
        EXPECT_EQ(LockWord::LockState::kFatLocked, iter_state);
      }
    } else {
      bool take_lock;  // Whether to lock or unlock in this step.
      if (counts[index] == 0) {
        take_lock = true;
      } else if (counts[index] == kThinLockLoops) {
        take_lock = false;
      } else {
        // Randomly.
        take_lock = r.next() % 2 == 0;
      }

      if (take_lock) {
        test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U, art_quick_lock_object,
                      self);
        counts[index]++;
      } else {
        test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U,
                      art_quick_unlock_object, self);
        counts[index]--;
      }

      EXPECT_FALSE(self->IsExceptionPending());

      // Check the new state.
      LockWord lock_iter = objects[index]->GetLockWord(true);
      LockWord::LockState iter_state = lock_iter.GetState();
      if (fat[index]) {
        // Abuse MonitorInfo.
        EXPECT_EQ(LockWord::LockState::kFatLocked, iter_state) << index;
        MonitorInfo info(objects[index].Get());
        EXPECT_EQ(counts[index], info.entry_count_) << index;
      } else {
        if (counts[index] > 0) {
          EXPECT_EQ(LockWord::LockState::kThinLocked, iter_state);
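          // ThinLockCount() is zero-based: a count of 0 means locked once.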
          EXPECT_EQ(counts[index] - 1, lock_iter.ThinLockCount());
        } else {
          EXPECT_EQ(LockWord::LockState::kUnlocked, iter_state);
        }
      }
    }
  }

  // Unlock the remaining count times and then check it's unlocked. Then deallocate.
  // Go reverse order to correctly handle Handles.
  for (size_t i = 0; i < kNumberOfLocks; ++i) {
    size_t index = kNumberOfLocks - 1 - i;
    size_t count = counts[index];
    while (count > 0) {
      test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U, art_quick_unlock_object,
                    self);
      count--;
    }

    LockWord lock_after4 = objects[index]->GetLockWord(false);
    LockWord::LockState new_state4 = lock_after4.GetState();
    EXPECT_TRUE(LockWord::LockState::kUnlocked == new_state4
                || LockWord::LockState::kFatLocked == new_state4);
  }

  // Test done.
#else
  UNUSED(test);
  LOG(INFO) << "Skipping unlock_object as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping unlock_object as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

TEST_F(StubTest, UnlockObject) {
  // This will lead to monitor error messages in the log.
  ScopedLogSeverity sls(LogSeverity::FATAL);

  TestUnlockObject(this);
}

#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
extern "C" void art_quick_check_instance_of(void);
#endif

TEST_F(StubTest, CheckCast) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  Thread* self = Thread::Current();

  const uintptr_t art_quick_check_instance_of =
      StubTest::GetEntrypoint(self, kQuickCheckInstanceOf);

  // Find some classes.
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  VariableSizedHandleScope hs(soa.Self());
  Handle<mirror::Class> klass_obj(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
  Handle<mirror::Class> klass_str(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/String;")));
  Handle<mirror::Class> klass_list(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/util/List;")));
  Handle<mirror::Class> klass_cloneable(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Cloneable;")));
  Handle<mirror::Class> klass_array_list(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/util/ArrayList;")));
  Handle<mirror::Object> obj(hs.NewHandle(klass_obj->AllocObject(soa.Self())));
  Handle<mirror::String> string(hs.NewHandle(
      mirror::String::AllocFromModifiedUtf8(soa.Self(), "ABCD")));
  Handle<mirror::Object> array_list(hs.NewHandle(klass_array_list->AllocObject(soa.Self())));

  EXPECT_FALSE(self->IsExceptionPending());

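  // Expected true: anything is an instance of java.lang.Object.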
  Invoke3(reinterpret_cast<size_t>(obj.Get()),
          reinterpret_cast<size_t>(klass_obj.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_FALSE(self->IsExceptionPending());

  // Expected true: Test string instance of java.lang.String.
  Invoke3(reinterpret_cast<size_t>(string.Get()),
          reinterpret_cast<size_t>(klass_str.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_FALSE(self->IsExceptionPending());

  // Expected true: Test string instance of java.lang.Object.
  Invoke3(reinterpret_cast<size_t>(string.Get()),
          reinterpret_cast<size_t>(klass_obj.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_FALSE(self->IsExceptionPending());

  // Expected false: Test object instance of java.lang.String.
  Invoke3(reinterpret_cast<size_t>(obj.Get()),
          reinterpret_cast<size_t>(klass_str.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

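  // Expected true: java.util.ArrayList implements java.util.List.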
  Invoke3(reinterpret_cast<size_t>(array_list.Get()),
          reinterpret_cast<size_t>(klass_list.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_FALSE(self->IsExceptionPending());

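  // Expected true: java.util.ArrayList implements java.lang.Cloneable.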
  Invoke3(reinterpret_cast<size_t>(array_list.Get()),
          reinterpret_cast<size_t>(klass_cloneable.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_FALSE(self->IsExceptionPending());

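  // Expected false: a String is not an ArrayList.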
  Invoke3(reinterpret_cast<size_t>(string.Get()),
          reinterpret_cast<size_t>(klass_array_list.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

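  // Expected false: java.lang.String does not implement Cloneable.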
  Invoke3(reinterpret_cast<size_t>(string.Get()),
          reinterpret_cast<size_t>(klass_cloneable.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

#else
  LOG(INFO) << "Skipping check_cast as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping check_cast as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

TEST_F(StubTest, AllocObject) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  // This will lead to OOM error messages in the log.
  ScopedLogSeverity sls(LogSeverity::FATAL);

  // TODO: Check the "Unresolved" allocation stubs

  Thread* self = Thread::Current();
  // Create an object
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  StackHandleScope<2> hs(soa.Self());
  Handle<mirror::Class> c(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));

  // Play with it...

  EXPECT_FALSE(self->IsExceptionPending());
  {
    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
                            StubTest::GetEntrypoint(self, kQuickAllocObjectWithChecks),
                            self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    ObjPtr<mirror::Object> obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_OBJ_PTR_EQ(c.Get(), obj->GetClass());
    VerifyObject(obj);
  }

  {
    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
                            StubTest::GetEntrypoint(self, kQuickAllocObjectResolved),
                            self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    ObjPtr<mirror::Object> obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_OBJ_PTR_EQ(c.Get(), obj->GetClass());
    VerifyObject(obj);
  }

  {
    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
                            StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
                            self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    ObjPtr<mirror::Object> obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_OBJ_PTR_EQ(c.Get(), obj->GetClass());
    VerifyObject(obj);
  }

  // Failure tests.

  // Out-of-memory.
  {
    Runtime::Current()->GetHeap()->SetIdealFootprint(1 * GB);

    // Array helps to fill memory faster.
    Handle<mirror::Class> ca(
        hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));

    // Use arbitrary large amount for now.
    static const size_t kMaxHandles = 1000000;
    std::unique_ptr<StackHandleScope<kMaxHandles>> hsp(new StackHandleScope<kMaxHandles>(self));

    std::vector<Handle<mirror::Object>> handles;
    // Start allocating with 128K
    size_t length = 128 * KB / 4;
    while (length > 10) {
      Handle<mirror::Object> h(hsp->NewHandle<mirror::Object>(
          mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), ca.Get(), length / 4)));
      if (self->IsExceptionPending() || h == nullptr) {
        self->ClearException();

        // Try a smaller length
        length = length / 8;
        // Use at most half the reported free space.
        size_t mem = Runtime::Current()->GetHeap()->GetFreeMemory();
        if (length * 8 > mem) {
          length = mem / 8;
        }
      } else {
        handles.push_back(h);
      }
    }
    LOG(INFO) << "Used " << handles.size() << " arrays to fill space.";

    // Allocate simple objects till it fails.
    while (!self->IsExceptionPending()) {
      Handle<mirror::Object> h = hsp->NewHandle(c->AllocObject(soa.Self()));
      if (!self->IsExceptionPending() && h != nullptr) {
        handles.push_back(h);
      }
    }
    self->ClearException();

    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
                            StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
                            self);
    EXPECT_TRUE(self->IsExceptionPending());
    self->ClearException();
    EXPECT_EQ(reinterpret_cast<size_t>(nullptr), result);
  }

  // Tests done.
#else
  LOG(INFO) << "Skipping alloc_object as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping alloc_object as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

TEST_F(StubTest, AllocObjectArray) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  // TODO: Check the "Unresolved" allocation stubs

  // This will lead to OOM error messages in the log.
  ScopedLogSeverity sls(LogSeverity::FATAL);

  Thread* self = Thread::Current();
  // Create an object
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  StackHandleScope<1> hs(self);
  Handle<mirror::Class> c(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));

  // Play with it...

  EXPECT_FALSE(self->IsExceptionPending());

  {
    // We can use null in the second argument as we do not need a method here (not used in
    // resolved/initialized cases)
    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 10U,
                            reinterpret_cast<size_t>(nullptr),
                            StubTest::GetEntrypoint(self, kQuickAllocArrayResolved32),
                            self);
    EXPECT_FALSE(self->IsExceptionPending()) << mirror::Object::PrettyTypeOf(self->GetException());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    ObjPtr<mirror::Object> obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_TRUE(obj->IsArrayInstance());
    EXPECT_TRUE(obj->IsObjectArray());
    EXPECT_OBJ_PTR_EQ(c.Get(), obj->GetClass());
    VerifyObject(obj);
    ObjPtr<mirror::Array> array = reinterpret_cast<mirror::Array*>(result);
    EXPECT_EQ(array->GetLength(), 10);
  }

  // Failure tests.

  // Out-of-memory.
  {
    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()),
                            GB,  // that should fail...
                            reinterpret_cast<size_t>(nullptr),
                            StubTest::GetEntrypoint(self, kQuickAllocArrayResolved32),
                            self);

    EXPECT_TRUE(self->IsExceptionPending());
    self->ClearException();
    EXPECT_EQ(reinterpret_cast<size_t>(nullptr), result);
  }

  // Tests done.
#else
  LOG(INFO) << "Skipping alloc_array as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping alloc_array as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}


TEST_F(StubTest, StringCompareTo) {
  // There is no StringCompareTo runtime entrypoint for __arm__ or __aarch64__.
#if defined(__i386__) || (defined(__x86_64__) && !defined(__APPLE__))
  // TODO: Check the "Unresolved" allocation stubs

  Thread* self = Thread::Current();

  const uintptr_t art_quick_string_compareto = StubTest::GetEntrypoint(self, kQuickStringCompareTo);

  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  // Create some strings
  // Use array so we can index into it and use a matrix for expected results
  // Setup: The first half is standard. The second half uses a non-zero offset.
  // TODO: Shared backing arrays.
  const char* c[] = { "", "", "a", "aa", "ab",
      "aacaacaacaacaacaac",  // This one's under the default limit to go to __memcmp16.
      "aacaacaacaacaacaacaacaacaacaacaacaac",     // This one's over.
      "aacaacaacaacaacaacaacaacaacaacaacaaca" };  // As is this one. We need a separate one to
                                                  // defeat object-equal optimizations.
  static constexpr size_t kStringCount = arraysize(c);

  StackHandleScope<kStringCount> hs(self);
  Handle<mirror::String> s[kStringCount];

  for (size_t i = 0; i < kStringCount; ++i) {
    s[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), c[i]));
  }

  // TODO: wide characters

  // Matrix of expectations. First component is first parameter. Note we only check against the
  // sign, not the value. As we are testing random offsets, we need to compute this and need to
  // rely on String::CompareTo being correct.
  int32_t expected[kStringCount][kStringCount];
  for (size_t x = 0; x < kStringCount; ++x) {
    for (size_t y = 0; y < kStringCount; ++y) {
      expected[x][y] = s[x]->CompareTo(s[y].Get());
    }
  }

  // Play with it...

  for (size_t x = 0; x < kStringCount; ++x) {
    for (size_t y = 0; y < kStringCount; ++y) {
      // Test string_compareto x y
      size_t result = Invoke3(reinterpret_cast<size_t>(s[x].Get()),
                              reinterpret_cast<size_t>(s[y].Get()), 0U,
                              art_quick_string_compareto, self);

      EXPECT_FALSE(self->IsExceptionPending());

      // The result is a 32b signed integer
      union {
        size_t r;
        int32_t i;
      } conv;
      conv.r = result;
      int32_t e = expected[x][y];
      EXPECT_TRUE(e == 0 ? conv.i == 0 : true) << "x=" << c[x] << " y=" << c[y] << " res=" <<
          conv.r;
      EXPECT_TRUE(e < 0 ? conv.i < 0 : true)   << "x=" << c[x] << " y=" << c[y] << " res=" <<
          conv.r;
      EXPECT_TRUE(e > 0 ? conv.i > 0 : true)   << "x=" << c[x] << " y=" << c[y] << " res=" <<
          conv.r;
    }
  }

  // TODO: Deallocate things.

  // Tests done.
#else
  LOG(INFO) << "Skipping string_compareto as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping string_compareto as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

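// The GetSet* helpers below each round-trip a list of values through the
// quick set/get entrypoints (and, for instance fields, through ArtField as
// well) and check that every value is read back unchanged.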
static void GetSetBooleanStatic(ArtField* f, Thread* self,
                                ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  constexpr size_t num_values = 5;
  uint8_t values[num_values] = { 0, 1, 2, 128, 0xFF };

  for (size_t i = 0; i < num_values; ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              static_cast<size_t>(values[i]),
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet8Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGetBooleanStatic),
                                           self,
                                           referrer);
    // Booleans are currently stored as uint8_t, so be zealous about asserting that writes and
    // reads round-trip exactly.
    EXPECT_EQ(values[i], static_cast<uint8_t>(res)) << "Iteration " << i;
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set_boolean_static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_boolean_static as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}
static void GetSetByteStatic(ArtField* f, Thread* self, ArtMethod* referrer,
                             StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  int8_t values[] = { -128, -64, 0, 64, 127 };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              static_cast<size_t>(values[i]),
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet8Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGetByteStatic),
                                           self,
                                           referrer);
    EXPECT_EQ(values[i], static_cast<int8_t>(res)) << "Iteration " << i;
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set_byte_static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_byte_static as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}


static void GetSetBooleanInstance(Handle<mirror::Object>* obj, ArtField* f, Thread* self,
                                  ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  uint8_t values[] = { 0, true, 2, 128, 0xFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet8Instance),
                              self,
                              referrer);

    uint8_t res = f->GetBoolean(obj->Get());
    EXPECT_EQ(values[i], res) << "Iteration " << i;

    f->SetBoolean<false>(obj->Get(), res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGetBooleanInstance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<uint8_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set_boolean_instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_boolean_instance as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}
static void GetSetByteInstance(Handle<mirror::Object>* obj, ArtField* f,
                               Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  int8_t values[] = { -128, -64, 0, 64, 127 };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet8Instance),
                              self,
                              referrer);

    int8_t res = f->GetByte(obj->Get());
    EXPECT_EQ(res, values[i]) << "Iteration " << i;
    f->SetByte<false>(obj->Get(), ++res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGetByteInstance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<int8_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set_byte_instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_byte_instance as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

static void GetSetCharStatic(ArtField* f, Thread* self, ArtMethod* referrer,
                             StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              static_cast<size_t>(values[i]),
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet16Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGetCharStatic),
                                           self,
                                           referrer);

    EXPECT_EQ(values[i], static_cast<uint16_t>(res)) << "Iteration " << i;
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set_char_static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_char_static as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

static void GetSetShortStatic(ArtField* f, Thread* self,
                              ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              static_cast<size_t>(values[i]),
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet16Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGetShortStatic),
                                           self,
                                           referrer);

    EXPECT_EQ(static_cast<int16_t>(res), values[i]) << "Iteration " << i;
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set_short_static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_short_static as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

static void GetSetCharInstance(Handle<mirror::Object>* obj, ArtField* f,
                               Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet16Instance),
                              self,
                              referrer);

    uint16_t res = f->GetChar(obj->Get());
    EXPECT_EQ(res, values[i]) << "Iteration " << i;
    f->SetChar<false>(obj->Get(), ++res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGetCharInstance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<uint16_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set_char_instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_char_instance as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

static void GetSetShortInstance(Handle<mirror::Object>* obj, ArtField* f,
                                Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet16Instance),
                              self,
                              referrer);

    int16_t res = f->GetShort(obj->Get());
    EXPECT_EQ(res, values[i]) << "Iteration " << i;
    f->SetShort<false>(obj->Get(), ++res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGetShortInstance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<int16_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set_short_instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_short_instance as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

static void GetSet32Static(ArtField* f, Thread* self, ArtMethod* referrer,
                           StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              static_cast<size_t>(values[i]),
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet32Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGet32Static),
                                           self,
                                           referrer);

    EXPECT_EQ(res, values[i]) << "Iteration " << i;
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set32static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set32static as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


static void GetSet32Instance(Handle<mirror::Object>* obj, ArtField* f,
                             Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet32Instance),
                              self,
                              referrer);

    int32_t res = f->GetInt(obj->Get());
    EXPECT_EQ(res, static_cast<int32_t>(values[i])) << "Iteration " << i;

    res++;
    f->SetInt<false>(obj->Get(), res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGet32Instance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<int32_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set32instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set32instance as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))

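// Helper for GetSetObjStatic below. The value is wrapped in a Handle so that the reference
// stays visible to (and updatable by) the GC if a stub invocation suspends the thread.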
static void set_and_check_static(uint32_t f_idx,
                                 ObjPtr<mirror::Object> val,
                                 Thread* self,
                                 ArtMethod* referrer,
                                 StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  StackHandleScope<1u> hs(self);
  Handle<mirror::Object> h_val = hs.NewHandle(val);
  test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
                            reinterpret_cast<size_t>(h_val.Get()),
                            0U,
                            StubTest::GetEntrypoint(self, kQuickSetObjStatic),
                            self,
                            referrer);

  size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
                                         0U,
                                         0U,
                                         StubTest::GetEntrypoint(self, kQuickGetObjStatic),
                                         self,
                                         referrer);

  EXPECT_EQ(res, reinterpret_cast<size_t>(h_val.Get())) << "Value " << h_val.Get();
}
#endif

static void GetSetObjStatic(ArtField* f, Thread* self, ArtMethod* referrer,
                            StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  set_and_check_static(f->GetDexFieldIndex(), nullptr, self, referrer, test);

  // Allocate a string object for simplicity.
  ObjPtr<mirror::String> str = mirror::String::AllocFromModifiedUtf8(self, "Test");
  set_and_check_static(f->GetDexFieldIndex(), str, self, referrer, test);

  set_and_check_static(f->GetDexFieldIndex(), nullptr, self, referrer, test);
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping setObjstatic as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping setObjstatic as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
static void set_and_check_instance(ArtField* f,
                                   ObjPtr<mirror::Object> trg,
                                   ObjPtr<mirror::Object> val,
                                   Thread* self,
                                   ArtMethod* referrer,
                                   StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  StackHandleScope<2u> hs(self);
  Handle<mirror::Object> h_trg = hs.NewHandle(trg);
  Handle<mirror::Object> h_val = hs.NewHandle(val);
  test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                            reinterpret_cast<size_t>(h_trg.Get()),
                            reinterpret_cast<size_t>(h_val.Get()),
                            StubTest::GetEntrypoint(self, kQuickSetObjInstance),
                            self,
                            referrer);

  size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                         reinterpret_cast<size_t>(h_trg.Get()),
                                         0U,
                                         StubTest::GetEntrypoint(self, kQuickGetObjInstance),
                                         self,
                                         referrer);

  EXPECT_EQ(res, reinterpret_cast<size_t>(h_val.Get())) << "Value " << h_val.Get();

  EXPECT_OBJ_PTR_EQ(h_val.Get(), f->GetObj(h_trg.Get()));
}
#endif

static void GetSetObjInstance(Handle<mirror::Object>* obj, ArtField* f,
                              Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);

  // Allocate a string object for simplicity.
  ObjPtr<mirror::String> str = mirror::String::AllocFromModifiedUtf8(self, "Test");
  set_and_check_instance(f, obj->Get(), str, self, referrer, test);

  set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping setObjinstance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping setObjinstance as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


// TODO: Complete these tests for 32b architectures

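// The 64-bit tests below run only on 64-bit targets: the Invoke3WithReferrer arguments are
// passed as size_t, so a 64-bit value would not fit in a single register on 32-bit ISAs,
// which is presumably what the TODO above refers to.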
static void GetSet64Static(ArtField* f, Thread* self, ArtMethod* referrer,
                           StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
  uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    // 64 bit FieldSet stores the set value in the second register.
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              values[i],
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet64Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGet64Static),
                                           self,
                                           referrer);

    EXPECT_EQ(res, values[i]) << "Iteration " << i;
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set64static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set64static as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


static void GetSet64Instance(Handle<mirror::Object>* obj, ArtField* f,
                             Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
  uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet64Instance),
                              self,
                              referrer);

    int64_t res = f->GetLong(obj->Get());
    EXPECT_EQ(res, static_cast<int64_t>(values[i])) << "Iteration " << i;

    res++;
    f->SetLong<false>(obj->Get(), res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGet64Instance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<int64_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

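// Shared driver: allocates an instance of the AllFields test class, then exercises every
// static and instance field whose primitive type matches test_type via the getter/setter
// stub helpers above.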
static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type) {
  // garbage is created during ClassLinker::Init

  JNIEnv* env = Thread::Current()->GetJniEnv();
  jclass jc = env->FindClass("AllFields");
  CHECK(jc != nullptr);
  jobject o = env->AllocObject(jc);
  CHECK(o != nullptr);

  ScopedObjectAccess soa(self);
  StackHandleScope<3> hs(self);
  Handle<mirror::Object> obj(hs.NewHandle(soa.Decode<mirror::Object>(o)));
  Handle<mirror::Class> c(hs.NewHandle(obj->GetClass()));
  // Need a method as a referrer
  ArtMethod* m = c->GetDirectMethod(0, kRuntimePointerSize);
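  // (The stubs resolve the dex field index relative to the referrer, so any method of the
  // declaring class should do here.)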

  // Play with it...

  // Static fields.
  for (ArtField& f : c->GetFields()) {
    if (!f.IsStatic()) {
      continue;
    }
    Primitive::Type type = f.GetTypeAsPrimitiveType();
    if (test_type != type) {
      continue;
    }
    switch (type) {
      case Primitive::Type::kPrimBoolean:
        GetSetBooleanStatic(&f, self, m, test);
        break;
      case Primitive::Type::kPrimByte:
        GetSetByteStatic(&f, self, m, test);
        break;
      case Primitive::Type::kPrimChar:
        GetSetCharStatic(&f, self, m, test);
        break;
      case Primitive::Type::kPrimShort:
        GetSetShortStatic(&f, self, m, test);
        break;
      case Primitive::Type::kPrimInt:
        GetSet32Static(&f, self, m, test);
        break;
      case Primitive::Type::kPrimLong:
        GetSet64Static(&f, self, m, test);
        break;
      case Primitive::Type::kPrimNot:
        // Don't try array.
        if (f.GetTypeDescriptor()[0] != '[') {
          GetSetObjStatic(&f, self, m, test);
        }
        break;
      default:
        break;  // Skip.
    }
  }

  // Instance fields.
  for (ArtField& f : c->GetFields()) {
    if (f.IsStatic()) {
      continue;
    }
    Primitive::Type type = f.GetTypeAsPrimitiveType();
    if (test_type != type) {
      continue;
    }
    switch (type) {
      case Primitive::Type::kPrimBoolean:
        GetSetBooleanInstance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimByte:
        GetSetByteInstance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimChar:
        GetSetCharInstance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimShort:
        GetSetShortInstance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimInt:
        GetSet32Instance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimLong:
        GetSet64Instance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimNot:
        // Don't try array.
        if (f.GetTypeDescriptor()[0] != '[') {
          GetSetObjInstance(&obj, &f, self, m, test);
        }
        break;
      default:
        break;  // Skip.
    }
  }

  // TODO: Deallocate things.
}

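// Each Fields* test below loads the AllFields dex file and fully starts the runtime before
// driving TestFields for the relevant primitive type(s).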
TEST_F(StubTest, Fields8) {
  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimBoolean);
  TestFields(self, this, Primitive::Type::kPrimByte);
}

TEST_F(StubTest, Fields16) {
  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimChar);
  TestFields(self, this, Primitive::Type::kPrimShort);
}

TEST_F(StubTest, Fields32) {
  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimInt);
}

TEST_F(StubTest, FieldsObj) {
  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimNot);
}

TEST_F(StubTest, Fields64) {
  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimLong);
}

// Disabled, b/27991555.
// FIXME: Hacking the entry point to point to art_quick_to_interpreter_bridge is broken.
// The bridge calls through to GetCalleeSaveMethodCaller() which looks up the pre-header
// and gets a bogus OatQuickMethodHeader* pointing into our assembly code just before
// the bridge and uses that to check for inlined frames, crashing in the process.
TEST_F(StubTest, DISABLED_IMT) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  Thread* self = Thread::Current();

  ScopedObjectAccess soa(self);
  StackHandleScope<7> hs(self);

  JNIEnv* env = Thread::Current()->GetJniEnv();

  // ArrayList

  // Load ArrayList and used methods (JNI).
  jclass arraylist_jclass = env->FindClass("java/util/ArrayList");
  ASSERT_NE(nullptr, arraylist_jclass);
  jmethodID arraylist_constructor = env->GetMethodID(arraylist_jclass, "<init>", "()V");
  ASSERT_NE(nullptr, arraylist_constructor);
  jmethodID contains_jmethod = env->GetMethodID(
      arraylist_jclass, "contains", "(Ljava/lang/Object;)Z");
  ASSERT_NE(nullptr, contains_jmethod);
  jmethodID add_jmethod = env->GetMethodID(arraylist_jclass, "add", "(Ljava/lang/Object;)Z");
  ASSERT_NE(nullptr, add_jmethod);

  // Get representation.
  ArtMethod* contains_amethod = jni::DecodeArtMethod(contains_jmethod);

  // Patch up ArrayList.contains.
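  // If the method has no compiled code, point its entrypoint at the quick-to-interpreter
  // bridge so the stub invocations below have something to call (see the FIXME above).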
  if (contains_amethod->GetEntryPointFromQuickCompiledCode() == nullptr) {
    contains_amethod->SetEntryPointFromQuickCompiledCode(reinterpret_cast<void*>(
        StubTest::GetEntrypoint(self, kQuickQuickToInterpreterBridge)));
  }

  // List

  // Load List and used methods (JNI).
  jclass list_jclass = env->FindClass("java/util/List");
  ASSERT_NE(nullptr, list_jclass);
  jmethodID inf_contains_jmethod = env->GetMethodID(
      list_jclass, "contains", "(Ljava/lang/Object;)Z");
  ASSERT_NE(nullptr, inf_contains_jmethod);

  // Get mirror representation.
  ArtMethod* inf_contains = jni::DecodeArtMethod(inf_contains_jmethod);

  // Object

  jclass obj_jclass = env->FindClass("java/lang/Object");
  ASSERT_NE(nullptr, obj_jclass);
  jmethodID obj_constructor = env->GetMethodID(obj_jclass, "<init>", "()V");
  ASSERT_NE(nullptr, obj_constructor);

  // Create instances.

  jobject jarray_list = env->NewObject(arraylist_jclass, arraylist_constructor);
  ASSERT_NE(nullptr, jarray_list);
  Handle<mirror::Object> array_list(hs.NewHandle(soa.Decode<mirror::Object>(jarray_list)));

  jobject jobj = env->NewObject(obj_jclass, obj_constructor);
  ASSERT_NE(nullptr, jobj);
  Handle<mirror::Object> obj(hs.NewHandle(soa.Decode<mirror::Object>(jobj)));

  // Invocation tests.

  // 1. imt_conflict

  // Contains.

  // We construct the ImtConflictTable ourselves, as we cannot go into the runtime stub
  // that will create it: the runtime stub expects to be called by compiled code.
  LinearAlloc* linear_alloc = Runtime::Current()->GetLinearAlloc();
  ArtMethod* conflict_method = Runtime::Current()->CreateImtConflictMethod(linear_alloc);
  ImtConflictTable* empty_conflict_table =
      Runtime::Current()->GetClassLinker()->CreateImtConflictTable(/*count=*/0u, linear_alloc);
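  // Build a one-entry table by hand: allocate room for the empty table plus one more entry,
  // then placement-new a copy that appends the (interface method, implementation) pair.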
  void* data = linear_alloc->Alloc(
      self,
      ImtConflictTable::ComputeSizeWithOneMoreEntry(empty_conflict_table, kRuntimePointerSize),
      LinearAllocKind::kNoGCRoots);
  ImtConflictTable* new_table = new (data) ImtConflictTable(
      empty_conflict_table, inf_contains, contains_amethod, kRuntimePointerSize);
  conflict_method->SetImtConflictTable(new_table, kRuntimePointerSize);

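  // Call the conflict trampoline directly; the hidden argument identifies the interface
  // method being invoked so the trampoline can find the right entry in the conflict table.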
  size_t result =
      Invoke3WithReferrerAndHidden(reinterpret_cast<size_t>(conflict_method),
                                   reinterpret_cast<size_t>(array_list.Get()),
                                   reinterpret_cast<size_t>(obj.Get()),
                                   StubTest::GetEntrypoint(self, kQuickQuickImtConflictTrampoline),
                                   self,
                                   contains_amethod,
                                   static_cast<size_t>(inf_contains->GetDexMethodIndex()));

  ASSERT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(static_cast<size_t>(JNI_FALSE), result);

  // Add object.

  env->CallBooleanMethod(jarray_list, add_jmethod, jobj);

  ASSERT_FALSE(self->IsExceptionPending()) << mirror::Object::PrettyTypeOf(self->GetException());

  // Contains.

  result =
      Invoke3WithReferrerAndHidden(reinterpret_cast<size_t>(conflict_method),
                                   reinterpret_cast<size_t>(array_list.Get()),
                                   reinterpret_cast<size_t>(obj.Get()),
                                   StubTest::GetEntrypoint(self, kQuickQuickImtConflictTrampoline),
                                   self,
                                   contains_amethod,
                                   static_cast<size_t>(inf_contains->GetDexMethodIndex()));

  ASSERT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(static_cast<size_t>(JNI_TRUE), result);

  // 2. regular interface trampoline

  result = Invoke3WithReferrer(static_cast<size_t>(inf_contains->GetDexMethodIndex()),
                               reinterpret_cast<size_t>(array_list.Get()),
                               reinterpret_cast<size_t>(obj.Get()),
                               StubTest::GetEntrypoint(self,
                                   kQuickInvokeInterfaceTrampolineWithAccessCheck),
                               self, contains_amethod);

  ASSERT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(static_cast<size_t>(JNI_TRUE), result);

  result = Invoke3WithReferrer(
      static_cast<size_t>(inf_contains->GetDexMethodIndex()),
      reinterpret_cast<size_t>(array_list.Get()), reinterpret_cast<size_t>(array_list.Get()),
      StubTest::GetEntrypoint(self, kQuickInvokeInterfaceTrampolineWithAccessCheck), self,
      contains_amethod);

  ASSERT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(static_cast<size_t>(JNI_FALSE), result);
#else
  LOG(INFO) << "Skipping imt as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping imt as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

TEST_F(StubTest, StringIndexOf) {
#if defined(__arm__) || defined(__aarch64__)
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  // Create some strings.
  // Use an array so we can index into it and use a matrix for expected results.
  // TODO: Shared backing arrays.
  const char* c_str[] = { "", "a", "ba", "cba", "dcba", "edcba", "asdfghjkl" };
  static constexpr size_t kStringCount = arraysize(c_str);
  const char c_char[] = { 'a', 'b', 'c', 'd', 'e' };
  static constexpr size_t kCharCount = arraysize(c_char);

  StackHandleScope<kStringCount> hs(self);
  Handle<mirror::String> s[kStringCount];

  for (size_t i = 0; i < kStringCount; ++i) {
    s[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), c_str[i]));
  }

  // Matrix of expectations. First component is the string, second the character, third the
  // start index (offset by 1). As we test a range of start offsets, we compute the expected
  // results up front and rely on String::FastIndexOf being correct.
  static constexpr size_t kMaxLen = 9;
  DCHECK_LE(strlen(c_str[kStringCount - 1]), kMaxLen) << "Please fix the indexof test.";

  // Last dimension: start, offset by 1.
  int32_t expected[kStringCount][kCharCount][kMaxLen + 3];
  for (size_t x = 0; x < kStringCount; ++x) {
    for (size_t y = 0; y < kCharCount; ++y) {
      for (size_t z = 0; z <= kMaxLen + 2; ++z) {
        expected[x][y][z] = s[x]->FastIndexOf(c_char[y], static_cast<int32_t>(z) - 1);
      }
    }
  }

  // Play with it...

  for (size_t x = 0; x < kStringCount; ++x) {
    for (size_t y = 0; y < kCharCount; ++y) {
      for (size_t z = 0; z <= kMaxLen + 2; ++z) {
        int32_t start = static_cast<int32_t>(z) - 1;

        // Test indexof for string x, char y, starting at 'start'.
        size_t result = Invoke3(reinterpret_cast<size_t>(s[x].Get()), c_char[y], start,
                                StubTest::GetEntrypoint(self, kQuickIndexOf), self);

        EXPECT_FALSE(self->IsExceptionPending());

        // The result is a 32b signed integer.
        union {
          size_t r;
          int32_t i;
        } conv;
        conv.r = result;

        EXPECT_EQ(expected[x][y][z], conv.i) << "Wrong result for " << c_str[x] << " / " <<
            c_char[y] << " @ " << start;
      }
    }
  }

  // TODO: Deallocate things.

  // Tests done.
#else
  LOG(INFO) << "Skipping indexof as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping indexof as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

// TODO: Exercise the ReadBarrierMarkRegX entry points.

TEST_F(StubTest, ReadBarrier) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  if (gUseReadBarrier) {
    Thread* self = Thread::Current();

    const uintptr_t readBarrierSlow = StubTest::GetEntrypoint(self, kQuickReadBarrierSlow);

    // Create an object.
    ScopedObjectAccess soa(self);
    // garbage is created during ClassLinker::Init

    StackHandleScope<2> hs(soa.Self());
    Handle<mirror::Class> c(
        hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));

    // Build an object instance.
    Handle<mirror::Object> obj(hs.NewHandle(c->AllocObject(soa.Self())));

    EXPECT_FALSE(self->IsExceptionPending());

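    // The slow-path read barrier takes (ref, obj, offset) and returns the, possibly
    // to-space, reference stored in the field at obj + offset; here, obj's class field.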
    size_t result = Invoke3(0U, reinterpret_cast<size_t>(obj.Get()),
                            mirror::Object::ClassOffset().SizeValue(), readBarrierSlow, self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    mirror::Class* klass = reinterpret_cast<mirror::Class*>(result);
    EXPECT_OBJ_PTR_EQ(klass, obj->GetClass());
    return;
  }
#endif
  LOG(INFO) << "Skipping read_barrier_slow";
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping read_barrier_slow" << std::endl;
}

TEST_F(StubTest, ReadBarrierForRoot) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  if (gUseReadBarrier) {
    Thread* self = Thread::Current();

    const uintptr_t readBarrierForRootSlow =
        StubTest::GetEntrypoint(self, kQuickReadBarrierForRootSlow);

    // Create an object.
    ScopedObjectAccess soa(self);
    // garbage is created during ClassLinker::Init

    StackHandleScope<1> hs(soa.Self());

    Handle<mirror::String> obj(
        hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));

    EXPECT_FALSE(self->IsExceptionPending());

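    // The root slow path takes the address of a GcRoot and returns the, possibly marked,
    // reference it holds; for the String class root that must equal obj's class.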
    GcRoot<mirror::Class> root(GetClassRoot<mirror::String>());
    size_t result = Invoke3(reinterpret_cast<size_t>(&root), 0U, 0U, readBarrierForRootSlow, self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    mirror::Class* klass = reinterpret_cast<mirror::Class*>(result);
    EXPECT_OBJ_PTR_EQ(klass, obj->GetClass());
    return;
  }
#endif
  LOG(INFO) << "Skipping read_barrier_for_root_slow";
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping read_barrier_for_root_slow" << std::endl;
}

}  // namespace art