/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cstdio>

#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/callee_save_type.h"
#include "base/enums.h"
#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "imt_conflict_table.h"
#include "jni_internal.h"
#include "linear_alloc.h"
#include "mirror/class-inl.h"
#include "mirror/string-inl.h"
#include "scoped_thread_state_change-inl.h"

namespace art {


class StubTest : public CommonRuntimeTest {
 protected:
  // We need callee-save methods set up in the Runtime for exceptions.
  void SetUp() OVERRIDE {
    // Do the normal setup.
    CommonRuntimeTest::SetUp();

    {
      // Create callee-save methods.
      ScopedObjectAccess soa(Thread::Current());
      runtime_->SetInstructionSet(kRuntimeISA);
      for (uint32_t i = 0; i < static_cast<uint32_t>(CalleeSaveType::kLastCalleeSaveType); ++i) {
        CalleeSaveType type = CalleeSaveType(i);
        if (!runtime_->HasCalleeSaveMethod(type)) {
          runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(), type);
        }
      }
    }
  }

  void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
    // Use a smaller heap.
    for (std::pair<std::string, const void*>& pair : *options) {
      if (pair.first.find("-Xmx") == 0) {
        pair.first = "-Xmx4M";  // Smallest we can go.
      }
    }
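    // Run interpret-only; these tests call the assembly stubs directly, and (we assume)
    // -Xint keeps JIT/AOT-compiled code out of the picture.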
    options->push_back(std::make_pair("-Xint", nullptr));
  }

  // Helper function needed since TEST_F makes a new class.
  Thread::tls_ptr_sized_values* GetTlsPtr(Thread* self) {
    return &self->tlsPtr_;
  }

 public:
  size_t Invoke3(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, Thread* self) {
    return Invoke3WithReferrer(arg0, arg1, arg2, code, self, nullptr);
  }
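  // Typical use from a test body (a sketch, mirroring the tests below): fetch a stub
  // address from the thread's entrypoint table and pass arguments as raw words, e.g.
  //   uintptr_t stub = GetEntrypoint(self, kQuickLockObject);
  //   Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, stub, self);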

  // TODO: Set up a frame according to referrer's specs.
  size_t Invoke3WithReferrer(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, Thread* self,
                             ArtMethod* referrer) {
    return Invoke3WithReferrerAndHidden(arg0, arg1, arg2, code, self, referrer, 0);
  }

  // TODO: Set up a frame according to referrer's specs.
  size_t Invoke3WithReferrerAndHidden(size_t arg0, size_t arg1, size_t arg2, uintptr_t code,
                                      Thread* self, ArtMethod* referrer, size_t hidden) {
    // Push a transition back into managed code onto the linked list in thread.
    ManagedStack fragment;
    self->PushManagedStackFragment(&fragment);

    size_t result;
    size_t fpr_result = 0;
#if defined(__i386__)
    // TODO: Set the thread?
#define PUSH(reg) "push " # reg "\n\t .cfi_adjust_cfa_offset 4\n\t"
#define POP(reg) "pop " # reg "\n\t .cfi_adjust_cfa_offset -4\n\t"
    __asm__ __volatile__(
        "movd %[hidden], %%xmm7\n\t"  // This is a memory op, so do this early. If it is off of
                                      // esp, then we won't be able to access it after spilling.

        // Spill 6 registers.
        PUSH(%%ebx)
        PUSH(%%ecx)
        PUSH(%%edx)
        PUSH(%%esi)
        PUSH(%%edi)
        PUSH(%%ebp)

        // Store the inputs on the stack, keeping the referrer at the top so there is
        // less to clean up after the call.
        PUSH(%[referrer])           // Align stack.
        PUSH(%[referrer])           // Store referrer.

        PUSH(%[arg0])
        PUSH(%[arg1])
        PUSH(%[arg2])
        PUSH(%[code])
        // Now read them back into the required registers.
        POP(%%edi)
        POP(%%edx)
        POP(%%ecx)
        POP(%%eax)
        // Call is prepared now.

        "call *%%edi\n\t"           // Call the stub.
        "addl $8, %%esp\n\t"        // Pop referrer and padding.
        ".cfi_adjust_cfa_offset -8\n\t"

        // Restore 6 registers.
        POP(%%ebp)
        POP(%%edi)
        POP(%%esi)
        POP(%%edx)
        POP(%%ecx)
        POP(%%ebx)

        : "=a" (result)
          // Use the result from eax.
        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code),
          [referrer] "r"(referrer), [hidden] "m"(hidden)
          // This places code into edi, arg0 into eax, arg1 into ecx, and arg2 into edx.
        : "memory", "xmm7");  // clobber.
#undef PUSH
#undef POP
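    // Note on the pattern above: every input uses an "r" constraint, so the compiler
    // may have placed it in any register, including ones this sequence overwrites.
    // Pushing all inputs first and only then popping them into the registers the stub
    // expects avoids any dependence on the compiler's register assignment.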
#elif defined(__arm__)
    __asm__ __volatile__(
        "push {r1-r12, lr}\n\t"     // Save state, 13*4B = 52B.
        ".cfi_adjust_cfa_offset 52\n\t"
        "push {r9}\n\t"
        ".cfi_adjust_cfa_offset 4\n\t"
        "mov r9, %[referrer]\n\t"
        "str r9, [sp, #-8]!\n\t"    // Push referrer, +8B padding so 16B aligned.
        ".cfi_adjust_cfa_offset 8\n\t"
        "ldr r9, [sp, #8]\n\t"

        // Push everything on the stack, so we don't rely on the order. What a mess. :-(
        "sub sp, sp, #24\n\t"
        "str %[arg0], [sp]\n\t"
        "str %[arg1], [sp, #4]\n\t"
        "str %[arg2], [sp, #8]\n\t"
        "str %[code], [sp, #12]\n\t"
        "str %[self], [sp, #16]\n\t"
        "str %[hidden], [sp, #20]\n\t"
        "ldr r0, [sp]\n\t"
        "ldr r1, [sp, #4]\n\t"
        "ldr r2, [sp, #8]\n\t"
        "ldr r3, [sp, #12]\n\t"
        "ldr r9, [sp, #16]\n\t"
        "ldr r12, [sp, #20]\n\t"
        "add sp, sp, #24\n\t"

        "blx r3\n\t"                // Call the stub.
        "add sp, sp, #12\n\t"       // Pop the referrer slot, padding, and saved r9.
        ".cfi_adjust_cfa_offset -12\n\t"
        "pop {r1-r12, lr}\n\t"      // Restore state.
        ".cfi_adjust_cfa_offset -52\n\t"
        "mov %[result], r0\n\t"     // Save the result.
        : [result] "=r" (result)
          // Use the result from r0.
        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
          [referrer] "r"(referrer), [hidden] "r"(hidden)
        : "r0", "memory");  // clobber.
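    // A note on the register choices above: on arm, ART keeps the current Thread* in
    // r9 and (as we read this sequence) passes the "hidden" argument in r12, so self
    // and hidden are materialized in exactly those registers before the blx.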
#elif defined(__aarch64__)
    __asm__ __volatile__(
        // Spill x0-x7 which we say we don't clobber. May contain args.
        "sub sp, sp, #80\n\t"
        ".cfi_adjust_cfa_offset 80\n\t"
        "stp x0, x1, [sp]\n\t"
        "stp x2, x3, [sp, #16]\n\t"
        "stp x4, x5, [sp, #32]\n\t"
        "stp x6, x7, [sp, #48]\n\t"
        // To be extra defensive, store x20. We do this because some of the stubs might make a
        // transition into the runtime via the blr instruction below and *not* save x20.
        "str x20, [sp, #64]\n\t"
        // 8 byte buffer.

        "sub sp, sp, #16\n\t"          // Reserve stack space, 16B aligned.
        ".cfi_adjust_cfa_offset 16\n\t"
        "str %[referrer], [sp]\n\t"    // referrer

        // Push everything on the stack, so we don't rely on the order. What a mess. :-(
        "sub sp, sp, #48\n\t"
        ".cfi_adjust_cfa_offset 48\n\t"
        // All things are "r" constraints, so direct str/stp should work.
        "stp %[arg0], %[arg1], [sp]\n\t"
        "stp %[arg2], %[code], [sp, #16]\n\t"
        "stp %[self], %[hidden], [sp, #32]\n\t"

        // Now we definitely have x0-x3 free, use it to garble d8 - d15.
        "movk x0, #0xfad0\n\t"
        "movk x0, #0xebad, lsl #16\n\t"
        "movk x0, #0xfad0, lsl #32\n\t"
        "movk x0, #0xebad, lsl #48\n\t"
        "fmov d8, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d9, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d10, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d11, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d12, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d13, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d14, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d15, x0\n\t"

        // Load call params into the right registers.
        "ldp x0, x1, [sp]\n\t"
        "ldp x2, x3, [sp, #16]\n\t"
        "ldp x19, x17, [sp, #32]\n\t"
        "add sp, sp, #48\n\t"
        ".cfi_adjust_cfa_offset -48\n\t"

        "blr x3\n\t"              // Call the stub.
        "mov x8, x0\n\t"          // Store result.
        "add sp, sp, #16\n\t"     // Drop the quick "frame".
        ".cfi_adjust_cfa_offset -16\n\t"

        // Test d8 - d15. We can use x1 and x2.
        "movk x1, #0xfad0\n\t"
        "movk x1, #0xebad, lsl #16\n\t"
        "movk x1, #0xfad0, lsl #32\n\t"
        "movk x1, #0xebad, lsl #48\n\t"
        "fmov x2, d8\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d9\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d10\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d11\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d12\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d13\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d14\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d15\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"

        "mov x9, #0\n\t"              // Use x9 as flag, in clobber list.

        // Finish up.
        "2:\n\t"
        "ldp x0, x1, [sp]\n\t"        // Restore stuff not named clobbered, may contain fpr_result.
        "ldp x2, x3, [sp, #16]\n\t"
        "ldp x4, x5, [sp, #32]\n\t"
        "ldp x6, x7, [sp, #48]\n\t"
        "ldr x20, [sp, #64]\n\t"
        "add sp, sp, #80\n\t"         // Free stack space, now sp as on entry.
        ".cfi_adjust_cfa_offset -80\n\t"

        "str x9, %[fpr_result]\n\t"   // Store the FPR comparison result.
        "mov %[result], x8\n\t"       // Store the call result.

        "b 3f\n\t"                    // Goto end.

        // Failed fpr verification.
        "1:\n\t"
        "mov x9, #1\n\t"
        "b 2b\n\t"                    // Goto finish-up.

        // End.
        "3:\n\t"
        : [result] "=r" (result)
          // Use the result from x0 (moved via x8 above).
        : [arg0] "0"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
          [referrer] "r"(referrer), [hidden] "r"(hidden), [fpr_result] "m" (fpr_result)
          // Leave one register unclobbered, which is needed for compiling with
          // -fstack-protector-strong. According to AAPCS64, registers x9-x15 are caller-saved,
          // and we clobber all of them here, so we instead leave one otherwise-unused
          // callee-saved register, x20, out of the clobber list (it is saved and restored
          // manually above).
        : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19",
          "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x30",
          "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
          "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
          "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
          "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
          "memory");
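    // The d8-d15 dance above verifies that the stub preserved the FP registers that
    // AAPCS64 designates as callee-saved: they are filled with a recognizable pattern
    // before the call and compared afterwards; x9 records pass/fail and is checked
    // through fpr_result/EXPECT_EQ once we are back in C++.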
#elif defined(__mips__) && !defined(__LP64__)
    __asm__ __volatile__ (
        // Spill a0-a3 and t0-t7 which we say we don't clobber. May contain args.
        "addiu $sp, $sp, -64\n\t"
        "sw $a0, 0($sp)\n\t"
        "sw $a1, 4($sp)\n\t"
        "sw $a2, 8($sp)\n\t"
        "sw $a3, 12($sp)\n\t"
        "sw $t0, 16($sp)\n\t"
        "sw $t1, 20($sp)\n\t"
        "sw $t2, 24($sp)\n\t"
        "sw $t3, 28($sp)\n\t"
        "sw $t4, 32($sp)\n\t"
        "sw $t5, 36($sp)\n\t"
        "sw $t6, 40($sp)\n\t"
        "sw $t7, 44($sp)\n\t"
        // Spill the gp register since it is caller-save.
        "sw $gp, 52($sp)\n\t"

        "addiu $sp, $sp, -16\n\t"  // Reserve stack space, 16B aligned.
        "sw %[referrer], 0($sp)\n\t"

        // Push everything on the stack, so we don't rely on the order.
        "addiu $sp, $sp, -24\n\t"
        "sw %[arg0], 0($sp)\n\t"
        "sw %[arg1], 4($sp)\n\t"
        "sw %[arg2], 8($sp)\n\t"
        "sw %[code], 12($sp)\n\t"
        "sw %[self], 16($sp)\n\t"
        "sw %[hidden], 20($sp)\n\t"

        // Load call params into the right registers.
        "lw $a0, 0($sp)\n\t"
        "lw $a1, 4($sp)\n\t"
        "lw $a2, 8($sp)\n\t"
        "lw $t9, 12($sp)\n\t"
        "lw $s1, 16($sp)\n\t"
        "lw $t7, 20($sp)\n\t"
        "addiu $sp, $sp, 24\n\t"

        "jalr $t9\n\t"             // Call the stub.
        "nop\n\t"                  // Branch delay slot.
        "addiu $sp, $sp, 16\n\t"   // Drop the quick "frame".

        // Restore stuff not named clobbered.
        "lw $a0, 0($sp)\n\t"
        "lw $a1, 4($sp)\n\t"
        "lw $a2, 8($sp)\n\t"
        "lw $a3, 12($sp)\n\t"
        "lw $t0, 16($sp)\n\t"
        "lw $t1, 20($sp)\n\t"
        "lw $t2, 24($sp)\n\t"
        "lw $t3, 28($sp)\n\t"
        "lw $t4, 32($sp)\n\t"
        "lw $t5, 36($sp)\n\t"
        "lw $t6, 40($sp)\n\t"
        "lw $t7, 44($sp)\n\t"
        // Restore gp.
        "lw $gp, 52($sp)\n\t"
        "addiu $sp, $sp, 64\n\t"   // Free stack space, now sp as on entry.

        "move %[result], $v0\n\t"  // Store the call result.
        : [result] "=r" (result)
        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
          [referrer] "r"(referrer), [hidden] "r"(hidden)
        : "at", "v0", "v1", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "t8", "t9", "k0", "k1",
          "fp", "ra",
          "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", "$f9", "$f10", "$f11",
          "$f12", "$f13", "$f14", "$f15", "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22",
          "$f23", "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31",
          "memory");  // clobber.
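    // MIPS conventions assumed here: indirect calls go through $t9 (required by the
    // o32 PIC calling convention so the callee can recompute $gp), the instruction
    // after jalr executes in the branch delay slot, and ART keeps the current
    // Thread* in $s1, which is why self is loaded there before the call.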
#elif defined(__mips__) && defined(__LP64__)
    __asm__ __volatile__ (
        // Spill a0-a7 which we say we don't clobber. May contain args.
        "daddiu $sp, $sp, -64\n\t"
        "sd $a0, 0($sp)\n\t"
        "sd $a1, 8($sp)\n\t"
        "sd $a2, 16($sp)\n\t"
        "sd $a3, 24($sp)\n\t"
        "sd $a4, 32($sp)\n\t"
        "sd $a5, 40($sp)\n\t"
        "sd $a6, 48($sp)\n\t"
        "sd $a7, 56($sp)\n\t"

        "daddiu $sp, $sp, -16\n\t"  // Reserve stack space, 16B aligned.
        "sd %[referrer], 0($sp)\n\t"

        // Push everything on the stack, so we don't rely on the order.
        "daddiu $sp, $sp, -48\n\t"
        "sd %[arg0], 0($sp)\n\t"
        "sd %[arg1], 8($sp)\n\t"
        "sd %[arg2], 16($sp)\n\t"
        "sd %[code], 24($sp)\n\t"
        "sd %[self], 32($sp)\n\t"
        "sd %[hidden], 40($sp)\n\t"

        // Load call params into the right registers.
        "ld $a0, 0($sp)\n\t"
        "ld $a1, 8($sp)\n\t"
        "ld $a2, 16($sp)\n\t"
        "ld $t9, 24($sp)\n\t"
        "ld $s1, 32($sp)\n\t"
        "ld $t0, 40($sp)\n\t"
        "daddiu $sp, $sp, 48\n\t"

        "jalr $t9\n\t"              // Call the stub.
        "nop\n\t"                   // Branch delay slot.
        "daddiu $sp, $sp, 16\n\t"   // Drop the quick "frame".

        // Restore stuff not named clobbered.
        "ld $a0, 0($sp)\n\t"
        "ld $a1, 8($sp)\n\t"
        "ld $a2, 16($sp)\n\t"
        "ld $a3, 24($sp)\n\t"
        "ld $a4, 32($sp)\n\t"
        "ld $a5, 40($sp)\n\t"
        "ld $a6, 48($sp)\n\t"
        "ld $a7, 56($sp)\n\t"
        "daddiu $sp, $sp, 64\n\t"

        "move %[result], $v0\n\t"   // Store the call result.
        : [result] "=r" (result)
        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
          [referrer] "r"(referrer), [hidden] "r"(hidden)
        // Instead of the aliases t0-t3, the raw register names $12-$15 are used in the clobber
        // list, because the t0-t3 aliases are ambiguous: they name different registers on
        // MIPS32 (o32) and MIPS64 (n64).
        : "at", "v0", "v1", "$12", "$13", "$14", "$15", "s0", "s1", "s2", "s3", "s4", "s5", "s6",
          "s7", "t8", "t9", "k0", "k1", "fp", "ra",
          "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", "$f9", "$f10", "$f11",
          "$f12", "$f13", "$f14", "$f15", "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22",
          "$f23", "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31",
          "memory");  // clobber.
#elif defined(__x86_64__) && !defined(__APPLE__)
#define PUSH(reg) "pushq " # reg "\n\t .cfi_adjust_cfa_offset 8\n\t"
#define POP(reg) "popq " # reg "\n\t .cfi_adjust_cfa_offset -8\n\t"
    // Note: Uses the native convention. We do a callee-save regimen by manually spilling and
    //       restoring almost all registers.
    // TODO: Set the thread?
    __asm__ __volatile__(
        // Spill almost everything (except rax, rsp). 14 registers.
        PUSH(%%rbx)
        PUSH(%%rcx)
        PUSH(%%rdx)
        PUSH(%%rsi)
        PUSH(%%rdi)
        PUSH(%%rbp)
        PUSH(%%r8)
        PUSH(%%r9)
        PUSH(%%r10)
        PUSH(%%r11)
        PUSH(%%r12)
        PUSH(%%r13)
        PUSH(%%r14)
        PUSH(%%r15)

        PUSH(%[referrer])              // Push referrer & 16B alignment padding.
        PUSH(%[referrer])

        // Now juggle the input registers.
        PUSH(%[arg0])
        PUSH(%[arg1])
        PUSH(%[arg2])
        PUSH(%[hidden])
        PUSH(%[code])
        POP(%%r8)
        POP(%%rax)
        POP(%%rdx)
        POP(%%rsi)
        POP(%%rdi)

        "call *%%r8\n\t"                  // Call the stub.
        "addq $16, %%rsp\n\t"             // Pop referrer and padding.
        ".cfi_adjust_cfa_offset -16\n\t"

        POP(%%r15)
        POP(%%r14)
        POP(%%r13)
        POP(%%r12)
        POP(%%r11)
        POP(%%r10)
        POP(%%r9)
        POP(%%r8)
        POP(%%rbp)
        POP(%%rdi)
        POP(%%rsi)
        POP(%%rdx)
        POP(%%rcx)
        POP(%%rbx)

        : "=a" (result)
        // Use the result from rax.
        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code),
          [referrer] "r"(referrer), [hidden] "r"(hidden)
        // The push/pop sequence places arg0 into rdi, arg1 into rsi, arg2 into rdx, hidden
        // into rax, and code into r8. We can't use a "b" (rbx) constraint, as ASAN uses rbx
        // for the frame pointer.
        : "memory");  // We spill and restore (almost) all registers, so only mention memory here.
#undef PUSH
#undef POP
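    // rax is deliberately not spilled: it is the "=a" output that carries the stub's
    // return value, and (in this test's staging above) it also holds the hidden
    // argument at the moment of the call.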
#else
    UNUSED(arg0, arg1, arg2, code, referrer, hidden);
    LOG(WARNING) << "Was asked to invoke for an architecture I do not understand.";
    result = 0;
#endif
    // Pop transition.
    self->PopManagedStackFragment(fragment);

    fp_result = fpr_result;
    EXPECT_EQ(0U, fp_result);

    return result;
  }

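  // Reads a quick entrypoint's code pointer out of the Thread: the enum is mapped to
  // the entrypoint's offset within the Thread object, and the function pointer stored
  // at that offset is returned.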
  static uintptr_t GetEntrypoint(Thread* self, QuickEntrypointEnum entrypoint) {
    int32_t offset;
    offset = GetThreadOffset<kRuntimePointerSize>(entrypoint).Int32Value();
    return *reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(self) + offset);
  }

 protected:
  size_t fp_result;
};


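// The Memcpy test copies ten uint32_t words from orig[4] into trg[4] and then checks
// the guard words on both sides of the destination to make sure the stub wrote
// exactly the requested range.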
TEST_F(StubTest, Memcpy) {
#if defined(__i386__) || (defined(__x86_64__) && !defined(__APPLE__)) || defined(__mips__)
  Thread* self = Thread::Current();

  uint32_t orig[20];
  uint32_t trg[20];
  for (size_t i = 0; i < 20; ++i) {
    orig[i] = i;
    trg[i] = 0;
  }

  Invoke3(reinterpret_cast<size_t>(&trg[4]), reinterpret_cast<size_t>(&orig[4]),
          10 * sizeof(uint32_t), StubTest::GetEntrypoint(self, kQuickMemcpy), self);

  EXPECT_EQ(orig[0], trg[0]);

  for (size_t i = 1; i < 4; ++i) {
    EXPECT_NE(orig[i], trg[i]);
  }

  for (size_t i = 4; i < 14; ++i) {
    EXPECT_EQ(orig[i], trg[i]);
  }

  for (size_t i = 14; i < 20; ++i) {
    EXPECT_NE(orig[i], trg[i]);
  }

  // TODO: Test overlapping?

#else
  LOG(INFO) << "Skipping memcpy as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping memcpy as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

TEST_F(StubTest, LockObject) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  static constexpr size_t kThinLockLoops = 100;

  Thread* self = Thread::Current();

  const uintptr_t art_quick_lock_object = StubTest::GetEntrypoint(self, kQuickLockObject);

  // Create an object.
  ScopedObjectAccess soa(self);
  // Garbage is created during ClassLinker::Init.

  StackHandleScope<2> hs(soa.Self());
  Handle<mirror::String> obj(
      hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
  LockWord lock = obj->GetLockWord(false);
  LockWord::LockState old_state = lock.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);

  Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);

  LockWord lock_after = obj->GetLockWord(false);
  LockWord::LockState new_state = lock_after.GetState();
  EXPECT_EQ(LockWord::LockState::kThinLocked, new_state);
  EXPECT_EQ(lock_after.ThinLockCount(), 0U);  // Thin lock starts count at zero.

  for (size_t i = 1; i < kThinLockLoops; ++i) {
    Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);

    // Check we're at lock count i.
    LockWord l_inc = obj->GetLockWord(false);
    LockWord::LockState l_inc_state = l_inc.GetState();
    EXPECT_EQ(LockWord::LockState::kThinLocked, l_inc_state);
    EXPECT_EQ(l_inc.ThinLockCount(), i);
  }

  // Force a fat lock by running identity hashcode to fill up lock word.
  Handle<mirror::String> obj2(hs.NewHandle(
      mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));

  obj2->IdentityHashCode();

  Invoke3(reinterpret_cast<size_t>(obj2.Get()), 0U, 0U, art_quick_lock_object, self);

  LockWord lock_after2 = obj2->GetLockWord(false);
  LockWord::LockState new_state2 = lock_after2.GetState();
  EXPECT_EQ(LockWord::LockState::kFatLocked, new_state2);
  EXPECT_NE(lock_after2.FatLockMonitor(), static_cast<Monitor*>(nullptr));

  // Test done.
#else
  LOG(INFO) << "Skipping lock_object as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping lock_object as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}


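// Tiny deterministic pseudo-random generator for the stress test below: a MINSTD-style
// multiplicative step (48271 modulo 2^31 - 1) with an added constant. Reproducibility,
// not statistical quality, is the point here.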
class RandGen {
 public:
  explicit RandGen(uint32_t seed) : val_(seed) {}

  uint32_t next() {
    val_ = val_ * 48271 % 2147483647 + 13;
    return val_;
  }

  uint32_t val_;
};


// NO_THREAD_SAFETY_ANALYSIS as we do not want to grab exclusive mutator lock for MonitorInfo.
static void TestUnlockObject(StubTest* test) NO_THREAD_SAFETY_ANALYSIS {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  static constexpr size_t kThinLockLoops = 100;

  Thread* self = Thread::Current();

  const uintptr_t art_quick_lock_object = StubTest::GetEntrypoint(self, kQuickLockObject);
  const uintptr_t art_quick_unlock_object = StubTest::GetEntrypoint(self, kQuickUnlockObject);
  // Create an object.
  ScopedObjectAccess soa(self);
  // Garbage is created during ClassLinker::Init.
  static constexpr size_t kNumberOfLocks = 10;  // Number of objects = number of locks.
  StackHandleScope<kNumberOfLocks + 1> hs(self);
  Handle<mirror::String> obj(
      hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
  LockWord lock = obj->GetLockWord(false);
  LockWord::LockState old_state = lock.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);

  test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_unlock_object, self);
  // This should be an illegal monitor state.
  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

  LockWord lock_after = obj->GetLockWord(false);
  LockWord::LockState new_state = lock_after.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, new_state);

  test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);

  LockWord lock_after2 = obj->GetLockWord(false);
  LockWord::LockState new_state2 = lock_after2.GetState();
  EXPECT_EQ(LockWord::LockState::kThinLocked, new_state2);

  test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_unlock_object, self);

  LockWord lock_after3 = obj->GetLockWord(false);
  LockWord::LockState new_state3 = lock_after3.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, new_state3);

  // Stress test:
  // Keep a number of objects and their locks in flight. Randomly lock or unlock one of them in
  // each step.

  RandGen r(0x1234);

  constexpr size_t kIterations = 10000;  // Number of iterations.
  constexpr size_t kMoveToFat = 1000;    // Chance of 1:kMoveToFat to make a lock fat.

  size_t counts[kNumberOfLocks];
  bool fat[kNumberOfLocks];  // Whether a lock should be thin or fat.
  Handle<mirror::String> objects[kNumberOfLocks];

  // Initialize = allocate.
  for (size_t i = 0; i < kNumberOfLocks; ++i) {
    counts[i] = 0;
    fat[i] = false;
    objects[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), ""));
  }

  for (size_t i = 0; i < kIterations; ++i) {
    // Select which lock to update.
    size_t index = r.next() % kNumberOfLocks;

    // Make lock fat?
    if (!fat[index] && (r.next() % kMoveToFat == 0)) {
      fat[index] = true;
      objects[index]->IdentityHashCode();

      LockWord lock_iter = objects[index]->GetLockWord(false);
      LockWord::LockState iter_state = lock_iter.GetState();
      if (counts[index] == 0) {
        EXPECT_EQ(LockWord::LockState::kHashCode, iter_state);
      } else {
        EXPECT_EQ(LockWord::LockState::kFatLocked, iter_state);
      }
    } else {
      bool take_lock;  // Whether to lock or unlock in this step.
      if (counts[index] == 0) {
        take_lock = true;
      } else if (counts[index] == kThinLockLoops) {
        take_lock = false;
      } else {
        // Randomly.
        take_lock = r.next() % 2 == 0;
      }

      if (take_lock) {
        test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U, art_quick_lock_object,
                      self);
        counts[index]++;
      } else {
        test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U,
                      art_quick_unlock_object, self);
        counts[index]--;
      }

      EXPECT_FALSE(self->IsExceptionPending());

      // Check the new state.
      LockWord lock_iter = objects[index]->GetLockWord(true);
      LockWord::LockState iter_state = lock_iter.GetState();
      if (fat[index]) {
        // Abuse MonitorInfo.
        EXPECT_EQ(LockWord::LockState::kFatLocked, iter_state) << index;
        MonitorInfo info(objects[index].Get());
        EXPECT_EQ(counts[index], info.entry_count_) << index;
      } else {
        if (counts[index] > 0) {
          EXPECT_EQ(LockWord::LockState::kThinLocked, iter_state);
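          // ThinLockCount() is zero-based (the first lock leaves a count of 0, see the
          // LockObject test above), hence the - 1 here.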
          EXPECT_EQ(counts[index] - 1, lock_iter.ThinLockCount());
        } else {
          EXPECT_EQ(LockWord::LockState::kUnlocked, iter_state);
        }
      }
    }
  }

  // Unlock the remaining count times and then check it's unlocked. Then deallocate.
  // Go in reverse order to correctly handle Handles.
  for (size_t i = 0; i < kNumberOfLocks; ++i) {
    size_t index = kNumberOfLocks - 1 - i;
    size_t count = counts[index];
    while (count > 0) {
      test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U, art_quick_unlock_object,
                    self);
      count--;
    }

    LockWord lock_after4 = objects[index]->GetLockWord(false);
    LockWord::LockState new_state4 = lock_after4.GetState();
    EXPECT_TRUE(LockWord::LockState::kUnlocked == new_state4
                || LockWord::LockState::kFatLocked == new_state4);
  }

  // Test done.
#else
  UNUSED(test);
  LOG(INFO) << "Skipping unlock_object as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping unlock_object as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

TEST_F(StubTest, UnlockObject) {
  // This will lead to monitor error messages in the log.
  ScopedLogSeverity sls(LogSeverity::FATAL);

  TestUnlockObject(this);
}

#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
extern "C" void art_quick_check_instance_of(void);
#endif

TEST_F(StubTest, CheckCast) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  Thread* self = Thread::Current();

  const uintptr_t art_quick_check_instance_of =
      StubTest::GetEntrypoint(self, kQuickCheckInstanceOf);

  // Find some classes.
  ScopedObjectAccess soa(self);
  // Garbage is created during ClassLinker::Init.

  VariableSizedHandleScope hs(soa.Self());
  Handle<mirror::Class> klass_obj(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
  Handle<mirror::Class> klass_str(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/String;")));
  Handle<mirror::Class> klass_list(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/util/List;")));
  Handle<mirror::Class> klass_cloneable(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Cloneable;")));
  Handle<mirror::Class> klass_array_list(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/util/ArrayList;")));
  Handle<mirror::Object> obj(hs.NewHandle(klass_obj->AllocObject(soa.Self())));
  Handle<mirror::String> string(hs.NewHandle(
      mirror::String::AllocFromModifiedUtf8(soa.Self(), "ABCD")));
  Handle<mirror::Object> array_list(hs.NewHandle(klass_array_list->AllocObject(soa.Self())));

  EXPECT_FALSE(self->IsExceptionPending());

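  // Expected true: a plain Object is an instance of java.lang.Object.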
  Invoke3(reinterpret_cast<size_t>(obj.Get()),
          reinterpret_cast<size_t>(klass_obj.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_FALSE(self->IsExceptionPending());

  // Expected true: Test string instance of java.lang.String.
  Invoke3(reinterpret_cast<size_t>(string.Get()),
          reinterpret_cast<size_t>(klass_str.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_FALSE(self->IsExceptionPending());

  // Expected true: Test string instance of java.lang.Object.
  Invoke3(reinterpret_cast<size_t>(string.Get()),
          reinterpret_cast<size_t>(klass_obj.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_FALSE(self->IsExceptionPending());

  // Expected false: Test object instance of java.lang.String.
  Invoke3(reinterpret_cast<size_t>(obj.Get()),
          reinterpret_cast<size_t>(klass_str.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

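  // Expected true: ArrayList is an instance of java.util.List.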
  Invoke3(reinterpret_cast<size_t>(array_list.Get()),
          reinterpret_cast<size_t>(klass_list.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_FALSE(self->IsExceptionPending());

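  // Expected true: ArrayList implements java.lang.Cloneable.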
  Invoke3(reinterpret_cast<size_t>(array_list.Get()),
          reinterpret_cast<size_t>(klass_cloneable.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_FALSE(self->IsExceptionPending());

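  // Expected false: a String is not a java.util.ArrayList.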
  Invoke3(reinterpret_cast<size_t>(string.Get()),
          reinterpret_cast<size_t>(klass_array_list.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

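  // Expected false: String does not implement java.lang.Cloneable.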
  Invoke3(reinterpret_cast<size_t>(string.Get()),
          reinterpret_cast<size_t>(klass_cloneable.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

#else
  LOG(INFO) << "Skipping check_cast as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping check_cast as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

TEST_F(StubTest, AllocObject) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  // This will lead to OOM error messages in the log.
  ScopedLogSeverity sls(LogSeverity::FATAL);

  // TODO: Check the "Unresolved" allocation stubs.

  Thread* self = Thread::Current();
  // Create an object.
  ScopedObjectAccess soa(self);
  // Garbage is created during ClassLinker::Init.

  StackHandleScope<2> hs(soa.Self());
  Handle<mirror::Class> c(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));

  // Play with it...

  EXPECT_FALSE(self->IsExceptionPending());
  {
    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
                            StubTest::GetEntrypoint(self, kQuickAllocObjectWithChecks),
                            self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_EQ(c.Get(), obj->GetClass());
    VerifyObject(obj);
  }

  {
    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
                            StubTest::GetEntrypoint(self, kQuickAllocObjectResolved),
                            self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_EQ(c.Get(), obj->GetClass());
    VerifyObject(obj);
  }

  {
    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
                            StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
                            self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_EQ(c.Get(), obj->GetClass());
    VerifyObject(obj);
  }

  // Failure tests.

  // Out-of-memory.
  {
    Runtime::Current()->GetHeap()->SetIdealFootprint(1 * GB);

    // Array helps to fill memory faster.
    Handle<mirror::Class> ca(
        hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));

    // Use an arbitrary large amount for now.
    static const size_t kMaxHandles = 1000000;
    std::unique_ptr<StackHandleScope<kMaxHandles>> hsp(new StackHandleScope<kMaxHandles>(self));

    std::vector<Handle<mirror::Object>> handles;
    // Start allocating with 128K.
    size_t length = 128 * KB / 4;
    while (length > 10) {
      Handle<mirror::Object> h(hsp->NewHandle<mirror::Object>(
          mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), ca.Get(), length / 4)));
      if (self->IsExceptionPending() || h == nullptr) {
        self->ClearException();

        // Try a smaller length.
        length = length / 8;
        // Use at most half the reported free space.
        size_t mem = Runtime::Current()->GetHeap()->GetFreeMemory();
        if (length * 8 > mem) {
          length = mem / 8;
        }
      } else {
        handles.push_back(h);
      }
    }
    LOG(INFO) << "Used " << handles.size() << " arrays to fill space.";

    // Allocate simple objects till it fails.
    while (!self->IsExceptionPending()) {
      Handle<mirror::Object> h = hsp->NewHandle(c->AllocObject(soa.Self()));
      if (!self->IsExceptionPending() && h != nullptr) {
        handles.push_back(h);
      }
    }
    self->ClearException();

    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
                            StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
                            self);
    EXPECT_TRUE(self->IsExceptionPending());
    self->ClearException();
    EXPECT_EQ(reinterpret_cast<size_t>(nullptr), result);
  }

  // Tests done.
#else
  LOG(INFO) << "Skipping alloc_object as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping alloc_object as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

TEST_F(StubTest, AllocObjectArray) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  // TODO: Check the "Unresolved" allocation stubs.

  // This will lead to OOM error messages in the log.
  ScopedLogSeverity sls(LogSeverity::FATAL);

  Thread* self = Thread::Current();
  // Create an object.
  ScopedObjectAccess soa(self);
  // Garbage is created during ClassLinker::Init.

  StackHandleScope<1> hs(self);
  Handle<mirror::Class> c(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));

  // Play with it...

  EXPECT_FALSE(self->IsExceptionPending());

  {
    // We can use null in the second argument as we do not need a method here (not used in
    // resolved/initialized cases).
    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 10U,
                            reinterpret_cast<size_t>(nullptr),
                            StubTest::GetEntrypoint(self, kQuickAllocArrayResolved32),
                            self);
    EXPECT_FALSE(self->IsExceptionPending()) << mirror::Object::PrettyTypeOf(self->GetException());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_TRUE(obj->IsArrayInstance());
    EXPECT_TRUE(obj->IsObjectArray());
    EXPECT_EQ(c.Get(), obj->GetClass());
    VerifyObject(obj);
    mirror::Array* array = reinterpret_cast<mirror::Array*>(result);
    EXPECT_EQ(array->GetLength(), 10);
  }

  // Failure tests.

  // Out-of-memory.
  {
    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()),
                            GB,  // that should fail...
                            reinterpret_cast<size_t>(nullptr),
                            StubTest::GetEntrypoint(self, kQuickAllocArrayResolved32),
                            self);

    EXPECT_TRUE(self->IsExceptionPending());
    self->ClearException();
    EXPECT_EQ(reinterpret_cast<size_t>(nullptr), result);
  }

  // Tests done.
#else
  LOG(INFO) << "Skipping alloc_array as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping alloc_array as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}


TEST_F(StubTest, StringCompareTo) {
  TEST_DISABLED_FOR_STRING_COMPRESSION();
  // There is no StringCompareTo runtime entrypoint for __arm__ or __aarch64__.
#if defined(__i386__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  // TODO: Check the "Unresolved" allocation stubs.

  Thread* self = Thread::Current();

  const uintptr_t art_quick_string_compareto = StubTest::GetEntrypoint(self, kQuickStringCompareTo);

  ScopedObjectAccess soa(self);
  // Garbage is created during ClassLinker::Init.

  // Create some strings.
  // Use an array so we can index into it and use a matrix for the expected results.
  // Setup: The first half is standard. The second half uses a non-zero offset.
  // TODO: Shared backing arrays.
  const char* c[] = { "", "", "a", "aa", "ab",
      "aacaacaacaacaacaac",  // This one's under the default limit to go to __memcmp16.
      "aacaacaacaacaacaacaacaacaacaacaacaac",     // This one's over.
      "aacaacaacaacaacaacaacaacaacaacaacaaca" };  // As is this one. We need a separate one to
                                                  // defeat object-equal optimizations.
  static constexpr size_t kStringCount = arraysize(c);

  StackHandleScope<kStringCount> hs(self);
  Handle<mirror::String> s[kStringCount];

  for (size_t i = 0; i < kStringCount; ++i) {
    s[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), c[i]));
  }

  // TODO: wide characters.

  // Matrix of expectations. The first index is the first parameter. Note we only check against
  // the sign, not the value. As we are testing random offsets, we need to compute this and rely
  // on String::CompareTo being correct.
  int32_t expected[kStringCount][kStringCount];
  for (size_t x = 0; x < kStringCount; ++x) {
    for (size_t y = 0; y < kStringCount; ++y) {
      expected[x][y] = s[x]->CompareTo(s[y].Get());
    }
  }

  // Play with it...

  for (size_t x = 0; x < kStringCount; ++x) {
    for (size_t y = 0; y < kStringCount; ++y) {
      // Test string_compareto x y.
      size_t result = Invoke3(reinterpret_cast<size_t>(s[x].Get()),
                              reinterpret_cast<size_t>(s[y].Get()), 0U,
                              art_quick_string_compareto, self);

      EXPECT_FALSE(self->IsExceptionPending());

      // The result is a 32b signed integer; reinterpret the returned word through a union
      // so we can compare signs.
      union {
        size_t r;
        int32_t i;
      } conv;
      conv.r = result;
      int32_t e = expected[x][y];
      EXPECT_TRUE(e == 0 ? conv.i == 0 : true) << "x=" << c[x] << " y=" << c[y] << " res="
          << conv.r;
      EXPECT_TRUE(e < 0 ? conv.i < 0 : true) << "x=" << c[x] << " y=" << c[y] << " res="
          << conv.r;
      EXPECT_TRUE(e > 0 ? conv.i > 0 : true) << "x=" << c[x] << " y=" << c[y] << " res="
          << conv.r;
    }
  }

  // TODO: Deallocate things.

  // Tests done.
#else
  LOG(INFO) << "Skipping string_compareto as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping string_compareto as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}


static void GetSetBooleanStatic(ArtField* f, Thread* self,
                                ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  constexpr size_t num_values = 5;
  uint8_t values[num_values] = { 0, 1, 2, 128, 0xFF };

  for (size_t i = 0; i < num_values; ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              static_cast<size_t>(values[i]),
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet8Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGetBooleanStatic),
                                           self,
                                           referrer);
    // Booleans are currently stored as uint8_t; be zealous and assert that the exact byte
    // written is the byte read back.
    EXPECT_EQ(values[i], static_cast<uint8_t>(res)) << "Iteration " << i;
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set_boolean_static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_boolean_static as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}
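
// GetSetByteStatic and the remaining Get/Set helpers follow the same pattern as
// GetSetBooleanStatic above: write a value through the quick "set" stub, read it back
// either through the matching "get" stub or via the ArtField directly, and check for
// round-trip equality. The field is addressed by its dex field index, with the
// referrer method supplying the dex-cache context.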
static void GetSetByteStatic(ArtField* f, Thread* self, ArtMethod* referrer,
                             StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  int8_t values[] = { -128, -64, 0, 64, 127 };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              static_cast<size_t>(values[i]),
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet8Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGetByteStatic),
                                           self,
                                           referrer);
    EXPECT_EQ(values[i], static_cast<int8_t>(res)) << "Iteration " << i;
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set_byte_static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_byte_static as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}


static void GetSetBooleanInstance(Handle<mirror::Object>* obj, ArtField* f, Thread* self,
                                  ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  uint8_t values[] = { 0, 1, 2, 128, 0xFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet8Instance),
                              self,
                              referrer);

    uint8_t res = f->GetBoolean(obj->Get());
    EXPECT_EQ(values[i], res) << "Iteration " << i;

    f->SetBoolean<false>(obj->Get(), res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGetBooleanInstance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<uint8_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set_boolean_instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_boolean_instance as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}
static void GetSetByteInstance(Handle<mirror::Object>* obj, ArtField* f,
                               Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  int8_t values[] = { -128, -64, 0, 64, 127 };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet8Instance),
                              self,
                              referrer);

    int8_t res = f->GetByte(obj->Get());
    EXPECT_EQ(res, values[i]) << "Iteration " << i;
    f->SetByte<false>(obj->Get(), ++res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGetByteInstance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<int8_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set_byte_instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_byte_instance as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

static void GetSetCharStatic(ArtField* f, Thread* self, ArtMethod* referrer,
                             StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              static_cast<size_t>(values[i]),
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet16Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGetCharStatic),
                                           self,
                                           referrer);

    EXPECT_EQ(values[i], static_cast<uint16_t>(res)) << "Iteration " << i;
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set_char_static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_char_static as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}
static void GetSetShortStatic(ArtField* f, Thread* self,
                              ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              static_cast<size_t>(values[i]),
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet16Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGetShortStatic),
                                           self,
                                           referrer);

    EXPECT_EQ(static_cast<int16_t>(res), values[i]) << "Iteration " << i;
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set_short_static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_short_static as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}
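
// Char and short are both 16-bit fields, so their setters share the kQuickSet16Static and
// kQuickSet16Instance stubs; only the getters are type-specific, because kQuickGetCharStatic
// must zero-extend the loaded value while kQuickGetShortStatic must sign-extend it.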

static void GetSetCharInstance(Handle<mirror::Object>* obj, ArtField* f,
                               Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet16Instance),
                              self,
                              referrer);

    uint16_t res = f->GetChar(obj->Get());
    EXPECT_EQ(res, values[i]) << "Iteration " << i;
    f->SetChar<false>(obj->Get(), ++res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGetCharInstance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<uint16_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set_char_instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_char_instance as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}
static void GetSetShortInstance(Handle<mirror::Object>* obj, ArtField* f,
                                Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet16Instance),
                              self,
                              referrer);

    int16_t res = f->GetShort(obj->Get());
    EXPECT_EQ(res, values[i]) << "Iteration " << i;
    f->SetShort<false>(obj->Get(), ++res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGetShortInstance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<int16_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set_short_instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_short_instance as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

static void GetSet32Static(ArtField* f, Thread* self, ArtMethod* referrer,
                           StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              static_cast<size_t>(values[i]),
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet32Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGet32Static),
                                           self,
                                           referrer);

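    // MIPS64 keeps 32-bit values sign-extended in 64-bit registers, so only the low 32 bits of
    // the returned size_t are meaningful there; other targets return the value unmodified.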
#if defined(__mips__) && defined(__LP64__)
    EXPECT_EQ(static_cast<uint32_t>(res), values[i]) << "Iteration " << i;
#else
    EXPECT_EQ(res, values[i]) << "Iteration " << i;
#endif
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set32static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set32static as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


static void GetSet32Instance(Handle<mirror::Object>* obj, ArtField* f,
                             Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet32Instance),
                              self,
                              referrer);

    int32_t res = f->GetInt(obj->Get());
    EXPECT_EQ(res, static_cast<int32_t>(values[i])) << "Iteration " << i;

    res++;
    f->SetInt<false>(obj->Get(), res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGet32Instance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<int32_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set32instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set32instance as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))

static void set_and_check_static(uint32_t f_idx, mirror::Object* val, Thread* self,
                                 ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
                            reinterpret_cast<size_t>(val),
                            0U,
                            StubTest::GetEntrypoint(self, kQuickSetObjStatic),
                            self,
                            referrer);

  size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
                                         0U, 0U,
                                         StubTest::GetEntrypoint(self, kQuickGetObjStatic),
                                         self,
                                         referrer);

  EXPECT_EQ(res, reinterpret_cast<size_t>(val)) << "Value " << val;
}
#endif
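
// As a rough sketch, one round trip through set_and_check_static amounts to the following,
// where kFieldIdx stands in for f->GetDexFieldIndex() and val for the reference being stored:
//
//   art_quick_set_obj_static(kFieldIdx, val);                    // kQuickSetObjStatic
//   mirror::Object* read = art_quick_get_obj_static(kFieldIdx);  // kQuickGetObjStatic
//   EXPECT_EQ(read, val);                                        // pointer identity only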

static void GetSetObjStatic(ArtField* f, Thread* self, ArtMethod* referrer,
                            StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  set_and_check_static(f->GetDexFieldIndex(), nullptr, self, referrer, test);

  // Allocate a string object for simplicity.
  mirror::String* str = mirror::String::AllocFromModifiedUtf8(self, "Test");
  set_and_check_static(f->GetDexFieldIndex(), str, self, referrer, test);

  set_and_check_static(f->GetDexFieldIndex(), nullptr, self, referrer, test);
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping setObjstatic as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping setObjstatic as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
static void set_and_check_instance(ArtField* f, mirror::Object* trg,
                                   mirror::Object* val, Thread* self, ArtMethod* referrer,
                                   StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                            reinterpret_cast<size_t>(trg),
                            reinterpret_cast<size_t>(val),
                            StubTest::GetEntrypoint(self, kQuickSetObjInstance),
                            self,
                            referrer);

  size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                         reinterpret_cast<size_t>(trg),
                                         0U,
                                         StubTest::GetEntrypoint(self, kQuickGetObjInstance),
                                         self,
                                         referrer);

  EXPECT_EQ(res, reinterpret_cast<size_t>(val)) << "Value " << val;

  EXPECT_OBJ_PTR_EQ(val, f->GetObj(trg));
}
#endif

static void GetSetObjInstance(Handle<mirror::Object>* obj, ArtField* f,
                              Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);

  // Allocate a string object for simplicity.
  mirror::String* str = mirror::String::AllocFromModifiedUtf8(self, "Test");
  set_and_check_instance(f, obj->Get(), str, self, referrer, test);

  set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping setObjinstance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping setObjinstance as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


// TODO: Complete these tests for 32-bit architectures.

static void GetSet64Static(ArtField* f, Thread* self, ArtMethod* referrer,
                           StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) \
    || defined(__aarch64__)
  uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    // The 64-bit FieldSet stores the set value in the second register.
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              values[i],
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet64Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGet64Static),
                                           self,
                                           referrer);

    EXPECT_EQ(res, values[i]) << "Iteration " << i;
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set64static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set64static as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}
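
// GetSet64Static (and GetSet64Instance below) only run on 64-bit targets: Invoke3WithReferrer
// passes its arguments as size_t, so a 64-bit value can only travel through it unmodified when
// size_t is itself 64 bits wide. See the TODO above about the 32-bit architectures.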

static void GetSet64Instance(Handle<mirror::Object>* obj, ArtField* f,
                             Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \
    defined(__aarch64__)
  uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet64Instance),
                              self,
                              referrer);

    int64_t res = f->GetLong(obj->Get());
    EXPECT_EQ(res, static_cast<int64_t>(values[i])) << "Iteration " << i;

    res++;
    f->SetLong<false>(obj->Get(), res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGet64Instance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<int64_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type) {
  // garbage is created during ClassLinker::Init

  JNIEnv* env = Thread::Current()->GetJniEnv();
  jclass jc = env->FindClass("AllFields");
  CHECK(jc != nullptr);
  jobject o = env->AllocObject(jc);
  CHECK(o != nullptr);

  ScopedObjectAccess soa(self);
  StackHandleScope<3> hs(self);
  Handle<mirror::Object> obj(hs.NewHandle(soa.Decode<mirror::Object>(o)));
  Handle<mirror::Class> c(hs.NewHandle(obj->GetClass()));
  // Need a method as a referrer
  ArtMethod* m = c->GetDirectMethod(0, kRuntimePointerSize);

  // Play with it...

  // Static fields.
  for (ArtField& f : c->GetSFields()) {
    Primitive::Type type = f.GetTypeAsPrimitiveType();
    if (test_type != type) {
      continue;
    }
    switch (type) {
      case Primitive::Type::kPrimBoolean:
        GetSetBooleanStatic(&f, self, m, test);
        break;
      case Primitive::Type::kPrimByte:
        GetSetByteStatic(&f, self, m, test);
        break;
      case Primitive::Type::kPrimChar:
        GetSetCharStatic(&f, self, m, test);
        break;
      case Primitive::Type::kPrimShort:
        GetSetShortStatic(&f, self, m, test);
        break;
      case Primitive::Type::kPrimInt:
        GetSet32Static(&f, self, m, test);
        break;
      case Primitive::Type::kPrimLong:
        GetSet64Static(&f, self, m, test);
        break;
      case Primitive::Type::kPrimNot:
        // Don't try array.
        if (f.GetTypeDescriptor()[0] != '[') {
          GetSetObjStatic(&f, self, m, test);
        }
        break;
      default:
        break;  // Skip.
    }
  }

  // Instance fields.
  for (ArtField& f : c->GetIFields()) {
    Primitive::Type type = f.GetTypeAsPrimitiveType();
    if (test_type != type) {
      continue;
    }
    switch (type) {
      case Primitive::Type::kPrimBoolean:
        GetSetBooleanInstance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimByte:
        GetSetByteInstance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimChar:
        GetSetCharInstance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimShort:
        GetSetShortInstance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimInt:
        GetSet32Instance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimLong:
        GetSet64Instance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimNot:
        // Don't try array.
        if (f.GetTypeDescriptor()[0] != '[') {
          GetSetObjInstance(&obj, &f, self, m, test);
        }
        break;
      default:
        break;  // Skip.
    }
  }

  // TODO: Deallocate things.
}
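
// TestFields is the driver for all of the helpers above: it instantiates AllFields via JNI,
// walks the class's static and instance fields, and dispatches every field whose primitive type
// matches test_type to the corresponding GetSet* helper. The TEST_F bodies below then call it
// once per width so each gtest case covers one family of stubs.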

TEST_F(StubTest, Fields8) {
  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimBoolean);
  TestFields(self, this, Primitive::Type::kPrimByte);
}

TEST_F(StubTest, Fields16) {
  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimChar);
  TestFields(self, this, Primitive::Type::kPrimShort);
}

TEST_F(StubTest, Fields32) {
  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimInt);
}

TEST_F(StubTest, FieldsObj) {
  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimNot);
}

TEST_F(StubTest, Fields64) {
  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimLong);
}

// Disabled, b/27991555.
// FIXME: Hacking the entry point to point to art_quick_to_interpreter_bridge is broken.
// The bridge calls through to GetCalleeSaveMethodCaller() which looks up the pre-header
// and gets a bogus OatQuickMethodHeader* pointing into our assembly code just before
// the bridge and uses that to check for inlined frames, crashing in the process.
TEST_F(StubTest, DISABLED_IMT) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  Thread* self = Thread::Current();

  ScopedObjectAccess soa(self);
  StackHandleScope<7> hs(self);

  JNIEnv* env = Thread::Current()->GetJniEnv();

  // ArrayList

  // Load ArrayList and used methods (JNI).
  jclass arraylist_jclass = env->FindClass("java/util/ArrayList");
  ASSERT_NE(nullptr, arraylist_jclass);
  jmethodID arraylist_constructor = env->GetMethodID(arraylist_jclass, "<init>", "()V");
  ASSERT_NE(nullptr, arraylist_constructor);
  jmethodID contains_jmethod = env->GetMethodID(
      arraylist_jclass, "contains", "(Ljava/lang/Object;)Z");
  ASSERT_NE(nullptr, contains_jmethod);
  jmethodID add_jmethod = env->GetMethodID(arraylist_jclass, "add", "(Ljava/lang/Object;)Z");
  ASSERT_NE(nullptr, add_jmethod);

  // Get representation.
  ArtMethod* contains_amethod = jni::DecodeArtMethod(contains_jmethod);

  // Patch up ArrayList.contains.
  if (contains_amethod->GetEntryPointFromQuickCompiledCode() == nullptr) {
    contains_amethod->SetEntryPointFromQuickCompiledCode(reinterpret_cast<void*>(
        StubTest::GetEntrypoint(self, kQuickQuickToInterpreterBridge)));
  }

  // List

  // Load List and used methods (JNI).
  jclass list_jclass = env->FindClass("java/util/List");
  ASSERT_NE(nullptr, list_jclass);
  jmethodID inf_contains_jmethod = env->GetMethodID(
      list_jclass, "contains", "(Ljava/lang/Object;)Z");
  ASSERT_NE(nullptr, inf_contains_jmethod);

  // Get mirror representation.
  ArtMethod* inf_contains = jni::DecodeArtMethod(inf_contains_jmethod);

  // Object

  jclass obj_jclass = env->FindClass("java/lang/Object");
  ASSERT_NE(nullptr, obj_jclass);
  jmethodID obj_constructor = env->GetMethodID(obj_jclass, "<init>", "()V");
  ASSERT_NE(nullptr, obj_constructor);

  // Create instances.

  jobject jarray_list = env->NewObject(arraylist_jclass, arraylist_constructor);
  ASSERT_NE(nullptr, jarray_list);
  Handle<mirror::Object> array_list(hs.NewHandle(soa.Decode<mirror::Object>(jarray_list)));

  jobject jobj = env->NewObject(obj_jclass, obj_constructor);
  ASSERT_NE(nullptr, jobj);
  Handle<mirror::Object> obj(hs.NewHandle(soa.Decode<mirror::Object>(jobj)));

  // Invocation tests.

  // 1. imt_conflict

  // Contains.

  // We construct the ImtConflictTable ourselves, as we cannot go into the runtime stub
  // that will create it: the runtime stub expects to be called by compiled code.
  LinearAlloc* linear_alloc = Runtime::Current()->GetLinearAlloc();
  ArtMethod* conflict_method = Runtime::Current()->CreateImtConflictMethod(linear_alloc);
  ImtConflictTable* empty_conflict_table =
      Runtime::Current()->GetClassLinker()->CreateImtConflictTable(/*count*/0u, linear_alloc);
  void* data = linear_alloc->Alloc(
      self,
      ImtConflictTable::ComputeSizeWithOneMoreEntry(empty_conflict_table, kRuntimePointerSize));
  ImtConflictTable* new_table = new (data) ImtConflictTable(
      empty_conflict_table, inf_contains, contains_amethod, kRuntimePointerSize);
  conflict_method->SetImtConflictTable(new_table, kRuntimePointerSize);

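  // The hidden argument below carries the interface method's dex method index; the conflict
  // trampoline uses it to select the right target out of the ImtConflictTable installed above.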
  size_t result =
      Invoke3WithReferrerAndHidden(reinterpret_cast<size_t>(conflict_method),
                                   reinterpret_cast<size_t>(array_list.Get()),
                                   reinterpret_cast<size_t>(obj.Get()),
                                   StubTest::GetEntrypoint(self, kQuickQuickImtConflictTrampoline),
                                   self,
                                   contains_amethod,
                                   static_cast<size_t>(inf_contains->GetDexMethodIndex()));

  ASSERT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(static_cast<size_t>(JNI_FALSE), result);

  // Add object.

  env->CallBooleanMethod(jarray_list, add_jmethod, jobj);

  ASSERT_FALSE(self->IsExceptionPending()) << mirror::Object::PrettyTypeOf(self->GetException());

  // Contains.

  result =
      Invoke3WithReferrerAndHidden(reinterpret_cast<size_t>(conflict_method),
                                   reinterpret_cast<size_t>(array_list.Get()),
                                   reinterpret_cast<size_t>(obj.Get()),
                                   StubTest::GetEntrypoint(self, kQuickQuickImtConflictTrampoline),
                                   self,
                                   contains_amethod,
                                   static_cast<size_t>(inf_contains->GetDexMethodIndex()));

  ASSERT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(static_cast<size_t>(JNI_TRUE), result);

  // 2. regular interface trampoline

  result = Invoke3WithReferrer(static_cast<size_t>(inf_contains->GetDexMethodIndex()),
                               reinterpret_cast<size_t>(array_list.Get()),
                               reinterpret_cast<size_t>(obj.Get()),
                               StubTest::GetEntrypoint(self,
                                   kQuickInvokeInterfaceTrampolineWithAccessCheck),
                               self, contains_amethod);

  ASSERT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(static_cast<size_t>(JNI_TRUE), result);

  result = Invoke3WithReferrer(
      static_cast<size_t>(inf_contains->GetDexMethodIndex()),
      reinterpret_cast<size_t>(array_list.Get()), reinterpret_cast<size_t>(array_list.Get()),
      StubTest::GetEntrypoint(self, kQuickInvokeInterfaceTrampolineWithAccessCheck), self,
      contains_amethod);

  ASSERT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(static_cast<size_t>(JNI_FALSE), result);
#else
  LOG(INFO) << "Skipping imt as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping imt as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}
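
// Taken together, the invocations above exercise both interface-dispatch paths: the IMT
// conflict trampoline, which resolves List.contains through the hand-built ImtConflictTable,
// and the interface invocation trampoline, which resolves it from the dex method index plus
// the referrer. Both are expected to land in ArrayList.contains and report whether obj was
// added to the list.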

TEST_F(StubTest, StringIndexOf) {
#if defined(__arm__) || defined(__aarch64__) || defined(__mips__)
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  // Create some strings.
  // Use an array so we can index into it and use a matrix for expected results.
  // TODO: Shared backing arrays.
  const char* c_str[] = { "", "a", "ba", "cba", "dcba", "edcba", "asdfghjkl" };
  static constexpr size_t kStringCount = arraysize(c_str);
  const char c_char[] = { 'a', 'b', 'c', 'd', 'e' };
  static constexpr size_t kCharCount = arraysize(c_char);

  StackHandleScope<kStringCount> hs(self);
  Handle<mirror::String> s[kStringCount];

  for (size_t i = 0; i < kStringCount; ++i) {
    s[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), c_str[i]));
  }

  // Matrix of expectations. The dimensions are string, character, and start index (offset by
  // one, so that a start of -1 is also covered). The expected values are computed up front
  // with String::FastIndexOf, so this test relies on FastIndexOf being correct.
  static constexpr size_t kMaxLen = 9;
  DCHECK_LE(strlen(c_str[kStringCount-1]), kMaxLen) << "Please fix the indexof test.";

  // Last dimension: start, offset by 1.
  int32_t expected[kStringCount][kCharCount][kMaxLen + 3];
  for (size_t x = 0; x < kStringCount; ++x) {
    for (size_t y = 0; y < kCharCount; ++y) {
      for (size_t z = 0; z <= kMaxLen + 2; ++z) {
        expected[x][y][z] = s[x]->FastIndexOf(c_char[y], static_cast<int32_t>(z) - 1);
      }
    }
  }

  // Play with it...

  for (size_t x = 0; x < kStringCount; ++x) {
    for (size_t y = 0; y < kCharCount; ++y) {
      for (size_t z = 0; z <= kMaxLen + 2; ++z) {
        int32_t start = static_cast<int32_t>(z) - 1;

        // Test indexof for string x, char y, starting at start.
        size_t result = Invoke3(reinterpret_cast<size_t>(s[x].Get()), c_char[y], start,
                                StubTest::GetEntrypoint(self, kQuickIndexOf), self);

        EXPECT_FALSE(self->IsExceptionPending());

        // The result is a 32-bit signed integer.
        union {
          size_t r;
          int32_t i;
        } conv;
        conv.r = result;

        EXPECT_EQ(expected[x][y][z], conv.i) << "Wrong result for " << c_str[x] << " / " <<
            c_char[y] << " @ " << start;
      }
    }
  }

  // TODO: Deallocate things.

  // Tests done.
#else
  LOG(INFO) << "Skipping indexof as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping indexof as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}
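
// The union above is how these tests narrow a native-word stub result to the int32_t that the
// managed method actually returns; a minimal sketch of the pattern:
//
//   union { size_t r; int32_t i; } conv;
//   conv.r = result;         // full native word returned by Invoke3
//   int32_t value = conv.i;  // reinterpret the low 32 bits as a signed integer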

// TODO: Exercise the ReadBarrierMarkRegX entry points.

TEST_F(StubTest, ReadBarrier) {
#if defined(ART_USE_READ_BARRIER) && (defined(__i386__) || defined(__arm__) || \
      defined(__aarch64__) || defined(__mips__) || (defined(__x86_64__) && !defined(__APPLE__)))
  Thread* self = Thread::Current();

  const uintptr_t readBarrierSlow = StubTest::GetEntrypoint(self, kQuickReadBarrierSlow);

  // Create an object
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  StackHandleScope<2> hs(soa.Self());
  Handle<mirror::Class> c(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));

  // Build an object instance
  Handle<mirror::Object> obj(hs.NewHandle(c->AllocObject(soa.Self())));

  EXPECT_FALSE(self->IsExceptionPending());

  size_t result = Invoke3(0U, reinterpret_cast<size_t>(obj.Get()),
                          mirror::Object::ClassOffset().SizeValue(), readBarrierSlow, self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
  mirror::Class* klass = reinterpret_cast<mirror::Class*>(result);
  EXPECT_EQ(klass, obj->GetClass());

  // Tests done.
#else
  LOG(INFO) << "Skipping read_barrier_slow";
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping read_barrier_slow" << std::endl;
#endif
}
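
// The slow-path read barrier stub takes (ref, obj, offset) and returns the reference loaded
// from obj at the given offset; the test above passes a null ref and the class offset, so the
// result should be obj's class pointer, which EXPECT_EQ(klass, obj->GetClass()) verifies.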

TEST_F(StubTest, ReadBarrierForRoot) {
#if defined(ART_USE_READ_BARRIER) && (defined(__i386__) || defined(__arm__) || \
      defined(__aarch64__) || defined(__mips__) || (defined(__x86_64__) && !defined(__APPLE__)))
  Thread* self = Thread::Current();

  const uintptr_t readBarrierForRootSlow =
      StubTest::GetEntrypoint(self, kQuickReadBarrierForRootSlow);

  // Create an object
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  StackHandleScope<1> hs(soa.Self());

  Handle<mirror::String> obj(
      hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));

  EXPECT_FALSE(self->IsExceptionPending());

  GcRoot<mirror::Class>& root = mirror::String::java_lang_String_;
  size_t result = Invoke3(reinterpret_cast<size_t>(&root), 0U, 0U, readBarrierForRootSlow, self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
  mirror::Class* klass = reinterpret_cast<mirror::Class*>(result);
  EXPECT_EQ(klass, obj->GetClass());

  // Tests done.
#else
  LOG(INFO) << "Skipping read_barrier_for_root_slow";
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping read_barrier_for_root_slow" << std::endl;
#endif
}

}  // namespace art