/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cstdio>

#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/callee_save_type.h"
#include "base/enums.h"
#include "class_linker-inl.h"
#include "class_root.h"
#include "common_runtime_test.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "imt_conflict_table.h"
#include "jni/jni_internal.h"
#include "linear_alloc.h"
#include "mirror/class-alloc-inl.h"
#include "mirror/string-inl.h"
#include "mirror/object_array-alloc-inl.h"
#include "scoped_thread_state_change-inl.h"

namespace art {


class StubTest : public CommonRuntimeTest {
 protected:
  // We need callee-save methods set up in the Runtime for exceptions.
  void SetUp() override {
    // Do the normal setup.
    CommonRuntimeTest::SetUp();

    {
      // Create callee-save methods.
      ScopedObjectAccess soa(Thread::Current());
      runtime_->SetInstructionSet(kRuntimeISA);
      for (uint32_t i = 0; i < static_cast<uint32_t>(CalleeSaveType::kLastCalleeSaveType); ++i) {
        CalleeSaveType type = CalleeSaveType(i);
        if (!runtime_->HasCalleeSaveMethod(type)) {
          runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(), type);
        }
      }
    }
  }

  void SetUpRuntimeOptions(RuntimeOptions *options) override {
    // Use a smaller heap.
    for (std::pair<std::string, const void*>& pair : *options) {
      if (pair.first.find("-Xmx") == 0) {
        pair.first = "-Xmx4M";  // Smallest we can go.
      }
    }
    options->push_back(std::make_pair("-Xint", nullptr));
  }

  // Helper function needed since TEST_F makes a new class.
  Thread::tls_ptr_sized_values* GetTlsPtr(Thread* self) {
    return &self->tlsPtr_;
  }

 public:
  size_t Invoke3(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, Thread* self) {
    return Invoke3WithReferrer(arg0, arg1, arg2, code, self, nullptr);
  }

  // TODO: Set up a frame according to referrer's specs.
  size_t Invoke3WithReferrer(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, Thread* self,
                             ArtMethod* referrer) {
    return Invoke3WithReferrerAndHidden(arg0, arg1, arg2, code, self, referrer, 0);
  }

  // TODO: Set up a frame according to referrer's specs.
  size_t Invoke3WithReferrerAndHidden(size_t arg0, size_t arg1, size_t arg2, uintptr_t code,
                                      Thread* self, ArtMethod* referrer, size_t hidden) {
    // Push a transition back into managed code onto the linked list in thread.
    ManagedStack fragment;
    self->PushManagedStackFragment(&fragment);

    size_t result;
    size_t fpr_result = 0;
#if defined(__i386__)
    // TODO: Set the thread?
#define PUSH(reg) "push " # reg "\n\t .cfi_adjust_cfa_offset 4\n\t"
#define POP(reg) "pop " # reg "\n\t .cfi_adjust_cfa_offset -4\n\t"
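    // Note: each PUSH/POP pairs the stack op with a .cfi_adjust_cfa_offset directive so the
    // unwinder's notion of the canonical frame address stays correct across these hand-written
    // spills; without it, a crash inside the stub would produce a bogus backtrace.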
    __asm__ __volatile__(
        "movd %[hidden], %%xmm7\n\t"  // This is a memory op, so do this early. If it is off of
                                      // esp, then we won't be able to access it after spilling.

        // Spill 6 registers.
        PUSH(%%ebx)
        PUSH(%%ecx)
        PUSH(%%edx)
        PUSH(%%esi)
        PUSH(%%edi)
        PUSH(%%ebp)

        // Store the inputs to the stack, but keep the referrer up top, less work.
        PUSH(%[referrer])           // Align stack.
        PUSH(%[referrer])           // Store referrer.

        PUSH(%[arg0])
        PUSH(%[arg1])
        PUSH(%[arg2])
        PUSH(%[code])
        // Now read them back into the required registers.
        POP(%%edi)
        POP(%%edx)
        POP(%%ecx)
        POP(%%eax)
        // Call is prepared now.

        "call *%%edi\n\t"           // Call the stub.
        "addl $8, %%esp\n\t"        // Pop referrer and padding.
        ".cfi_adjust_cfa_offset -8\n\t"

        // Restore 6 registers.
        POP(%%ebp)
        POP(%%edi)
        POP(%%esi)
        POP(%%edx)
        POP(%%ecx)
        POP(%%ebx)

        : "=a" (result)
          // Use the result from eax.
        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code),
          [referrer]"r"(referrer), [hidden]"m"(hidden)
          // This places code into edi, arg0 into eax, arg1 into ecx, and arg2 into edx.
        : "memory", "xmm7");  // clobber.
#undef PUSH
#undef POP
#elif defined(__arm__)
    __asm__ __volatile__(
        "push {r1-r12, lr}\n\t"     // Save state, 13*4B = 52B.
        ".cfi_adjust_cfa_offset 52\n\t"
        "push {r9}\n\t"
        ".cfi_adjust_cfa_offset 4\n\t"
        "mov r9, %[referrer]\n\t"
        "str r9, [sp, #-8]!\n\t"   // Push referrer, +4B padding so 16B aligned.
        ".cfi_adjust_cfa_offset 8\n\t"
        "ldr r9, [sp, #8]\n\t"

        // Push everything on the stack, so we don't rely on the order. What a mess. :-(
        "sub sp, sp, #24\n\t"
        "str %[arg0], [sp]\n\t"
        "str %[arg1], [sp, #4]\n\t"
        "str %[arg2], [sp, #8]\n\t"
        "str %[code], [sp, #12]\n\t"
        "str %[self], [sp, #16]\n\t"
        "str %[hidden], [sp, #20]\n\t"
        "ldr r0, [sp]\n\t"
        "ldr r1, [sp, #4]\n\t"
        "ldr r2, [sp, #8]\n\t"
        "ldr r3, [sp, #12]\n\t"
        "ldr r9, [sp, #16]\n\t"
        "ldr r12, [sp, #20]\n\t"
        "add sp, sp, #24\n\t"

        "blx r3\n\t"                // Call the stub.
        "add sp, sp, #12\n\t"       // Pop referrer+padding and the saved r9.
        ".cfi_adjust_cfa_offset -12\n\t"
        "pop {r1-r12, lr}\n\t"      // Restore state.
        ".cfi_adjust_cfa_offset -52\n\t"
        "mov %[result], r0\n\t"     // Save the result.
        : [result] "=r" (result)
          // Use the result from r0.
        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
          [referrer] "r"(referrer), [hidden] "r"(hidden)
        : "r0", "memory");  // clobber.
#elif defined(__aarch64__)
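    // Note: AAPCS64 makes d8-d15 callee-saved (their low 64 bits), so a correct stub must
    // preserve them across the call. The block below seeds them with known bit patterns before
    // the call and re-checks the patterns afterwards, reporting a mismatch through fpr_result.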
    __asm__ __volatile__(
        // Spill x0-x7 which we say we don't clobber. May contain args.
        "sub sp, sp, #80\n\t"
        ".cfi_adjust_cfa_offset 80\n\t"
        "stp x0, x1, [sp]\n\t"
        "stp x2, x3, [sp, #16]\n\t"
        "stp x4, x5, [sp, #32]\n\t"
        "stp x6, x7, [sp, #48]\n\t"
        // To be extra defensive, store x20,x21. We do this because some of the stubs might make a
        // transition into the runtime via the blr instruction below and *not* save x20.
        "stp x20, x21, [sp, #64]\n\t"

        "sub sp, sp, #16\n\t"          // Reserve stack space, 16B aligned.
        ".cfi_adjust_cfa_offset 16\n\t"
        "str %[referrer], [sp]\n\t"    // referrer

        // Push everything on the stack, so we don't rely on the order. What a mess. :-(
        "sub sp, sp, #48\n\t"
        ".cfi_adjust_cfa_offset 48\n\t"
        // All things are "r" constraints, so direct str/stp should work.
        "stp %[arg0], %[arg1], [sp]\n\t"
        "stp %[arg2], %[code], [sp, #16]\n\t"
        "stp %[self], %[hidden], [sp, #32]\n\t"

        // Now we definitely have x0-x3 free, use them to garble d8 - d15.
        "movk x0, #0xfad0\n\t"
        "movk x0, #0xebad, lsl #16\n\t"
        "movk x0, #0xfad0, lsl #32\n\t"
        "movk x0, #0xebad, lsl #48\n\t"
        "fmov d8, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d9, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d10, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d11, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d12, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d13, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d14, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d15, x0\n\t"

        // Load call params into the right registers.
        "ldp x0, x1, [sp]\n\t"
        "ldp x2, x3, [sp, #16]\n\t"
        "ldp x19, x17, [sp, #32]\n\t"
        "add sp, sp, #48\n\t"
        ".cfi_adjust_cfa_offset -48\n\t"

        "blr x3\n\t"              // Call the stub.
        "mov x8, x0\n\t"          // Store result.
        "add sp, sp, #16\n\t"     // Drop the quick "frame".
        ".cfi_adjust_cfa_offset -16\n\t"

        // Test d8 - d15. We can use x1 and x2.
        "movk x1, #0xfad0\n\t"
        "movk x1, #0xebad, lsl #16\n\t"
        "movk x1, #0xfad0, lsl #32\n\t"
        "movk x1, #0xebad, lsl #48\n\t"
        "fmov x2, d8\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d9\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d10\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d11\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d12\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d13\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d14\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d15\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"

        "mov x9, #0\n\t"              // Use x9 as flag, in clobber list.

        // Finish up.
        "2:\n\t"
        "ldp x0, x1, [sp]\n\t"        // Restore stuff not named clobbered, may contain fpr_result.
        "ldp x2, x3, [sp, #16]\n\t"
        "ldp x4, x5, [sp, #32]\n\t"
        "ldp x6, x7, [sp, #48]\n\t"
        "ldp x20, x21, [sp, #64]\n\t"
        "add sp, sp, #80\n\t"         // Free stack space, now sp as on entry.
        ".cfi_adjust_cfa_offset -80\n\t"

        "str x9, %[fpr_result]\n\t"   // Store the FPR comparison result.
        "mov %[result], x8\n\t"       // Store the call result.

        "b 3f\n\t"                    // Goto end.

        // Failed fpr verification.
        "1:\n\t"
        "mov x9, #1\n\t"
        "b 2b\n\t"                    // Goto finish-up.

        // End.
        "3:\n\t"
        : [result] "=r" (result)
          // Use the result from x0.
        : [arg0] "0"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
          [referrer] "r"(referrer), [hidden] "r"(hidden), [fpr_result] "m" (fpr_result)
          // X18 is a reserved register, cannot be clobbered.
          // Leave one register unclobbered, which is needed for compiling with
          // -fstack-protector-strong. According to AAPCS64 registers x9-x15 are caller-saved,
          // which means we should unclobber one of the callee-saved registers that are unused.
          // Here we use x20.
          // http://b/72613441, Clang 7.0 asks for one more register, so we do not reserve x21.
        : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19",
          "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x30",
          "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
          "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
          "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
          "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
          "memory");
#elif defined(__mips__) && !defined(__LP64__)
    __asm__ __volatile__ (
        // Spill a0-a3 and t0-t7 which we say we don't clobber. May contain args.
        "addiu $sp, $sp, -64\n\t"
        "sw $a0, 0($sp)\n\t"
        "sw $a1, 4($sp)\n\t"
        "sw $a2, 8($sp)\n\t"
        "sw $a3, 12($sp)\n\t"
        "sw $t0, 16($sp)\n\t"
        "sw $t1, 20($sp)\n\t"
        "sw $t2, 24($sp)\n\t"
        "sw $t3, 28($sp)\n\t"
        "sw $t4, 32($sp)\n\t"
        "sw $t5, 36($sp)\n\t"
        "sw $t6, 40($sp)\n\t"
        "sw $t7, 44($sp)\n\t"
        // Spill gp register since it is caller save.
        "sw $gp, 52($sp)\n\t"

        "addiu $sp, $sp, -16\n\t"  // Reserve stack space, 16B aligned.
        "sw %[referrer], 0($sp)\n\t"

        // Push everything on the stack, so we don't rely on the order.
        "addiu $sp, $sp, -24\n\t"
        "sw %[arg0], 0($sp)\n\t"
        "sw %[arg1], 4($sp)\n\t"
        "sw %[arg2], 8($sp)\n\t"
        "sw %[code], 12($sp)\n\t"
        "sw %[self], 16($sp)\n\t"
        "sw %[hidden], 20($sp)\n\t"

        // Load call params into the right registers.
        "lw $a0, 0($sp)\n\t"
        "lw $a1, 4($sp)\n\t"
        "lw $a2, 8($sp)\n\t"
        "lw $t9, 12($sp)\n\t"
        "lw $s1, 16($sp)\n\t"
        "lw $t7, 20($sp)\n\t"
        "addiu $sp, $sp, 24\n\t"

        "jalr $t9\n\t"             // Call the stub.
        "nop\n\t"
        "addiu $sp, $sp, 16\n\t"   // Drop the quick "frame".

        // Restore stuff not named clobbered.
        "lw $a0, 0($sp)\n\t"
        "lw $a1, 4($sp)\n\t"
        "lw $a2, 8($sp)\n\t"
        "lw $a3, 12($sp)\n\t"
        "lw $t0, 16($sp)\n\t"
        "lw $t1, 20($sp)\n\t"
        "lw $t2, 24($sp)\n\t"
        "lw $t3, 28($sp)\n\t"
        "lw $t4, 32($sp)\n\t"
        "lw $t5, 36($sp)\n\t"
        "lw $t6, 40($sp)\n\t"
        "lw $t7, 44($sp)\n\t"
        // Restore gp.
        "lw $gp, 52($sp)\n\t"
        "addiu $sp, $sp, 64\n\t"   // Free stack space, now sp as on entry.

        "move %[result], $v0\n\t"  // Store the call result.
        : [result] "=r" (result)
        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
          [referrer] "r"(referrer), [hidden] "r"(hidden)
        : "at", "v0", "v1", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "t8", "t9", "k0", "k1",
          "fp", "ra",
          "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", "$f9", "$f10", "$f11",
          "$f12", "$f13", "$f14", "$f15", "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22",
          "$f23", "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31",
          "memory");  // clobber.
#elif defined(__mips__) && defined(__LP64__)
    __asm__ __volatile__ (
        // Spill a0-a7 which we say we don't clobber. May contain args.
        "daddiu $sp, $sp, -64\n\t"
        "sd $a0, 0($sp)\n\t"
        "sd $a1, 8($sp)\n\t"
        "sd $a2, 16($sp)\n\t"
        "sd $a3, 24($sp)\n\t"
        "sd $a4, 32($sp)\n\t"
        "sd $a5, 40($sp)\n\t"
        "sd $a6, 48($sp)\n\t"
        "sd $a7, 56($sp)\n\t"

        "daddiu $sp, $sp, -16\n\t"  // Reserve stack space, 16B aligned.
        "sd %[referrer], 0($sp)\n\t"

        // Push everything on the stack, so we don't rely on the order.
        "daddiu $sp, $sp, -48\n\t"
        "sd %[arg0], 0($sp)\n\t"
        "sd %[arg1], 8($sp)\n\t"
        "sd %[arg2], 16($sp)\n\t"
        "sd %[code], 24($sp)\n\t"
        "sd %[self], 32($sp)\n\t"
        "sd %[hidden], 40($sp)\n\t"

        // Load call params into the right registers.
        "ld $a0, 0($sp)\n\t"
        "ld $a1, 8($sp)\n\t"
        "ld $a2, 16($sp)\n\t"
        "ld $t9, 24($sp)\n\t"
        "ld $s1, 32($sp)\n\t"
        "ld $t0, 40($sp)\n\t"
        "daddiu $sp, $sp, 48\n\t"

        "jalr $t9\n\t"              // Call the stub.
        "nop\n\t"
        "daddiu $sp, $sp, 16\n\t"   // Drop the quick "frame".

        // Restore stuff not named clobbered.
        "ld $a0, 0($sp)\n\t"
        "ld $a1, 8($sp)\n\t"
        "ld $a2, 16($sp)\n\t"
        "ld $a3, 24($sp)\n\t"
        "ld $a4, 32($sp)\n\t"
        "ld $a5, 40($sp)\n\t"
        "ld $a6, 48($sp)\n\t"
        "ld $a7, 56($sp)\n\t"
        "daddiu $sp, $sp, 64\n\t"

        "move %[result], $v0\n\t"   // Store the call result.
        : [result] "=r" (result)
        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
          [referrer] "r"(referrer), [hidden] "r"(hidden)
        // Instead of the aliases t0-t3, the register names $12-$15 are used in the clobber list
        // because t0-t3 are ambiguous.
        : "at", "v0", "v1", "$12", "$13", "$14", "$15", "s0", "s1", "s2", "s3", "s4", "s5", "s6",
          "s7", "t8", "t9", "k0", "k1", "fp", "ra",
          "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", "$f9", "$f10", "$f11",
          "$f12", "$f13", "$f14", "$f15", "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22",
          "$f23", "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31",
          "memory");  // clobber.
#elif defined(__x86_64__) && !defined(__APPLE__)
#define PUSH(reg) "pushq " # reg "\n\t .cfi_adjust_cfa_offset 8\n\t"
#define POP(reg) "popq " # reg "\n\t .cfi_adjust_cfa_offset -8\n\t"
    // Note: Uses the native convention. We do a callee-save regimen by manually spilling and
    //       restoring almost all registers.
    // TODO: Set the thread?
    __asm__ __volatile__(
        // Spill almost everything (except rax, rsp). 14 registers.
        PUSH(%%rbx)
        PUSH(%%rcx)
        PUSH(%%rdx)
        PUSH(%%rsi)
        PUSH(%%rdi)
        PUSH(%%rbp)
        PUSH(%%r8)
        PUSH(%%r9)
        PUSH(%%r10)
        PUSH(%%r11)
        PUSH(%%r12)
        PUSH(%%r13)
        PUSH(%%r14)
        PUSH(%%r15)

        PUSH(%[referrer])              // Push referrer & 16B alignment padding.
        PUSH(%[referrer])

        // Now juggle the input registers.
        PUSH(%[arg0])
        PUSH(%[arg1])
        PUSH(%[arg2])
        PUSH(%[hidden])
        PUSH(%[code])
        POP(%%r8)
        POP(%%rax)
        POP(%%rdx)
        POP(%%rsi)
        POP(%%rdi)

        "call *%%r8\n\t"                  // Call the stub.
        "addq $16, %%rsp\n\t"             // Pop referrer and padding.
        ".cfi_adjust_cfa_offset -16\n\t"

        POP(%%r15)
        POP(%%r14)
        POP(%%r13)
        POP(%%r12)
        POP(%%r11)
        POP(%%r10)
        POP(%%r9)
        POP(%%r8)
        POP(%%rbp)
        POP(%%rdi)
        POP(%%rsi)
        POP(%%rdx)
        POP(%%rcx)
        POP(%%rbx)

        : "=a" (result)
        // Use the result from rax.
        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code),
          [referrer] "r"(referrer), [hidden] "r"(hidden)
        // This places arg0 into rdi, arg1 into rsi, arg2 into rdx, and code into some other
        // register. We can't use "b" (rbx), as ASAN uses this for the frame pointer.
        : "memory");  // We spill and restore (almost) all registers, so only mention memory here.
#undef PUSH
#undef POP
#else
    UNUSED(arg0, arg1, arg2, code, referrer, hidden);
    LOG(WARNING) << "Was asked to invoke for an architecture I do not understand.";
    result = 0;
#endif
    // Pop transition.
    self->PopManagedStackFragment(fragment);

    fp_result = fpr_result;
    EXPECT_EQ(0U, fp_result);

    return result;
  }

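  // Read the address of a quick entrypoint stub out of the entrypoint table embedded in the
  // Thread object: GetThreadOffset<> yields the byte offset of the table slot for `entrypoint`,
  // and the load below fetches the function pointer stored there.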
  static uintptr_t GetEntrypoint(Thread* self, QuickEntrypointEnum entrypoint) {
    int32_t offset;
    offset = GetThreadOffset<kRuntimePointerSize>(entrypoint).Int32Value();
    return *reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(self) + offset);
  }

 protected:
  size_t fp_result;
};

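// A minimal sketch of the pattern the tests below follow (kQuickMemcpy is simply the first
// entrypoint exercised; any QuickEntrypointEnum value is looked up the same way):
//   Thread* self = Thread::Current();
//   uintptr_t stub = StubTest::GetEntrypoint(self, kQuickMemcpy);
//   size_t result = Invoke3(arg0, arg1, arg2, stub, self);
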
TEST_F(StubTest, Memcpy) {
#if defined(__i386__) || (defined(__x86_64__) && !defined(__APPLE__)) || defined(__mips__)
  Thread* self = Thread::Current();

  uint32_t orig[20];
  uint32_t trg[20];
  for (size_t i = 0; i < 20; ++i) {
    orig[i] = i;
    trg[i] = 0;
  }

  Invoke3(reinterpret_cast<size_t>(&trg[4]), reinterpret_cast<size_t>(&orig[4]),
          10 * sizeof(uint32_t), StubTest::GetEntrypoint(self, kQuickMemcpy), self);

  EXPECT_EQ(orig[0], trg[0]);

  for (size_t i = 1; i < 4; ++i) {
    EXPECT_NE(orig[i], trg[i]);
  }

  for (size_t i = 4; i < 14; ++i) {
    EXPECT_EQ(orig[i], trg[i]);
  }

  for (size_t i = 14; i < 20; ++i) {
    EXPECT_NE(orig[i], trg[i]);
  }

  // TODO: Test overlapping?

#else
  LOG(INFO) << "Skipping memcpy as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping memcpy as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

TEST_F(StubTest, LockObject) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  static constexpr size_t kThinLockLoops = 100;

  Thread* self = Thread::Current();

  const uintptr_t art_quick_lock_object = StubTest::GetEntrypoint(self, kQuickLockObject);

  // Create an object.
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  StackHandleScope<2> hs(soa.Self());
  Handle<mirror::String> obj(
      hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
  LockWord lock = obj->GetLockWord(false);
  LockWord::LockState old_state = lock.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);

  Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);

  LockWord lock_after = obj->GetLockWord(false);
  LockWord::LockState new_state = lock_after.GetState();
  EXPECT_EQ(LockWord::LockState::kThinLocked, new_state);
  EXPECT_EQ(lock_after.ThinLockCount(), 0U);  // Thin lock starts count at zero.

  for (size_t i = 1; i < kThinLockLoops; ++i) {
    Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);

    // Check we're at lock count i.

    LockWord l_inc = obj->GetLockWord(false);
    LockWord::LockState l_inc_state = l_inc.GetState();
    EXPECT_EQ(LockWord::LockState::kThinLocked, l_inc_state);
    EXPECT_EQ(l_inc.ThinLockCount(), i);
  }

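  // Note: the lock word cannot hold an identity hash code and a thin-lock owner/count at the
  // same time, so locking an object whose lock word already stores its hash forces inflation
  // to a full Monitor (a "fat" lock). The next block relies on this.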
  // Force a fat lock by running identity hashcode to fill up lock word.
  Handle<mirror::String> obj2(hs.NewHandle(
      mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));

  obj2->IdentityHashCode();

  Invoke3(reinterpret_cast<size_t>(obj2.Get()), 0U, 0U, art_quick_lock_object, self);

  LockWord lock_after2 = obj2->GetLockWord(false);
  LockWord::LockState new_state2 = lock_after2.GetState();
  EXPECT_EQ(LockWord::LockState::kFatLocked, new_state2);
  EXPECT_NE(lock_after2.FatLockMonitor(), static_cast<Monitor*>(nullptr));

  // Test done.
#else
  LOG(INFO) << "Skipping lock_object as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping lock_object as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

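// A tiny deterministic PRNG so the stress test below is reproducible: a Lehmer/MINSTD-style
// multiplicative generator (multiplier 48271, modulus 2^31 - 1) with a small additive offset.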
class RandGen {
 public:
  explicit RandGen(uint32_t seed) : val_(seed) {}

  uint32_t next() {
    val_ = val_ * 48271 % 2147483647 + 13;
    return val_;
  }

  uint32_t val_;
};


// NO_THREAD_SAFETY_ANALYSIS as we do not want to grab exclusive mutator lock for MonitorInfo.
static void TestUnlockObject(StubTest* test) NO_THREAD_SAFETY_ANALYSIS {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  static constexpr size_t kThinLockLoops = 100;

  Thread* self = Thread::Current();

  const uintptr_t art_quick_lock_object = StubTest::GetEntrypoint(self, kQuickLockObject);
  const uintptr_t art_quick_unlock_object = StubTest::GetEntrypoint(self, kQuickUnlockObject);
  // Create an object.
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init
  static constexpr size_t kNumberOfLocks = 10;  // Number of objects (= number of locks).
  StackHandleScope<kNumberOfLocks + 1> hs(self);
  Handle<mirror::String> obj(
      hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
  LockWord lock = obj->GetLockWord(false);
  LockWord::LockState old_state = lock.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);

  test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_unlock_object, self);
  // This should be an illegal monitor state.
  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

  LockWord lock_after = obj->GetLockWord(false);
  LockWord::LockState new_state = lock_after.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, new_state);

  test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);

  LockWord lock_after2 = obj->GetLockWord(false);
  LockWord::LockState new_state2 = lock_after2.GetState();
  EXPECT_EQ(LockWord::LockState::kThinLocked, new_state2);

  test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_unlock_object, self);

  LockWord lock_after3 = obj->GetLockWord(false);
  LockWord::LockState new_state3 = lock_after3.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, new_state3);

  // Stress test:
  // Keep a number of objects and their locks in flight. Randomly lock or unlock one of them in
  // each step.

  RandGen r(0x1234);

  constexpr size_t kIterations = 10000;  // Number of iterations.
  constexpr size_t kMoveToFat = 1000;    // Chance of 1:kMoveToFat to make a lock fat.

  size_t counts[kNumberOfLocks];
  bool fat[kNumberOfLocks];  // Whether a lock should be thin or fat.
  Handle<mirror::String> objects[kNumberOfLocks];

  // Initialize = allocate.
  for (size_t i = 0; i < kNumberOfLocks; ++i) {
    counts[i] = 0;
    fat[i] = false;
    objects[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), ""));
  }

  for (size_t i = 0; i < kIterations; ++i) {
    // Select which lock to update.
    size_t index = r.next() % kNumberOfLocks;

    // Make lock fat?
    if (!fat[index] && (r.next() % kMoveToFat == 0)) {
      fat[index] = true;
      objects[index]->IdentityHashCode();

      LockWord lock_iter = objects[index]->GetLockWord(false);
      LockWord::LockState iter_state = lock_iter.GetState();
      if (counts[index] == 0) {
        EXPECT_EQ(LockWord::LockState::kHashCode, iter_state);
      } else {
        EXPECT_EQ(LockWord::LockState::kFatLocked, iter_state);
      }
    } else {
      bool take_lock;  // Whether to lock or unlock in this step.
      if (counts[index] == 0) {
        take_lock = true;
      } else if (counts[index] == kThinLockLoops) {
        take_lock = false;
      } else {
        // Randomly.
        take_lock = r.next() % 2 == 0;
      }

      if (take_lock) {
        test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U, art_quick_lock_object,
                      self);
        counts[index]++;
      } else {
        test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U,
                      art_quick_unlock_object, self);
        counts[index]--;
      }

      EXPECT_FALSE(self->IsExceptionPending());

      // Check the new state.
      LockWord lock_iter = objects[index]->GetLockWord(true);
      LockWord::LockState iter_state = lock_iter.GetState();
      if (fat[index]) {
        // Abuse MonitorInfo.
        EXPECT_EQ(LockWord::LockState::kFatLocked, iter_state) << index;
        MonitorInfo info(objects[index].Get());
        EXPECT_EQ(counts[index], info.entry_count_) << index;
      } else {
        if (counts[index] > 0) {
          EXPECT_EQ(LockWord::LockState::kThinLocked, iter_state);
          EXPECT_EQ(counts[index] - 1, lock_iter.ThinLockCount());
        } else {
          EXPECT_EQ(LockWord::LockState::kUnlocked, iter_state);
        }
      }
    }
  }

  // Unlock the remaining count times and then check it's unlocked. Then deallocate.
  // Go reverse order to correctly handle Handles.
  for (size_t i = 0; i < kNumberOfLocks; ++i) {
    size_t index = kNumberOfLocks - 1 - i;
    size_t count = counts[index];
    while (count > 0) {
      test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U, art_quick_unlock_object,
                    self);
      count--;
    }

    LockWord lock_after4 = objects[index]->GetLockWord(false);
    LockWord::LockState new_state4 = lock_after4.GetState();
    EXPECT_TRUE(LockWord::LockState::kUnlocked == new_state4
                || LockWord::LockState::kFatLocked == new_state4);
  }

  // Test done.
#else
  UNUSED(test);
  LOG(INFO) << "Skipping unlock_object as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping unlock_object as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

TEST_F(StubTest, UnlockObject) {
  // This will lead to monitor error messages in the log.
  ScopedLogSeverity sls(LogSeverity::FATAL);

  TestUnlockObject(this);
}

#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
extern "C" void art_quick_check_instance_of(void);
#endif

TEST_F(StubTest, CheckCast) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  Thread* self = Thread::Current();

  const uintptr_t art_quick_check_instance_of =
      StubTest::GetEntrypoint(self, kQuickCheckInstanceOf);

  // Find some classes.
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  VariableSizedHandleScope hs(soa.Self());
  Handle<mirror::Class> klass_obj(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
  Handle<mirror::Class> klass_str(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/String;")));
  Handle<mirror::Class> klass_list(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/util/List;")));
  Handle<mirror::Class> klass_cloneable(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Cloneable;")));
  Handle<mirror::Class> klass_array_list(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/util/ArrayList;")));
  Handle<mirror::Object> obj(hs.NewHandle(klass_obj->AllocObject(soa.Self())));
  Handle<mirror::String> string(hs.NewHandle(
      mirror::String::AllocFromModifiedUtf8(soa.Self(), "ABCD")));
  Handle<mirror::Object> array_list(hs.NewHandle(klass_array_list->AllocObject(soa.Self())));

  EXPECT_FALSE(self->IsExceptionPending());

  Invoke3(reinterpret_cast<size_t>(obj.Get()),
          reinterpret_cast<size_t>(klass_obj.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_FALSE(self->IsExceptionPending());

  // Expected true: Test string instance of java.lang.String.
  Invoke3(reinterpret_cast<size_t>(string.Get()),
          reinterpret_cast<size_t>(klass_str.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_FALSE(self->IsExceptionPending());

  // Expected true: Test string instance of java.lang.Object.
  Invoke3(reinterpret_cast<size_t>(string.Get()),
          reinterpret_cast<size_t>(klass_obj.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_FALSE(self->IsExceptionPending());

  // Expected false: Test object instance of java.lang.String.
  Invoke3(reinterpret_cast<size_t>(obj.Get()),
          reinterpret_cast<size_t>(klass_str.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

  Invoke3(reinterpret_cast<size_t>(array_list.Get()),
          reinterpret_cast<size_t>(klass_list.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_FALSE(self->IsExceptionPending());

  Invoke3(reinterpret_cast<size_t>(array_list.Get()),
          reinterpret_cast<size_t>(klass_cloneable.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_FALSE(self->IsExceptionPending());

  Invoke3(reinterpret_cast<size_t>(string.Get()),
          reinterpret_cast<size_t>(klass_array_list.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

  Invoke3(reinterpret_cast<size_t>(string.Get()),
          reinterpret_cast<size_t>(klass_cloneable.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

#else
  LOG(INFO) << "Skipping check_cast as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping check_cast as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

TEST_F(StubTest, AllocObject) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  // This will lead to OOM error messages in the log.
  ScopedLogSeverity sls(LogSeverity::FATAL);

  // TODO: Check the "Unresolved" allocation stubs

  Thread* self = Thread::Current();
  // Create an object.
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  StackHandleScope<2> hs(soa.Self());
  Handle<mirror::Class> c(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));

  // Play with it...

  EXPECT_FALSE(self->IsExceptionPending());
  {
    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
                            StubTest::GetEntrypoint(self, kQuickAllocObjectWithChecks),
                            self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    ObjPtr<mirror::Object> obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_OBJ_PTR_EQ(c.Get(), obj->GetClass());
    VerifyObject(obj);
  }

  {
    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
                            StubTest::GetEntrypoint(self, kQuickAllocObjectResolved),
                            self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    ObjPtr<mirror::Object> obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_OBJ_PTR_EQ(c.Get(), obj->GetClass());
    VerifyObject(obj);
  }

  {
    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
                            StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
                            self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    ObjPtr<mirror::Object> obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_OBJ_PTR_EQ(c.Get(), obj->GetClass());
    VerifyObject(obj);
  }

  // Failure tests.

  // Out-of-memory.
  {
    Runtime::Current()->GetHeap()->SetIdealFootprint(1 * GB);

    // Array helps to fill memory faster.
    Handle<mirror::Class> ca(
        hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));

    // Use arbitrary large amount for now.
    static const size_t kMaxHandles = 1000000;
    std::unique_ptr<StackHandleScope<kMaxHandles>> hsp(new StackHandleScope<kMaxHandles>(self));

    std::vector<Handle<mirror::Object>> handles;
    // Start allocating with 128K.
    size_t length = 128 * KB / 4;
    while (length > 10) {
      Handle<mirror::Object> h(hsp->NewHandle<mirror::Object>(
          mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), ca.Get(), length / 4)));
      if (self->IsExceptionPending() || h == nullptr) {
        self->ClearException();

        // Try a smaller length.
        length = length / 8;
        // Use at most half the reported free space.
        size_t mem = Runtime::Current()->GetHeap()->GetFreeMemory();
        if (length * 8 > mem) {
          length = mem / 8;
        }
      } else {
        handles.push_back(h);
      }
    }
    LOG(INFO) << "Used " << handles.size() << " arrays to fill space.";

    // Allocate simple objects till it fails.
    while (!self->IsExceptionPending()) {
      Handle<mirror::Object> h = hsp->NewHandle(c->AllocObject(soa.Self()));
      if (!self->IsExceptionPending() && h != nullptr) {
        handles.push_back(h);
      }
    }
    self->ClearException();

    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
                            StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
                            self);
    EXPECT_TRUE(self->IsExceptionPending());
    self->ClearException();
    EXPECT_EQ(reinterpret_cast<size_t>(nullptr), result);
  }

  // Tests done.
#else
  LOG(INFO) << "Skipping alloc_object as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping alloc_object as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

TEST_F(StubTest, AllocObjectArray) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  // TODO: Check the "Unresolved" allocation stubs

  // This will lead to OOM error messages in the log.
  ScopedLogSeverity sls(LogSeverity::FATAL);

  Thread* self = Thread::Current();
  // Create an object.
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  StackHandleScope<1> hs(self);
  Handle<mirror::Class> c(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));

  // Play with it...

  EXPECT_FALSE(self->IsExceptionPending());

  {
    // We can use null in the second argument as we do not need a method here (not used in
    // resolved/initialized cases).
    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 10U,
                            reinterpret_cast<size_t>(nullptr),
                            StubTest::GetEntrypoint(self, kQuickAllocArrayResolved32),
                            self);
    EXPECT_FALSE(self->IsExceptionPending()) << mirror::Object::PrettyTypeOf(self->GetException());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    ObjPtr<mirror::Object> obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_TRUE(obj->IsArrayInstance());
    EXPECT_TRUE(obj->IsObjectArray());
    EXPECT_OBJ_PTR_EQ(c.Get(), obj->GetClass());
    VerifyObject(obj);
    ObjPtr<mirror::Array> array = reinterpret_cast<mirror::Array*>(result);
    EXPECT_EQ(array->GetLength(), 10);
  }

  // Failure tests.

  // Out-of-memory.
  {
    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()),
                            GB,  // that should fail...
                            reinterpret_cast<size_t>(nullptr),
                            StubTest::GetEntrypoint(self, kQuickAllocArrayResolved32),
                            self);

    EXPECT_TRUE(self->IsExceptionPending());
    self->ClearException();
    EXPECT_EQ(reinterpret_cast<size_t>(nullptr), result);
  }

  // Tests done.
#else
  LOG(INFO) << "Skipping alloc_array as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping alloc_array as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

TEST_F(StubTest, StringCompareTo) {
  TEST_DISABLED_FOR_STRING_COMPRESSION();
  // There is no StringCompareTo runtime entrypoint for __arm__ or __aarch64__.
#if defined(__i386__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  // TODO: Check the "Unresolved" allocation stubs

  Thread* self = Thread::Current();

  const uintptr_t art_quick_string_compareto = StubTest::GetEntrypoint(self, kQuickStringCompareTo);

  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  // Create some strings.
  // Use an array so we can index into it and use a matrix for expected results.
  // Setup: The first half is standard. The second half uses a non-zero offset.
  // TODO: Shared backing arrays.
  const char* c[] = { "", "", "a", "aa", "ab",
      "aacaacaacaacaacaac",  // This one's under the default limit to go to __memcmp16.
      "aacaacaacaacaacaacaacaacaacaacaacaac",     // This one's over.
      "aacaacaacaacaacaacaacaacaacaacaacaaca" };  // As is this one. We need a separate one to
                                                  // defeat object-equal optimizations.
  static constexpr size_t kStringCount = arraysize(c);

  StackHandleScope<kStringCount> hs(self);
  Handle<mirror::String> s[kStringCount];

  for (size_t i = 0; i < kStringCount; ++i) {
    s[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), c[i]));
  }

  // TODO: wide characters

  // Matrix of expectations. First component is first parameter. Note we only check against the
  // sign, not the value. As we are testing random offsets, we need to compute this and need to
  // rely on String::CompareTo being correct.
  int32_t expected[kStringCount][kStringCount];
  for (size_t x = 0; x < kStringCount; ++x) {
    for (size_t y = 0; y < kStringCount; ++y) {
      expected[x][y] = s[x]->CompareTo(s[y].Get());
    }
  }

  // Play with it...

  for (size_t x = 0; x < kStringCount; ++x) {
    for (size_t y = 0; y < kStringCount; ++y) {
      // Test string_compareto x y.
      size_t result = Invoke3(reinterpret_cast<size_t>(s[x].Get()),
                              reinterpret_cast<size_t>(s[y].Get()), 0U,
                              art_quick_string_compareto, self);

      EXPECT_FALSE(self->IsExceptionPending());

      // The result is a 32b signed integer.
      union {
        size_t r;
        int32_t i;
      } conv;
      conv.r = result;
      int32_t e = expected[x][y];
      EXPECT_TRUE(e == 0 ? conv.i == 0 : true) << "x=" << c[x] << " y=" << c[y] << " res=" <<
          conv.r;
      EXPECT_TRUE(e < 0 ? conv.i < 0 : true)   << "x=" << c[x] << " y=" << c[y] << " res=" <<
          conv.r;
      EXPECT_TRUE(e > 0 ? conv.i > 0 : true)   << "x=" << c[x] << " y=" << c[y] << " res=" <<
          conv.r;
    }
  }

  // TODO: Deallocate things.

  // Tests done.
#else
  LOG(INFO) << "Skipping string_compareto as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping string_compareto as I don't know how to do that on " << kRuntimeISA <<
      std::endl;
#endif
}


static void GetSetBooleanStatic(ArtField* f, Thread* self,
                                ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  constexpr size_t num_values = 5;
  uint8_t values[num_values] = { 0, 1, 2, 128, 0xFF };

  for (size_t i = 0; i < num_values; ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              static_cast<size_t>(values[i]),
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet8Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGetBooleanStatic),
                                           self,
                                           referrer);
    // Boolean currently stores bools as uint8_t; be more zealous about asserting correct
    // writes/gets.
    EXPECT_EQ(values[i], static_cast<uint8_t>(res)) << "Iteration " << i;
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set_boolean_static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_boolean_static as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

static void GetSetByteStatic(ArtField* f, Thread* self, ArtMethod* referrer,
                             StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  int8_t values[] = { -128, -64, 0, 64, 127 };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              static_cast<size_t>(values[i]),
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet8Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGetByteStatic),
                                           self,
                                           referrer);
    EXPECT_EQ(values[i], static_cast<int8_t>(res)) << "Iteration " << i;
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set_byte_static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_byte_static as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


static void GetSetBooleanInstance(Handle<mirror::Object>* obj, ArtField* f, Thread* self,
                                  ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  uint8_t values[] = { 0, true, 2, 128, 0xFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet8Instance),
                              self,
                              referrer);

    uint8_t res = f->GetBoolean(obj->Get());
    EXPECT_EQ(values[i], res) << "Iteration " << i;

    f->SetBoolean<false>(obj->Get(), res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGetBooleanInstance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<uint8_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set_boolean_instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_boolean_instance as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

static void GetSetByteInstance(Handle<mirror::Object>* obj, ArtField* f,
                               Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  int8_t values[] = { -128, -64, 0, 64, 127 };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet8Instance),
                              self,
                              referrer);

    int8_t res = f->GetByte(obj->Get());
    EXPECT_EQ(res, values[i]) << "Iteration " << i;
    f->SetByte<false>(obj->Get(), ++res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGetByteInstance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<int8_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set_byte_instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_byte_instance as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}
1309 
static void GetSetCharStatic(ArtField* f, Thread* self, ArtMethod* referrer,
                             StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              static_cast<size_t>(values[i]),
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet16Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGetCharStatic),
                                           self,
                                           referrer);

    EXPECT_EQ(values[i], static_cast<uint16_t>(res)) << "Iteration " << i;
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set_char_static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_char_static as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

static void GetSetShortStatic(ArtField* f, Thread* self,
                              ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              static_cast<size_t>(values[i]),
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet16Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGetShortStatic),
                                           self,
                                           referrer);

    EXPECT_EQ(static_cast<int16_t>(res), values[i]) << "Iteration " << i;
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set_short_static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_short_static as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

static void GetSetCharInstance(Handle<mirror::Object>* obj, ArtField* f,
                               Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet16Instance),
                              self,
                              referrer);

    uint16_t res = f->GetChar(obj->Get());
    EXPECT_EQ(res, values[i]) << "Iteration " << i;
    f->SetChar<false>(obj->Get(), ++res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGetCharInstance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<uint16_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set_char_instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_char_instance as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

static void GetSetShortInstance(Handle<mirror::Object>* obj, ArtField* f,
                                Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet16Instance),
                              self,
                              referrer);

    int16_t res = f->GetShort(obj->Get());
    EXPECT_EQ(res, values[i]) << "Iteration " << i;
    f->SetShort<false>(obj->Get(), ++res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGetShortInstance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<int16_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set_short_instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_short_instance as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

static void GetSet32Static(ArtField* f, Thread* self, ArtMethod* referrer,
                           StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              static_cast<size_t>(values[i]),
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet32Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGet32Static),
                                           self,
                                           referrer);

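    // On MIPS64, 32-bit values are kept sign-extended in 64-bit registers, so the getter
    // may return a sign-extended size_t; compare only the low 32 bits there.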
#if defined(__mips__) && defined(__LP64__)
    EXPECT_EQ(static_cast<uint32_t>(res), values[i]) << "Iteration " << i;
#else
    EXPECT_EQ(res, values[i]) << "Iteration " << i;
#endif
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set32static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set32static as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

static void GetSet32Instance(Handle<mirror::Object>* obj, ArtField* f,
                             Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet32Instance),
                              self,
                              referrer);

    int32_t res = f->GetInt(obj->Get());
    EXPECT_EQ(res, static_cast<int32_t>(values[i])) << "Iteration " << i;

    res++;
    f->SetInt<false>(obj->Get(), res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGet32Instance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<int32_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set32instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set32instance as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))

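// Helper for the object field stubs: references travel as raw size_t values, so the setter
// takes the field index and the reference to store, and the getter returns the stored
// reference, which is compared against the handle.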
static void set_and_check_static(uint32_t f_idx,
                                 ObjPtr<mirror::Object> val,
                                 Thread* self,
                                 ArtMethod* referrer,
                                 StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  StackHandleScope<1u> hs(self);
  Handle<mirror::Object> h_val = hs.NewHandle(val);
  test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
                            reinterpret_cast<size_t>(h_val.Get()),
                            0U,
                            StubTest::GetEntrypoint(self, kQuickSetObjStatic),
                            self,
                            referrer);

  size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
                                         0U,
                                         0U,
                                         StubTest::GetEntrypoint(self, kQuickGetObjStatic),
                                         self,
                                         referrer);

  EXPECT_EQ(res, reinterpret_cast<size_t>(h_val.Get())) << "Value " << h_val.Get();
}
#endif

static void GetSetObjStatic(ArtField* f, Thread* self, ArtMethod* referrer,
                            StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  set_and_check_static(f->GetDexFieldIndex(), nullptr, self, referrer, test);

  // Allocate a string object for simplicity.
  ObjPtr<mirror::String> str = mirror::String::AllocFromModifiedUtf8(self, "Test");
  set_and_check_static(f->GetDexFieldIndex(), str, self, referrer, test);

  set_and_check_static(f->GetDexFieldIndex(), nullptr, self, referrer, test);
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping setObjstatic as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping setObjstatic as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
static void set_and_check_instance(ArtField* f,
                                   ObjPtr<mirror::Object> trg,
                                   ObjPtr<mirror::Object> val,
                                   Thread* self,
                                   ArtMethod* referrer,
                                   StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  StackHandleScope<2u> hs(self);
  Handle<mirror::Object> h_trg = hs.NewHandle(trg);
  Handle<mirror::Object> h_val = hs.NewHandle(val);
  test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                            reinterpret_cast<size_t>(h_trg.Get()),
                            reinterpret_cast<size_t>(h_val.Get()),
                            StubTest::GetEntrypoint(self, kQuickSetObjInstance),
                            self,
                            referrer);

  size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                         reinterpret_cast<size_t>(h_trg.Get()),
                                         0U,
                                         StubTest::GetEntrypoint(self, kQuickGetObjInstance),
                                         self,
                                         referrer);

  EXPECT_EQ(res, reinterpret_cast<size_t>(h_val.Get())) << "Value " << h_val.Get();

  EXPECT_OBJ_PTR_EQ(h_val.Get(), f->GetObj(h_trg.Get()));
}
#endif

static void GetSetObjInstance(Handle<mirror::Object>* obj, ArtField* f,
                              Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);

  // Allocate a string object for simplicity.
  ObjPtr<mirror::String> str = mirror::String::AllocFromModifiedUtf8(self, "Test");
  set_and_check_instance(f, obj->Get(), str, self, referrer, test);

  set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping setObjinstance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping setObjinstance as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

// TODO: Complete these tests for 32b architectures.

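// These only run where size_t is 64 bits wide: a uint64_t value cannot be passed through a
// single size_t argument of Invoke3WithReferrer on 32-bit targets.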
static void GetSet64Static(ArtField* f, Thread* self, ArtMethod* referrer,
                           StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \
    defined(__aarch64__)
  uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    // 64 bit FieldSet stores the set value in the second register.
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              values[i],
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet64Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGet64Static),
                                           self,
                                           referrer);

    EXPECT_EQ(res, values[i]) << "Iteration " << i;
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set64static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set64static as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

static void GetSet64Instance(Handle<mirror::Object>* obj, ArtField* f,
                             Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \
    defined(__aarch64__)
  uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet64Instance),
                              self,
                              referrer);

    int64_t res = f->GetLong(obj->Get());
    EXPECT_EQ(res, static_cast<int64_t>(values[i])) << "Iteration " << i;

    res++;
    f->SetLong<false>(obj->Get(), res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGet64Instance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<int64_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

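// Exercises the get/set stubs for every field of the requested primitive type on the
// AllFields test class, which is expected to declare matching static and instance fields.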
static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type) {
  // Garbage is created during ClassLinker::Init.

  JNIEnv* env = Thread::Current()->GetJniEnv();
  jclass jc = env->FindClass("AllFields");
  CHECK(jc != nullptr);
  jobject o = env->AllocObject(jc);
  CHECK(o != nullptr);

  ScopedObjectAccess soa(self);
  StackHandleScope<3> hs(self);
  Handle<mirror::Object> obj(hs.NewHandle(soa.Decode<mirror::Object>(o)));
  Handle<mirror::Class> c(hs.NewHandle(obj->GetClass()));
  // Need a method as a referrer.
  ArtMethod* m = c->GetDirectMethod(0, kRuntimePointerSize);

  // Play with it...

  // Static fields.
  for (ArtField& f : c->GetSFields()) {
    Primitive::Type type = f.GetTypeAsPrimitiveType();
    if (test_type != type) {
      continue;
    }
    switch (type) {
      case Primitive::Type::kPrimBoolean:
        GetSetBooleanStatic(&f, self, m, test);
        break;
      case Primitive::Type::kPrimByte:
        GetSetByteStatic(&f, self, m, test);
        break;
      case Primitive::Type::kPrimChar:
        GetSetCharStatic(&f, self, m, test);
        break;
      case Primitive::Type::kPrimShort:
        GetSetShortStatic(&f, self, m, test);
        break;
      case Primitive::Type::kPrimInt:
        GetSet32Static(&f, self, m, test);
        break;
      case Primitive::Type::kPrimLong:
        GetSet64Static(&f, self, m, test);
        break;
      case Primitive::Type::kPrimNot:
        // Don't try arrays.
        if (f.GetTypeDescriptor()[0] != '[') {
          GetSetObjStatic(&f, self, m, test);
        }
        break;
      default:
        break;  // Skip.
    }
  }

  // Instance fields.
  for (ArtField& f : c->GetIFields()) {
    Primitive::Type type = f.GetTypeAsPrimitiveType();
    if (test_type != type) {
      continue;
    }
    switch (type) {
      case Primitive::Type::kPrimBoolean:
        GetSetBooleanInstance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimByte:
        GetSetByteInstance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimChar:
        GetSetCharInstance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimShort:
        GetSetShortInstance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimInt:
        GetSet32Instance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimLong:
        GetSet64Instance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimNot:
        // Don't try arrays.
        if (f.GetTypeDescriptor()[0] != '[') {
          GetSetObjInstance(&obj, &f, self, m, test);
        }
        break;
      default:
        break;  // Skip.
    }
  }

  // TODO: Deallocate things.
}

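// Each Fields* test below follows the same pattern: transition to runnable, load the
// AllFields dex file, start the runtime, then exercise the stubs for one set of type widths.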
TEST_F(StubTest, Fields8) {
  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimBoolean);
  TestFields(self, this, Primitive::Type::kPrimByte);
}

TEST_F(StubTest, Fields16) {
  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimChar);
  TestFields(self, this, Primitive::Type::kPrimShort);
}

TEST_F(StubTest, Fields32) {
  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimInt);
}

TEST_F(StubTest, FieldsObj) {
  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimNot);
}

TEST_F(StubTest, Fields64) {
  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimLong);
}

// Disabled, b/27991555.
// FIXME: Hacking the entry point to point to art_quick_to_interpreter_bridge is broken.
// The bridge calls through to GetCalleeSaveMethodCaller() which looks up the pre-header
// and gets a bogus OatQuickMethodHeader* pointing into our assembly code just before
// the bridge and uses that to check for inlined frames, crashing in the process.
TEST_F(StubTest, DISABLED_IMT) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  Thread* self = Thread::Current();

  ScopedObjectAccess soa(self);
  StackHandleScope<7> hs(self);

  JNIEnv* env = Thread::Current()->GetJniEnv();

  // ArrayList

  // Load ArrayList and used methods (JNI).
  jclass arraylist_jclass = env->FindClass("java/util/ArrayList");
  ASSERT_NE(nullptr, arraylist_jclass);
  jmethodID arraylist_constructor = env->GetMethodID(arraylist_jclass, "<init>", "()V");
  ASSERT_NE(nullptr, arraylist_constructor);
  jmethodID contains_jmethod = env->GetMethodID(
      arraylist_jclass, "contains", "(Ljava/lang/Object;)Z");
  ASSERT_NE(nullptr, contains_jmethod);
  jmethodID add_jmethod = env->GetMethodID(arraylist_jclass, "add", "(Ljava/lang/Object;)Z");
  ASSERT_NE(nullptr, add_jmethod);

  // Get representation.
  ArtMethod* contains_amethod = jni::DecodeArtMethod(contains_jmethod);

  // Patch up ArrayList.contains.
  if (contains_amethod->GetEntryPointFromQuickCompiledCode() == nullptr) {
    contains_amethod->SetEntryPointFromQuickCompiledCode(reinterpret_cast<void*>(
        StubTest::GetEntrypoint(self, kQuickQuickToInterpreterBridge)));
  }

  // List

  // Load List and used methods (JNI).
  jclass list_jclass = env->FindClass("java/util/List");
  ASSERT_NE(nullptr, list_jclass);
  jmethodID inf_contains_jmethod = env->GetMethodID(
      list_jclass, "contains", "(Ljava/lang/Object;)Z");
  ASSERT_NE(nullptr, inf_contains_jmethod);

  // Get mirror representation.
  ArtMethod* inf_contains = jni::DecodeArtMethod(inf_contains_jmethod);

  // Object

  jclass obj_jclass = env->FindClass("java/lang/Object");
  ASSERT_NE(nullptr, obj_jclass);
  jmethodID obj_constructor = env->GetMethodID(obj_jclass, "<init>", "()V");
  ASSERT_NE(nullptr, obj_constructor);

  // Create instances.

  jobject jarray_list = env->NewObject(arraylist_jclass, arraylist_constructor);
  ASSERT_NE(nullptr, jarray_list);
  Handle<mirror::Object> array_list(hs.NewHandle(soa.Decode<mirror::Object>(jarray_list)));

  jobject jobj = env->NewObject(obj_jclass, obj_constructor);
  ASSERT_NE(nullptr, jobj);
  Handle<mirror::Object> obj(hs.NewHandle(soa.Decode<mirror::Object>(jobj)));

  // Invocation tests.

  // 1. imt_conflict

  // Contains.

  // We construct the ImtConflictTable ourselves, as we cannot go into the runtime stub
  // that will create it: the runtime stub expects to be called by compiled code.
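  // Conceptually, the table is a null-terminated array of { interface method,
  // implementation } pairs which the conflict trampoline walks, using the hidden
  // argument to select the right target.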
  LinearAlloc* linear_alloc = Runtime::Current()->GetLinearAlloc();
  ArtMethod* conflict_method = Runtime::Current()->CreateImtConflictMethod(linear_alloc);
  ImtConflictTable* empty_conflict_table =
      Runtime::Current()->GetClassLinker()->CreateImtConflictTable(/*count=*/0u, linear_alloc);
  void* data = linear_alloc->Alloc(
      self,
      ImtConflictTable::ComputeSizeWithOneMoreEntry(empty_conflict_table, kRuntimePointerSize));
  ImtConflictTable* new_table = new (data) ImtConflictTable(
      empty_conflict_table, inf_contains, contains_amethod, kRuntimePointerSize);
  conflict_method->SetImtConflictTable(new_table, kRuntimePointerSize);

  size_t result =
      Invoke3WithReferrerAndHidden(reinterpret_cast<size_t>(conflict_method),
                                   reinterpret_cast<size_t>(array_list.Get()),
                                   reinterpret_cast<size_t>(obj.Get()),
                                   StubTest::GetEntrypoint(self, kQuickQuickImtConflictTrampoline),
                                   self,
                                   contains_amethod,
                                   static_cast<size_t>(inf_contains->GetDexMethodIndex()));

  ASSERT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(static_cast<size_t>(JNI_FALSE), result);

  // Add object.

  env->CallBooleanMethod(jarray_list, add_jmethod, jobj);

  ASSERT_FALSE(self->IsExceptionPending()) << mirror::Object::PrettyTypeOf(self->GetException());

  // Contains.

  result =
      Invoke3WithReferrerAndHidden(reinterpret_cast<size_t>(conflict_method),
                                   reinterpret_cast<size_t>(array_list.Get()),
                                   reinterpret_cast<size_t>(obj.Get()),
                                   StubTest::GetEntrypoint(self, kQuickQuickImtConflictTrampoline),
                                   self,
                                   contains_amethod,
                                   static_cast<size_t>(inf_contains->GetDexMethodIndex()));

  ASSERT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(static_cast<size_t>(JNI_TRUE), result);

  // 2. regular interface trampoline

  result = Invoke3WithReferrer(static_cast<size_t>(inf_contains->GetDexMethodIndex()),
                               reinterpret_cast<size_t>(array_list.Get()),
                               reinterpret_cast<size_t>(obj.Get()),
                               StubTest::GetEntrypoint(self,
                                   kQuickInvokeInterfaceTrampolineWithAccessCheck),
                               self, contains_amethod);

  ASSERT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(static_cast<size_t>(JNI_TRUE), result);

  result = Invoke3WithReferrer(
      static_cast<size_t>(inf_contains->GetDexMethodIndex()),
      reinterpret_cast<size_t>(array_list.Get()), reinterpret_cast<size_t>(array_list.Get()),
      StubTest::GetEntrypoint(self, kQuickInvokeInterfaceTrampolineWithAccessCheck), self,
      contains_amethod);

  ASSERT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(static_cast<size_t>(JNI_FALSE), result);
#else
  LOG(INFO) << "Skipping imt as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping imt as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

TEST_F(StubTest, StringIndexOf) {
#if defined(__arm__) || defined(__aarch64__) || defined(__mips__)
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);
  // Garbage is created during ClassLinker::Init.

  // Create some strings.
  // Use an array so we can index into it and use a matrix for expected results.
  // Setup: The first half is standard. The second half uses a non-zero offset.
  // TODO: Shared backing arrays.
  const char* c_str[] = { "", "a", "ba", "cba", "dcba", "edcba", "asdfghjkl" };
  static constexpr size_t kStringCount = arraysize(c_str);
  const char c_char[] = { 'a', 'b', 'c', 'd', 'e' };
  static constexpr size_t kCharCount = arraysize(c_char);

  StackHandleScope<kStringCount> hs(self);
  Handle<mirror::String> s[kStringCount];

  for (size_t i = 0; i < kStringCount; ++i) {
    s[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), c_str[i]));
  }

  // Matrix of expectations. First component is the string, second the character, third the
  // start index (offset by one so that -1 is covered). As we are testing arbitrary start
  // offsets, we compute the expected results here and rely on String::FastIndexOf being
  // correct.
  static constexpr size_t kMaxLen = 9;
  DCHECK_LE(strlen(c_str[kStringCount - 1]), kMaxLen) << "Please fix the indexof test.";

  // Last dimension: start, offset by 1.
  int32_t expected[kStringCount][kCharCount][kMaxLen + 3];
  for (size_t x = 0; x < kStringCount; ++x) {
    for (size_t y = 0; y < kCharCount; ++y) {
      for (size_t z = 0; z <= kMaxLen + 2; ++z) {
        expected[x][y][z] = s[x]->FastIndexOf(c_char[y], static_cast<int32_t>(z) - 1);
      }
    }
  }

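  // Worked example (assuming java.lang.String.indexOf semantics, where a negative start
  // behaves like 0): for "cba" and 'a', any start <= 2 yields 2 and any larger start
  // yields -1.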
  // Play with it...

  for (size_t x = 0; x < kStringCount; ++x) {
    for (size_t y = 0; y < kCharCount; ++y) {
      for (size_t z = 0; z <= kMaxLen + 2; ++z) {
        int32_t start = static_cast<int32_t>(z) - 1;

        // Test indexof on s[x] for c_char[y] from the given start.
        size_t result = Invoke3(reinterpret_cast<size_t>(s[x].Get()), c_char[y], start,
                                StubTest::GetEntrypoint(self, kQuickIndexOf), self);

        EXPECT_FALSE(self->IsExceptionPending());

        // The result is a 32b signed integer.
        union {
          size_t r;
          int32_t i;
        } conv;
        conv.r = result;

        EXPECT_EQ(expected[x][y][z], conv.i) << "Wrong result for " << c_str[x] << " / "
            << c_char[y] << " @ " << start;
      }
    }
  }

  // TODO: Deallocate things.

  // Tests done.
#else
  LOG(INFO) << "Skipping indexof as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping indexof as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

// TODO: Exercise the ReadBarrierMarkRegX entry points.

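// The slow-path read barrier entrypoint takes (ref, obj, offset) and returns the
// (possibly to-space) reference read from obj at that offset; here it loads the class
// field of a plain Object, and the result is checked against GetClass().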
TEST_F(StubTest, ReadBarrier) {
#if defined(ART_USE_READ_BARRIER) && (defined(__i386__) || defined(__arm__) || \
      defined(__aarch64__) || defined(__mips__) || (defined(__x86_64__) && !defined(__APPLE__)))
  Thread* self = Thread::Current();

  const uintptr_t readBarrierSlow = StubTest::GetEntrypoint(self, kQuickReadBarrierSlow);

  // Create an object.
  ScopedObjectAccess soa(self);
  // Garbage is created during ClassLinker::Init.

  StackHandleScope<2> hs(soa.Self());
  Handle<mirror::Class> c(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));

  // Build an object instance.
  Handle<mirror::Object> obj(hs.NewHandle(c->AllocObject(soa.Self())));

  EXPECT_FALSE(self->IsExceptionPending());

  size_t result = Invoke3(0U, reinterpret_cast<size_t>(obj.Get()),
                          mirror::Object::ClassOffset().SizeValue(), readBarrierSlow, self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
  mirror::Class* klass = reinterpret_cast<mirror::Class*>(result);
  EXPECT_OBJ_PTR_EQ(klass, obj->GetClass());

  // Tests done.
#else
  LOG(INFO) << "Skipping read_barrier_slow";
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping read_barrier_slow" << std::endl;
#endif
}

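// The root variant takes the address of a GcRoot<> instead of an object and offset, and
// returns the read-barrier-processed reference stored in that root.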
TEST_F(StubTest, ReadBarrierForRoot) {
#if defined(ART_USE_READ_BARRIER) && (defined(__i386__) || defined(__arm__) || \
      defined(__aarch64__) || defined(__mips__) || (defined(__x86_64__) && !defined(__APPLE__)))
  Thread* self = Thread::Current();

  const uintptr_t readBarrierForRootSlow =
      StubTest::GetEntrypoint(self, kQuickReadBarrierForRootSlow);

  // Create an object.
  ScopedObjectAccess soa(self);
  // Garbage is created during ClassLinker::Init.

  StackHandleScope<1> hs(soa.Self());

  Handle<mirror::String> obj(
      hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));

  EXPECT_FALSE(self->IsExceptionPending());

  GcRoot<mirror::Class> root(GetClassRoot<mirror::String>());
  size_t result = Invoke3(reinterpret_cast<size_t>(&root), 0U, 0U, readBarrierForRootSlow, self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
  mirror::Class* klass = reinterpret_cast<mirror::Class*>(result);
  EXPECT_OBJ_PTR_EQ(klass, obj->GetClass());

  // Tests done.
#else
  LOG(INFO) << "Skipping read_barrier_for_root_slow";
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping read_barrier_for_root_slow" << std::endl;
#endif
}

}  // namespace art