• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2014 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
#include <dirent.h>
#include <sys/types.h>

#include <fstream>
#include <map>
#include <memory>

#include "base/hex_dump.h"
#include "common_runtime_test.h"
#include "gtest/gtest.h"
#include "utils/arm/assembler_thumb2.h"
26 
27 namespace art {
28 namespace arm {
29 
30 // Include results file (generated manually)
31 #include "assembler_thumb_test_expected.cc.inc"
32 
#ifndef HAVE_ANDROID_OS
// This controls whether the results are printed to the
// screen or compared against the expected output.
// To generate new expected output, set this to true and
// copy the output into the .cc.inc file in the form
// of the other results.
//
// When this is false, the results are not printed to the
// output, but are compared against the expected results
// in the .cc.inc file.
static constexpr bool kPrintResults = false;
#endif

// Binary-name prefix of the cross tools (as, objcopy, objdump) run by dump().
static const char* TOOL_PREFIX = "arm-linux-androideabi-";
47 
// Ensure the ANDROID_DATA environment variable is set; if the environment
// does not already provide a value, fall back to "/tmp".
void SetAndroidData() {
  if (getenv("ANDROID_DATA") == nullptr) {
    setenv("ANDROID_DATA", "/tmp", 1);
  }
}
54 
// Compare two C strings while treating any run of whitespace as
// insignificant.  Returns 0 when the strings match ignoring whitespace,
// otherwise the difference of the first mismatching characters
// (strcmp-style sign).
int CompareIgnoringSpace(const char* s1, const char* s2) {
  // Cast through unsigned char: passing a negative char to isspace() is UB.
  auto skip_space = [](const char*& s) {
    while (isspace(static_cast<unsigned char>(*s))) ++s;
  };
  while (*s1 != '\0') {
    skip_space(s1);
    skip_space(s2);
    if (*s1 == '\0' || *s1 != *s2) {
      break;
    }
    ++s1;
    ++s2;
  }
  // Also ignore trailing whitespace, so e.g. "a b" matches "ab \n".
  // (Previously a trailing space/newline on only one side caused a mismatch.)
  skip_space(s1);
  skip_space(s2);
  return *s1 - *s2;
}
67 
GetAndroidToolsDir()68 std::string GetAndroidToolsDir() {
69   std::string root;
70   const char* android_build_top = getenv("ANDROID_BUILD_TOP");
71   if (android_build_top != nullptr) {
72     root += android_build_top;
73   } else {
74     // Not set by build server, so default to current directory
75     char* cwd = getcwd(nullptr, 0);
76     setenv("ANDROID_BUILD_TOP", cwd, 1);
77     root += cwd;
78     free(cwd);
79   }
80 
81   // Look for "prebuilts"
82   std::string toolsdir = root;
83   struct stat st;
84   while (toolsdir != "") {
85     std::string prebuilts = toolsdir + "/prebuilts";
86     if (stat(prebuilts.c_str(), &st) == 0) {
87        // Found prebuilts.
88        toolsdir += "/prebuilts/gcc/linux-x86/arm";
89        break;
90     }
91     // Not present, move up one dir.
92     size_t slash = toolsdir.rfind('/');
93     if (slash == std::string::npos) {
94       toolsdir = "";
95     } else {
96       toolsdir = toolsdir.substr(0, slash-1);
97     }
98   }
99   bool statok = stat(toolsdir.c_str(), &st) == 0;
100   if (!statok) {
101     return "";      // Use path.
102   }
103 
104   DIR* dir = opendir(toolsdir.c_str());
105   if (dir == nullptr) {
106     return "";      // Use path.
107   }
108 
109   struct dirent* entry;
110   std::string founddir;
111   double maxversion  = 0;
112 
113   // Find the latest version of the arm-eabi tools (biggest version number).
114   // Suffix on toolsdir will be something like "arm-eabi-4.8"
115   while ((entry = readdir(dir)) != nullptr) {
116     std::string subdir = toolsdir + std::string("/") + std::string(entry->d_name);
117     size_t eabi = subdir.find(TOOL_PREFIX);
118     if (eabi != std::string::npos) {
119       std::string suffix = subdir.substr(eabi + strlen(TOOL_PREFIX));
120       double version = strtod(suffix.c_str(), nullptr);
121       if (version > maxversion) {
122         maxversion = version;
123         founddir = subdir;
124       }
125     }
126   }
127   closedir(dir);
128   bool found = founddir != "";
129   if (!found) {
130     return "";      // Use path.
131   }
132 
133   return founddir + "/bin/";
134 }
135 
dump(std::vector<uint8_t> & code,const char * testname)136 void dump(std::vector<uint8_t>& code, const char* testname) {
137   // This will only work on the host.  There is no as, objcopy or objdump on the
138   // device.
139 #ifndef HAVE_ANDROID_OS
140   static bool results_ok = false;
141   static std::string toolsdir;
142 
143   if (!results_ok) {
144     setup_results();
145     toolsdir = GetAndroidToolsDir();
146     SetAndroidData();
147     results_ok = true;
148   }
149 
150   ScratchFile file;
151 
152   const char* filename = file.GetFilename().c_str();
153 
154   std::ofstream out(filename);
155   if (out) {
156     out << ".section \".text\"\n";
157     out << ".syntax unified\n";
158     out << ".arch armv7-a\n";
159     out << ".thumb\n";
160     out << ".thumb_func\n";
161     out << ".type " << testname << ", #function\n";
162     out << ".global " << testname << "\n";
163     out << testname << ":\n";
164     out << ".fnstart\n";
165 
166     for (uint32_t i = 0 ; i < code.size(); ++i) {
167       out << ".byte " << (static_cast<int>(code[i]) & 0xff) << "\n";
168     }
169     out << ".fnend\n";
170     out << ".size " << testname << ", .-" << testname << "\n";
171   }
172   out.close();
173 
174   char cmd[1024];
175 
176   // Assemble the .S
177   snprintf(cmd, sizeof(cmd), "%s%sas %s -o %s.o", toolsdir.c_str(), TOOL_PREFIX, filename, filename);
178   system(cmd);
179 
180   // Remove the $d symbols to prevent the disassembler dumping the instructions
181   // as .word
182   snprintf(cmd, sizeof(cmd), "%s%sobjcopy -N '$d' %s.o %s.oo", toolsdir.c_str(), TOOL_PREFIX,
183     filename, filename);
184   system(cmd);
185 
186   // Disassemble.
187 
188   snprintf(cmd, sizeof(cmd), "%s%sobjdump -d %s.oo | grep '^  *[0-9a-f][0-9a-f]*:'",
189     toolsdir.c_str(), TOOL_PREFIX, filename);
190   if (kPrintResults) {
191     // Print the results only, don't check. This is used to generate new output for inserting
192     // into the .inc file.
193     system(cmd);
194   } else {
195     // Check the results match the appropriate results in the .inc file.
196     FILE *fp = popen(cmd, "r");
197     ASSERT_TRUE(fp != nullptr);
198 
199     std::map<std::string, const char**>::iterator results = test_results.find(testname);
200     ASSERT_NE(results, test_results.end());
201 
202     uint32_t lineindex = 0;
203 
204     while (!feof(fp)) {
205       char testline[256];
206       char *s = fgets(testline, sizeof(testline), fp);
207       if (s == nullptr) {
208         break;
209       }
210       if (CompareIgnoringSpace(results->second[lineindex], testline) != 0) {
211         LOG(FATAL) << "Output is not as expected at line: " << lineindex
212           << results->second[lineindex] << "/" << testline;
213       }
214       ++lineindex;
215     }
216     // Check that we are at the end.
217     ASSERT_TRUE(results->second[lineindex] == nullptr);
218     fclose(fp);
219   }
220 
221   char buf[FILENAME_MAX];
222   snprintf(buf, sizeof(buf), "%s.o", filename);
223   unlink(buf);
224 
225   snprintf(buf, sizeof(buf), "%s.oo", filename);
226   unlink(buf);
227 #endif
228 }
229 
230 #define __ assembler->
231 
TEST(Thumb2AssemblerTest, SimpleMov) {
  // Own the assembler via RAII instead of a raw pointer + manual delete.
  std::unique_ptr<arm::Thumb2Assembler> assembler(
      static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2)));

  __ mov(R0, ShifterOperand(R1));
  __ mov(R8, ShifterOperand(R9));

  __ mov(R0, ShifterOperand(1));
  __ mov(R8, ShifterOperand(9));

  // Finalize the code and compare against the expected disassembly.
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "SimpleMov");
}
248 
TEST(Thumb2AssemblerTest, SimpleMov32) {
  // Own the assembler via RAII instead of a raw pointer + manual delete.
  std::unique_ptr<arm::Thumb2Assembler> assembler(
      static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2)));
  assembler->Force32Bit();

  __ mov(R0, ShifterOperand(R1));
  __ mov(R8, ShifterOperand(R9));

  // Finalize the code and compare against the expected disassembly.
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "SimpleMov32");
}
263 
TEST(Thumb2AssemblerTest, SimpleMovAdd) {
  // Own the assembler via RAII instead of a raw pointer + manual delete.
  std::unique_ptr<arm::Thumb2Assembler> assembler(
      static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2)));

  __ mov(R0, ShifterOperand(R1));
  __ add(R0, R1, ShifterOperand(R2));
  __ add(R0, R1, ShifterOperand());

  // Finalize the code and compare against the expected disassembly.
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "SimpleMovAdd");
}
278 
TEST(Thumb2AssemblerTest, DataProcessingRegister) {
  // Own the assembler via RAII instead of a raw pointer + manual delete.
  std::unique_ptr<arm::Thumb2Assembler> assembler(
      static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2)));

  __ mov(R0, ShifterOperand(R1));
  __ mvn(R0, ShifterOperand(R1));

  // 32 bit variants.
  __ add(R0, R1, ShifterOperand(R2));
  __ sub(R0, R1, ShifterOperand(R2));
  __ and_(R0, R1, ShifterOperand(R2));
  __ orr(R0, R1, ShifterOperand(R2));
  __ eor(R0, R1, ShifterOperand(R2));
  __ bic(R0, R1, ShifterOperand(R2));
  __ adc(R0, R1, ShifterOperand(R2));
  __ sbc(R0, R1, ShifterOperand(R2));
  __ rsb(R0, R1, ShifterOperand(R2));

  // 16 bit variants.
  __ add(R0, R1, ShifterOperand());
  __ sub(R0, R1, ShifterOperand());
  __ and_(R0, R1, ShifterOperand());
  __ orr(R0, R1, ShifterOperand());
  __ eor(R0, R1, ShifterOperand());
  __ bic(R0, R1, ShifterOperand());
  __ adc(R0, R1, ShifterOperand());
  __ sbc(R0, R1, ShifterOperand());
  __ rsb(R0, R1, ShifterOperand());

  __ tst(R0, ShifterOperand(R1));
  __ teq(R0, ShifterOperand(R1));
  __ cmp(R0, ShifterOperand(R1));
  __ cmn(R0, ShifterOperand(R1));

  __ movs(R0, ShifterOperand(R1));
  __ mvns(R0, ShifterOperand(R1));

  // 32 bit variants.
  __ add(R12, R1, ShifterOperand(R0));

  // Finalize the code and compare against the expected disassembly.
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "DataProcessingRegister");
}
325 
TEST(Thumb2AssemblerTest, DataProcessingImmediate) {
  // Own the assembler via RAII instead of a raw pointer + manual delete.
  std::unique_ptr<arm::Thumb2Assembler> assembler(
      static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2)));

  __ mov(R0, ShifterOperand(0x55));
  __ mvn(R0, ShifterOperand(0x55));
  __ add(R0, R1, ShifterOperand(0x55));
  __ sub(R0, R1, ShifterOperand(0x55));
  __ and_(R0, R1, ShifterOperand(0x55));
  __ orr(R0, R1, ShifterOperand(0x55));
  __ eor(R0, R1, ShifterOperand(0x55));
  __ bic(R0, R1, ShifterOperand(0x55));
  __ adc(R0, R1, ShifterOperand(0x55));
  __ sbc(R0, R1, ShifterOperand(0x55));
  __ rsb(R0, R1, ShifterOperand(0x55));

  __ tst(R0, ShifterOperand(0x55));
  __ teq(R0, ShifterOperand(0x55));
  __ cmp(R0, ShifterOperand(0x55));
  __ cmn(R0, ShifterOperand(0x55));

  __ add(R0, R1, ShifterOperand(5));
  __ sub(R0, R1, ShifterOperand(5));

  __ movs(R0, ShifterOperand(0x55));
  __ mvns(R0, ShifterOperand(0x55));

  // Finalize the code and compare against the expected disassembly.
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "DataProcessingImmediate");
}
359 
TEST(Thumb2AssemblerTest, DataProcessingModifiedImmediate) {
  // Own the assembler via RAII instead of a raw pointer + manual delete.
  std::unique_ptr<arm::Thumb2Assembler> assembler(
      static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2)));

  // 0x550055 requires the Thumb2 modified-immediate encoding.
  __ mov(R0, ShifterOperand(0x550055));
  __ mvn(R0, ShifterOperand(0x550055));
  __ add(R0, R1, ShifterOperand(0x550055));
  __ sub(R0, R1, ShifterOperand(0x550055));
  __ and_(R0, R1, ShifterOperand(0x550055));
  __ orr(R0, R1, ShifterOperand(0x550055));
  __ eor(R0, R1, ShifterOperand(0x550055));
  __ bic(R0, R1, ShifterOperand(0x550055));
  __ adc(R0, R1, ShifterOperand(0x550055));
  __ sbc(R0, R1, ShifterOperand(0x550055));
  __ rsb(R0, R1, ShifterOperand(0x550055));

  __ tst(R0, ShifterOperand(0x550055));
  __ teq(R0, ShifterOperand(0x550055));
  __ cmp(R0, ShifterOperand(0x550055));
  __ cmn(R0, ShifterOperand(0x550055));

  // Finalize the code and compare against the expected disassembly.
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "DataProcessingModifiedImmediate");
}
387 
388 
TEST(Thumb2AssemblerTest, DataProcessingModifiedImmediates) {
  // Own the assembler via RAII instead of a raw pointer + manual delete.
  std::unique_ptr<arm::Thumb2Assembler> assembler(
      static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2)));

  __ mov(R0, ShifterOperand(0x550055));
  __ mov(R0, ShifterOperand(0x55005500));
  __ mov(R0, ShifterOperand(0x55555555));
  __ mov(R0, ShifterOperand(0xd5000000));       // rotated to first position
  __ mov(R0, ShifterOperand(0x6a000000));       // rotated to second position
  __ mov(R0, ShifterOperand(0x350));            // rotated to 2nd last position
  __ mov(R0, ShifterOperand(0x1a8));            // rotated to last position

  // Finalize the code and compare against the expected disassembly.
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "DataProcessingModifiedImmediates");
}
407 
TEST(Thumb2AssemblerTest, DataProcessingShiftedRegister) {
  // Own the assembler via RAII instead of a raw pointer + manual delete.
  std::unique_ptr<arm::Thumb2Assembler> assembler(
      static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2)));

  __ mov(R3, ShifterOperand(R4, LSL, 4));
  __ mov(R3, ShifterOperand(R4, LSR, 5));
  __ mov(R3, ShifterOperand(R4, ASR, 6));
  __ mov(R3, ShifterOperand(R4, ROR, 7));
  __ mov(R3, ShifterOperand(R4, ROR));

  // 32 bit variants.
  __ mov(R8, ShifterOperand(R4, LSL, 4));
  __ mov(R8, ShifterOperand(R4, LSR, 5));
  __ mov(R8, ShifterOperand(R4, ASR, 6));
  __ mov(R8, ShifterOperand(R4, ROR, 7));
  __ mov(R8, ShifterOperand(R4, RRX));

  // Finalize the code and compare against the expected disassembly.
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "DataProcessingShiftedRegister");
}
431 
432 
TEST(Thumb2AssemblerTest, BasicLoad) {
  // Own the assembler via RAII instead of a raw pointer + manual delete.
  std::unique_ptr<arm::Thumb2Assembler> assembler(
      static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2)));

  __ ldr(R3, Address(R4, 24));
  __ ldrb(R3, Address(R4, 24));
  __ ldrh(R3, Address(R4, 24));
  __ ldrsb(R3, Address(R4, 24));
  __ ldrsh(R3, Address(R4, 24));

  __ ldr(R3, Address(SP, 24));

  // 32 bit variants
  __ ldr(R8, Address(R4, 24));
  __ ldrb(R8, Address(R4, 24));
  __ ldrh(R8, Address(R4, 24));
  __ ldrsb(R8, Address(R4, 24));
  __ ldrsh(R8, Address(R4, 24));

  // Finalize the code and compare against the expected disassembly.
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "BasicLoad");
}
458 
459 
TEST(Thumb2AssemblerTest, BasicStore) {
  // Own the assembler via RAII instead of a raw pointer + manual delete.
  std::unique_ptr<arm::Thumb2Assembler> assembler(
      static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2)));

  __ str(R3, Address(R4, 24));
  __ strb(R3, Address(R4, 24));
  __ strh(R3, Address(R4, 24));

  __ str(R3, Address(SP, 24));

  // 32 bit variants.
  __ str(R8, Address(R4, 24));
  __ strb(R8, Address(R4, 24));
  __ strh(R8, Address(R4, 24));

  // Finalize the code and compare against the expected disassembly.
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "BasicStore");
}
481 
TEST(Thumb2AssemblerTest, ComplexLoad) {
  // Own the assembler via RAII instead of a raw pointer + manual delete.
  std::unique_ptr<arm::Thumb2Assembler> assembler(
      static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2)));

  // Every load variant in every addressing mode (emission order unchanged:
  // all modes of ldr, then ldrb, ldrh, ldrsb, ldrsh).
  static const Address::Mode kModes[] = {
      Address::Mode::Offset,    Address::Mode::PreIndex,    Address::Mode::PostIndex,
      Address::Mode::NegOffset, Address::Mode::NegPreIndex, Address::Mode::NegPostIndex,
  };
  for (Address::Mode mode : kModes) __ ldr(R3, Address(R4, 24, mode));
  for (Address::Mode mode : kModes) __ ldrb(R3, Address(R4, 24, mode));
  for (Address::Mode mode : kModes) __ ldrh(R3, Address(R4, 24, mode));
  for (Address::Mode mode : kModes) __ ldrsb(R3, Address(R4, 24, mode));
  for (Address::Mode mode : kModes) __ ldrsh(R3, Address(R4, 24, mode));

  // Finalize the code and compare against the expected disassembly.
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "ComplexLoad");
}
527 
528 
TEST(Thumb2AssemblerTest, ComplexStore) {
  // Own the assembler via RAII instead of a raw pointer + manual delete.
  std::unique_ptr<arm::Thumb2Assembler> assembler(
      static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2)));

  // Every store variant in every addressing mode (emission order unchanged:
  // all modes of str, then strb, then strh).
  static const Address::Mode kModes[] = {
      Address::Mode::Offset,    Address::Mode::PreIndex,    Address::Mode::PostIndex,
      Address::Mode::NegOffset, Address::Mode::NegPreIndex, Address::Mode::NegPostIndex,
  };
  for (Address::Mode mode : kModes) __ str(R3, Address(R4, 24, mode));
  for (Address::Mode mode : kModes) __ strb(R3, Address(R4, 24, mode));
  for (Address::Mode mode : kModes) __ strh(R3, Address(R4, 24, mode));

  // Finalize the code and compare against the expected disassembly.
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "ComplexStore");
}
560 
TEST(Thumb2AssemblerTest,NegativeLoadStore)561 TEST(Thumb2AssemblerTest, NegativeLoadStore) {
562   arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
563 
564   __ ldr(R3, Address(R4, -24, Address::Mode::Offset));
565   __ ldr(R3, Address(R4, -24, Address::Mode::PreIndex));
566   __ ldr(R3, Address(R4, -24, Address::Mode::PostIndex));
567   __ ldr(R3, Address(R4, -24, Address::Mode::NegOffset));
568   __ ldr(R3, Address(R4, -24, Address::Mode::NegPreIndex));
569   __ ldr(R3, Address(R4, -24, Address::Mode::NegPostIndex));
570 
571   __ ldrb(R3, Address(R4, -24, Address::Mode::Offset));
572   __ ldrb(R3, Address(R4, -24, Address::Mode::PreIndex));
573   __ ldrb(R3, Address(R4, -24, Address::Mode::PostIndex));
574   __ ldrb(R3, Address(R4, -24, Address::Mode::NegOffset));
575   __ ldrb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
576   __ ldrb(R3, Address(R4, -24, Address::Mode::NegPostIndex));
577 
578   __ ldrh(R3, Address(R4, -24, Address::Mode::Offset));
579   __ ldrh(R3, Address(R4, -24, Address::Mode::PreIndex));
580   __ ldrh(R3, Address(R4, -24, Address::Mode::PostIndex));
581   __ ldrh(R3, Address(R4, -24, Address::Mode::NegOffset));
582   __ ldrh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
583   __ ldrh(R3, Address(R4, -24, Address::Mode::NegPostIndex));
584 
585   __ ldrsb(R3, Address(R4, -24, Address::Mode::Offset));
586   __ ldrsb(R3, Address(R4, -24, Address::Mode::PreIndex));
587   __ ldrsb(R3, Address(R4, -24, Address::Mode::PostIndex));
588   __ ldrsb(R3, Address(R4, -24, Address::Mode::NegOffset));
589   __ ldrsb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
590   __ ldrsb(R3, Address(R4, -24, Address::Mode::NegPostIndex));
591 
592   __ ldrsh(R3, Address(R4, -24, Address::Mode::Offset));
593   __ ldrsh(R3, Address(R4, -24, Address::Mode::PreIndex));
594   __ ldrsh(R3, Address(R4, -24, Address::Mode::PostIndex));
595   __ ldrsh(R3, Address(R4, -24, Address::Mode::NegOffset));
596   __ ldrsh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
597   __ ldrsh(R3, Address(R4, -24, Address::Mode::NegPostIndex));
598 
599   __ str(R3, Address(R4, -24, Address::Mode::Offset));
600   __ str(R3, Address(R4, -24, Address::Mode::PreIndex));
601   __ str(R3, Address(R4, -24, Address::Mode::PostIndex));
602   __ str(R3, Address(R4, -24, Address::Mode::NegOffset));
603   __ str(R3, Address(R4, -24, Address::Mode::NegPreIndex));
604   __ str(R3, Address(R4, -24, Address::Mode::NegPostIndex));
605 
606   __ strb(R3, Address(R4, -24, Address::Mode::Offset));
607   __ strb(R3, Address(R4, -24, Address::Mode::PreIndex));
608   __ strb(R3, Address(R4, -24, Address::Mode::PostIndex));
609   __ strb(R3, Address(R4, -24, Address::Mode::NegOffset));
610   __ strb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
611   __ strb(R3, Address(R4, -24, Address::Mode::NegPostIndex));
612 
613   __ strh(R3, Address(R4, -24, Address::Mode::Offset));
614   __ strh(R3, Address(R4, -24, Address::Mode::PreIndex));
615   __ strh(R3, Address(R4, -24, Address::Mode::PostIndex));
616   __ strh(R3, Address(R4, -24, Address::Mode::NegOffset));
617   __ strh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
618   __ strh(R3, Address(R4, -24, Address::Mode::NegPostIndex));
619 
620   size_t cs = __ CodeSize();
621   std::vector<uint8_t> managed_code(cs);
622   MemoryRegion code(&managed_code[0], managed_code.size());
623   __ FinalizeInstructions(code);
624   dump(managed_code, "NegativeLoadStore");
625   delete assembler;
626 }
627 
TEST(Thumb2AssemblerTest, SimpleLoadStoreDual) {
  // Own the assembler via RAII instead of a raw pointer + manual delete.
  std::unique_ptr<arm::Thumb2Assembler> assembler(
      static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2)));

  __ strd(R2, Address(R0, 24, Address::Mode::Offset));
  __ ldrd(R2, Address(R0, 24, Address::Mode::Offset));

  // Finalize the code and compare against the expected disassembly.
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "SimpleLoadStoreDual");
}
641 
TEST(Thumb2AssemblerTest, ComplexLoadStoreDual) {
  // Own the assembler via RAII instead of a raw pointer + manual delete.
  std::unique_ptr<arm::Thumb2Assembler> assembler(
      static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2)));

  // strd then ldrd through every addressing mode (emission order unchanged).
  static const Address::Mode kModes[] = {
      Address::Mode::Offset,    Address::Mode::PreIndex,    Address::Mode::PostIndex,
      Address::Mode::NegOffset, Address::Mode::NegPreIndex, Address::Mode::NegPostIndex,
  };
  for (Address::Mode mode : kModes) __ strd(R2, Address(R0, 24, mode));
  for (Address::Mode mode : kModes) __ ldrd(R2, Address(R0, 24, mode));

  // Finalize the code and compare against the expected disassembly.
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "ComplexLoadStoreDual");
}
666 
TEST(Thumb2AssemblerTest, NegativeLoadStoreDual) {
  // Own the assembler via RAII instead of a raw pointer + manual delete.
  std::unique_ptr<arm::Thumb2Assembler> assembler(
      static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2)));

  // strd then ldrd with a negative offset through every addressing mode
  // (emission order unchanged).
  static const Address::Mode kModes[] = {
      Address::Mode::Offset,    Address::Mode::PreIndex,    Address::Mode::PostIndex,
      Address::Mode::NegOffset, Address::Mode::NegPreIndex, Address::Mode::NegPostIndex,
  };
  for (Address::Mode mode : kModes) __ strd(R2, Address(R0, -24, mode));
  for (Address::Mode mode : kModes) __ ldrd(R2, Address(R0, -24, mode));

  // Finalize the code and compare against the expected disassembly.
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "NegativeLoadStoreDual");
}
691 
TEST(Thumb2AssemblerTest, SimpleBranch) {
  // Own the assembler via RAII instead of a raw pointer + manual delete.
  std::unique_ptr<arm::Thumb2Assembler> assembler(
      static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2)));

  // Backward unconditional branch.
  Label l1;
  __ mov(R0, ShifterOperand(2));
  __ Bind(&l1);
  __ mov(R1, ShifterOperand(1));
  __ b(&l1);
  // Forward unconditional branch.
  Label l2;
  __ b(&l2);
  __ mov(R1, ShifterOperand(2));
  __ Bind(&l2);
  __ mov(R0, ShifterOperand(3));

  // Backward conditional branch.
  Label l3;
  __ mov(R0, ShifterOperand(2));
  __ Bind(&l3);
  __ mov(R1, ShifterOperand(1));
  __ b(&l3, EQ);

  // Forward conditional branch.
  Label l4;
  __ b(&l4, EQ);
  __ mov(R1, ShifterOperand(2));
  __ Bind(&l4);
  __ mov(R0, ShifterOperand(3));

  // 2 linked labels.
  Label l5;
  __ b(&l5);
  __ mov(R1, ShifterOperand(4));
  __ b(&l5);
  __ mov(R1, ShifterOperand(5));
  __ Bind(&l5);
  __ mov(R0, ShifterOperand(6));

  // Finalize the code and compare against the expected disassembly.
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "SimpleBranch");
}
734 
TEST(Thumb2AssemblerTest, LongBranch) {
  // Own the assembler via RAII instead of a raw pointer + manual delete.
  std::unique_ptr<arm::Thumb2Assembler> assembler(
      static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2)));
  assembler->Force32Bit();
  // 32 bit branches.
  Label l1;
  __ mov(R0, ShifterOperand(2));
  __ Bind(&l1);
  __ mov(R1, ShifterOperand(1));
  __ b(&l1);

  Label l2;
  __ b(&l2);
  __ mov(R1, ShifterOperand(2));
  __ Bind(&l2);
  __ mov(R0, ShifterOperand(3));

  Label l3;
  __ mov(R0, ShifterOperand(2));
  __ Bind(&l3);
  __ mov(R1, ShifterOperand(1));
  __ b(&l3, EQ);

  Label l4;
  __ b(&l4, EQ);
  __ mov(R1, ShifterOperand(2));
  __ Bind(&l4);
  __ mov(R0, ShifterOperand(3));

  // 2 linked labels.
  Label l5;
  __ b(&l5);
  __ mov(R1, ShifterOperand(4));
  __ b(&l5);
  __ mov(R1, ShifterOperand(5));
  __ Bind(&l5);
  __ mov(R0, ShifterOperand(6));

  // Finalize the code and compare against the expected disassembly.
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "LongBranch");
}
779 
TEST(Thumb2AssemblerTest, LoadMultiple) {
  // Own the assembler via RAII instead of a raw pointer + manual delete.
  std::unique_ptr<arm::Thumb2Assembler> assembler(
      static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2)));

  // 16 bit.
  __ ldm(DB_W, R4, (1 << R0 | 1 << R3));

  // 32 bit.
  __ ldm(DB_W, R4, (1 << LR | 1 << R11));
  __ ldm(DB, R4, (1 << LR | 1 << R11));

  // Single reg is converted to ldr
  __ ldm(DB_W, R4, (1 << R5));

  // Finalize the code and compare against the expected disassembly.
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "LoadMultiple");
}
800 
TEST(Thumb2AssemblerTest, StoreMultiple) {
  // Own the assembler via RAII instead of a raw pointer + manual delete.
  std::unique_ptr<arm::Thumb2Assembler> assembler(
      static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2)));

  // 16 bit.
  __ stm(IA_W, R4, (1 << R0 | 1 << R3));

  // 32 bit.
  __ stm(IA_W, R4, (1 << LR | 1 << R11));
  __ stm(IA, R4, (1 << LR | 1 << R11));

  // Single reg is converted to str
  __ stm(IA_W, R4, (1 << R5));
  __ stm(IA, R4, (1 << R5));

  // Finalize the code and compare against the expected disassembly.
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "StoreMultiple");
}
822 
TEST(Thumb2AssemblerTest, MovWMovT) {
  // Own the assembler via RAII instead of a raw pointer + manual delete.
  std::unique_ptr<arm::Thumb2Assembler> assembler(
      static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2)));

  __ movw(R4, 0);         // 16 bit.
  __ movw(R4, 0x34);      // 16 bit.
  __ movw(R9, 0x34);      // 32 bit due to high register.
  __ movw(R3, 0x1234);    // 32 bit due to large value.
  __ movw(R9, 0xffff);    // 32 bit due to large value and high register.

  // Always 32 bit.
  __ movt(R0, 0);
  __ movt(R0, 0x1234);
  __ movt(R1, 0xffff);

  // Finalize the code and compare against the expected disassembly.
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "MovWMovT");
}
844 
// SP-relative add/sub have dedicated short encodings; check when they apply.
TEST(Thumb2AssemblerTest, SpecialAddSub) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ add(R2, SP, ShifterOperand(0x50));   // ADD (SP plus imm), 16 bit.
  __ add(SP, SP, ShifterOperand(0x50));   // ADD SP, SP, imm — 16 bit.
  __ add(R8, SP, ShifterOperand(0x50));   // High destination: 32 bit.

  __ add(R2, SP, ShifterOperand(0xf00));  // Immediate too large: 32 bit.
  __ add(SP, SP, ShifterOperand(0xf00));  // Immediate too large: 32 bit.

  __ sub(SP, SP, ShifterOperand(0x50));     // SUB SP, SP, imm — 16 bit.
  __ sub(R0, SP, ShifterOperand(0x50));     // No short SUB Rd, SP form: 32 bit.
  __ sub(R8, SP, ShifterOperand(0x50));     // High destination: 32 bit.

  __ sub(SP, SP, ShifterOperand(0xf00));   // Immediate too large: 32 bit.

  size_t code_size = __ CodeSize();
  std::vector<uint8_t> buffer(code_size);
  MemoryRegion region(&buffer[0], buffer.size());
  __ FinalizeInstructions(region);
  dump(buffer, "SpecialAddSub");
  delete assembler;
}
868 
// StoreToOffset helper: should synthesize extra instructions when the
// offset does not fit the addressing mode.
TEST(Thumb2AssemblerTest, StoreToOffset) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ StoreToOffset(kStoreWord, R2, R4, 12);         // In range: single store.
  __ StoreToOffset(kStoreWord, R2, R4, 0x2000);     // Out of range: needs scratch sequence.
  __ StoreToOffset(kStoreWord, R0, R12, 12);
  __ StoreToOffset(kStoreHalfword, R0, R12, 12);
  __ StoreToOffset(kStoreByte, R2, R12, 12);

  size_t code_size = __ CodeSize();
  std::vector<uint8_t> buffer(code_size);
  MemoryRegion region(&buffer[0], buffer.size());
  __ FinalizeInstructions(region);
  dump(buffer, "StoreToOffset");
  delete assembler;
}
885 
886 
// Emit every IT-block shape (T/E suffixes up to three extra instructions) and
// the conditional movs that populate each slot.
TEST(Thumb2AssemblerTest, IfThen) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  // IT — single conditional instruction.
  __ it(EQ);
  __ mov(R1, ShifterOperand(1), EQ);

  // ITT.
  __ it(EQ, kItThen);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), EQ);

  // ITE.
  __ it(EQ, kItElse);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), NE);

  // ITTE.
  __ it(EQ, kItThen, kItElse);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), EQ);
  __ mov(R3, ShifterOperand(3), NE);

  // ITEE.
  __ it(EQ, kItElse, kItElse);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), NE);
  __ mov(R3, ShifterOperand(3), NE);

  // ITTTE.
  __ it(EQ, kItThen, kItThen, kItElse);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), EQ);
  __ mov(R3, ShifterOperand(3), EQ);
  __ mov(R4, ShifterOperand(4), NE);

  size_t code_size = __ CodeSize();
  std::vector<uint8_t> buffer(code_size);
  MemoryRegion region(&buffer[0], buffer.size());
  __ FinalizeInstructions(region);
  dump(buffer, "IfThen");
  delete assembler;
}
924 
// Forward cbz/cbnz over a couple of instructions; both labels resolve forwards.
TEST(Thumb2AssemblerTest, CbzCbnz) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  Label skip_zero;
  __ cbz(R2, &skip_zero);
  __ mov(R1, ShifterOperand(3));
  __ mov(R2, ShifterOperand(3));
  __ Bind(&skip_zero);
  __ mov(R2, ShifterOperand(4));

  Label skip_nonzero;
  __ cbnz(R2, &skip_nonzero);
  __ mov(R8, ShifterOperand(3));
  __ mov(R2, ShifterOperand(3));
  __ Bind(&skip_nonzero);
  __ mov(R2, ShifterOperand(4));

  size_t code_size = __ CodeSize();
  std::vector<uint8_t> buffer(code_size);
  MemoryRegion region(&buffer[0], buffer.size());
  __ FinalizeInstructions(region);
  dump(buffer, "CbzCbnz");
  delete assembler;
}
949 
// Multiply family: mul (low and high registers), mla, mls, and umull.
TEST(Thumb2AssemblerTest, Multiply) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ mul(R0, R1, R0);     // Rd == Rm, low regs.
  __ mul(R0, R1, R2);     // Three distinct low regs.
  __ mul(R8, R9, R8);     // High registers.
  __ mul(R8, R9, R10);

  __ mla(R0, R1, R2, R3);
  __ mla(R8, R9, R8, R9);

  __ mls(R0, R1, R2, R3);
  __ mls(R8, R9, R8, R9);

  __ umull(R0, R1, R2, R3);
  __ umull(R8, R9, R10, R11);

  size_t code_size = __ CodeSize();
  std::vector<uint8_t> buffer(code_size);
  MemoryRegion region(&buffer[0], buffer.size());
  __ FinalizeInstructions(region);
  dump(buffer, "Multiply");
  delete assembler;
}
974 
// sdiv/udiv with both low and high register operands.
TEST(Thumb2AssemblerTest, Divide) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ sdiv(R0, R1, R2);
  __ sdiv(R8, R9, R10);

  __ udiv(R0, R1, R2);
  __ udiv(R8, R9, R10);

  size_t code_size = __ CodeSize();
  std::vector<uint8_t> buffer(code_size);
  MemoryRegion region(&buffer[0], buffer.size());
  __ FinalizeInstructions(region);
  dump(buffer, "Divide");
  delete assembler;
}
991 
// VFP moves: immediate forms and register-to-register forms, single and double.
TEST(Thumb2AssemblerTest, VMov) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  // Immediate variants.
  __ vmovs(S1, 1.0);
  __ vmovd(D1, 1.0);

  // Register variants.
  __ vmovs(S1, S2);
  __ vmovd(D1, D2);

  size_t code_size = __ CodeSize();
  std::vector<uint8_t> buffer(code_size);
  MemoryRegion region(&buffer[0], buffer.size());
  __ FinalizeInstructions(region);
  dump(buffer, "VMov");
  delete assembler;
}
1008 
1009 
// Full set of VFP arithmetic instructions, single-precision then
// double-precision.
TEST(Thumb2AssemblerTest, BasicFloatingPoint) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  // Single precision.
  __ vadds(S0, S1, S2);
  __ vsubs(S0, S1, S2);
  __ vmuls(S0, S1, S2);
  __ vmlas(S0, S1, S2);
  __ vmlss(S0, S1, S2);
  __ vdivs(S0, S1, S2);
  __ vabss(S0, S1);
  __ vnegs(S0, S1);
  __ vsqrts(S0, S1);

  // Double precision.
  __ vaddd(D0, D1, D2);
  __ vsubd(D0, D1, D2);
  __ vmuld(D0, D1, D2);
  __ vmlad(D0, D1, D2);
  __ vmlsd(D0, D1, D2);
  __ vdivd(D0, D1, D2);
  __ vabsd(D0, D1);
  __ vnegd(D0, D1);
  __ vsqrtd(D0, D1);

  size_t code_size = __ CodeSize();
  std::vector<uint8_t> buffer(code_size);
  MemoryRegion region(&buffer[0], buffer.size());
  __ FinalizeInstructions(region);
  dump(buffer, "BasicFloatingPoint");
  delete assembler;
}
1040 
// VFP conversions: float<->double and the signed/unsigned int conversions in
// both directions for single and double precision.
TEST(Thumb2AssemblerTest, FloatingPointConversions) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  // Between precisions.
  __ vcvtsd(S2, D2);
  __ vcvtds(D2, S2);

  // Signed int <-> single.
  __ vcvtis(S1, S2);
  __ vcvtsi(S1, S2);

  // Signed int <-> double.
  __ vcvtid(S1, D2);
  __ vcvtdi(D1, S2);

  // Unsigned int <-> single.
  __ vcvtus(S1, S2);
  __ vcvtsu(S1, S2);

  // Unsigned int <-> double.
  __ vcvtud(S1, D2);
  __ vcvtdu(D1, S2);

  size_t code_size = __ CodeSize();
  std::vector<uint8_t> buffer(code_size);
  MemoryRegion region(&buffer[0], buffer.size());
  __ FinalizeInstructions(region);
  dump(buffer, "FloatingPointConversions");
  delete assembler;
}
1066 
// VFP compares: register-register and compare-against-zero forms.
TEST(Thumb2AssemblerTest, FloatingPointComparisons) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ vcmps(S0, S1);
  __ vcmpd(D0, D1);

  // Compare with #0.0.
  __ vcmpsz(S2);
  __ vcmpdz(D2);

  size_t code_size = __ CodeSize();
  std::vector<uint8_t> buffer(code_size);
  MemoryRegion region(&buffer[0], buffer.size());
  __ FinalizeInstructions(region);
  dump(buffer, "FloatingPointComparisons");
  delete assembler;
}
1083 
// Register-indirect branch/call instructions.
TEST(Thumb2AssemblerTest, Calls) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ blx(LR);
  __ bx(LR);

  size_t code_size = __ CodeSize();
  std::vector<uint8_t> buffer(code_size);
  MemoryRegion region(&buffer[0], buffer.size());
  __ FinalizeInstructions(region);
  dump(buffer, "Calls");
  delete assembler;
}
1097 
// Software breakpoint instruction.
TEST(Thumb2AssemblerTest, Breakpoint) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ bkpt(0);

  size_t code_size = __ CodeSize();
  std::vector<uint8_t> buffer(code_size);
  MemoryRegion region(&buffer[0], buffer.size());
  __ FinalizeInstructions(region);
  dump(buffer, "Breakpoint");
  delete assembler;
}
1110 
// SP-relative stores: one offset in 16-bit range, one requiring 32 bits.
TEST(Thumb2AssemblerTest, StrR1) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ str(R1, Address(SP, 68));      // Small offset.
  __ str(R1, Address(SP, 1068));    // Large offset.

  size_t code_size = __ CodeSize();
  std::vector<uint8_t> buffer(code_size);
  MemoryRegion region(&buffer[0], buffer.size());
  __ FinalizeInstructions(region);
  dump(buffer, "StrR1");
  delete assembler;
}
1124 
// vpush/vpop of four consecutive single and double registers.
TEST(Thumb2AssemblerTest, VPushPop) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ vpushs(S2, 4);
  __ vpushd(D2, 4);

  __ vpops(S2, 4);
  __ vpopd(D2, 4);

  size_t code_size = __ CodeSize();
  std::vector<uint8_t> buffer(code_size);
  MemoryRegion region(&buffer[0], buffer.size());
  __ FinalizeInstructions(region);
  dump(buffer, "VPushPop");
  delete assembler;
}
1141 
// A forward branch exactly at the 16-bit displacement limit (2^11 bytes of
// filler) — should still use the short encoding.
TEST(Thumb2AssemblerTest, Max16BitBranch) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  Label target;
  __ b(&target);
  // Pad with 16-bit movs up to the maximum short-branch distance.
  for (int offset = 0; offset < (1 << 11); offset += 2) {
    __ mov(R3, ShifterOperand(offset & 0xff));
  }
  __ Bind(&target);
  __ mov(R1, ShifterOperand(R2));

  size_t code_size = __ CodeSize();
  std::vector<uint8_t> buffer(code_size);
  MemoryRegion region(&buffer[0], buffer.size());
  __ FinalizeInstructions(region);
  dump(buffer, "Max16BitBranch");
  delete assembler;
}
1160 
// A forward branch just past the 16-bit limit — must be relocated to the
// 32-bit encoding.
TEST(Thumb2AssemblerTest, Branch32) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  Label target;
  __ b(&target);
  // One instruction beyond the short-branch range.
  for (int offset = 0; offset < (1 << 11) + 2; offset += 2) {
    __ mov(R3, ShifterOperand(offset & 0xff));
  }
  __ Bind(&target);
  __ mov(R1, ShifterOperand(R2));

  size_t code_size = __ CodeSize();
  std::vector<uint8_t> buffer(code_size);
  MemoryRegion region(&buffer[0], buffer.size());
  __ FinalizeInstructions(region);
  dump(buffer, "Branch32");
  delete assembler;
}
1179 
// cbz at exactly the maximum cb(n)z displacement (2^7 bytes) — still encodable
// as cbz.
TEST(Thumb2AssemblerTest, CompareAndBranchMax) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  Label target;
  __ cbz(R4, &target);
  // Pad up to the maximum cbz range.
  for (int offset = 0; offset < (1 << 7); offset += 2) {
    __ mov(R3, ShifterOperand(offset & 0xff));
  }
  __ Bind(&target);
  __ mov(R1, ShifterOperand(R2));

  size_t code_size = __ CodeSize();
  std::vector<uint8_t> buffer(code_size);
  MemoryRegion region(&buffer[0], buffer.size());
  __ FinalizeInstructions(region);
  dump(buffer, "CompareAndBranchMax");
  delete assembler;
}
1198 
// cbz just past its range — relocated to a cmp + 16-bit branch sequence.
TEST(Thumb2AssemblerTest, CompareAndBranchRelocation16) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  Label target;
  __ cbz(R4, &target);
  // One instruction beyond the cbz range; still within 16-bit branch range.
  for (int offset = 0; offset < (1 << 7) + 2; offset += 2) {
    __ mov(R3, ShifterOperand(offset & 0xff));
  }
  __ Bind(&target);
  __ mov(R1, ShifterOperand(R2));

  size_t code_size = __ CodeSize();
  std::vector<uint8_t> buffer(code_size);
  MemoryRegion region(&buffer[0], buffer.size());
  __ FinalizeInstructions(region);
  dump(buffer, "CompareAndBranchRelocation16");
  delete assembler;
}
1217 
// cbz far beyond its range — relocated to a cmp + 32-bit branch sequence.
TEST(Thumb2AssemblerTest, CompareAndBranchRelocation32) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  Label target;
  __ cbz(R4, &target);
  // Beyond even the 16-bit branch range.
  for (int offset = 0; offset < (1 << 11) + 2; offset += 2) {
    __ mov(R3, ShifterOperand(offset & 0xff));
  }
  __ Bind(&target);
  __ mov(R1, ShifterOperand(R2));

  size_t code_size = __ CodeSize();
  std::vector<uint8_t> buffer(code_size);
  MemoryRegion region(&buffer[0], buffer.size());
  __ FinalizeInstructions(region);
  dump(buffer, "CompareAndBranchRelocation32");
  delete assembler;
}
1236 
// Forward and backward branches in the same function, with enough filler that
// both need 32-bit encodings after relocation.
TEST(Thumb2AssemblerTest, MixedBranch32) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  Label forward_target;
  Label backward_target;
  __ b(&forward_target);      // Forwards over the filler.
  __ Bind(&backward_target);

  // Enough padding to push both branches out of 16-bit range.
  for (int offset = 0; offset < (1 << 11) + 2; offset += 2) {
    __ mov(R3, ShifterOperand(offset & 0xff));
  }
  __ b(&backward_target);     // Backwards over the filler.
  __ Bind(&forward_target);
  __ mov(R1, ShifterOperand(R2));

  size_t code_size = __ CodeSize();
  std::vector<uint8_t> buffer(code_size);
  MemoryRegion region(&buffer[0], buffer.size());
  __ FinalizeInstructions(region);
  dump(buffer, "MixedBranch32");
  delete assembler;
}
1260 
// Shift instructions: immediate and register forms, 16-bit where Rd == Rn and
// only low registers are used, 32-bit otherwise; then the flag-setting (S bit)
// variants, which are all 32-bit here.
TEST(Thumb2AssemblerTest, Shifts) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  // Short encodings: low registers, immediate shift.
  __ Lsl(R0, R1, 5);
  __ Lsr(R0, R1, 5);
  __ Asr(R0, R1, 5);

  // Short encodings: register shift with Rd == Rn.
  __ Lsl(R0, R0, R1);
  __ Lsr(R0, R0, R1);
  __ Asr(R0, R0, R1);

  // High registers force 32-bit encodings.
  __ Lsl(R8, R1, 5);
  __ Lsr(R0, R8, 5);
  __ Asr(R8, R1, 5);
  __ Ror(R0, R8, 5);

  // Rd != Rn in the register form has no short encoding.
  __ Lsl(R0, R1, R2);
  __ Lsr(R0, R1, R2);
  __ Asr(R0, R1, R2);
  __ Ror(R0, R1, R2);

  // High registers in the register form.
  __ Lsl(R8, R1, R2);
  __ Lsr(R0, R8, R2);
  __ Asr(R0, R1, R8);

  // Flag-setting variants (all 32 bit).

  // Immediate form with high registers.
  __ Lsl(R8, R1, 5, true);
  __ Lsr(R0, R8, 5, true);
  __ Asr(R8, R1, 5, true);
  __ Ror(R0, R8, 5, true);

  // Register form with Rd != Rn.
  __ Lsl(R0, R1, R2, true);
  __ Lsr(R0, R1, R2, true);
  __ Asr(R0, R1, R2, true);
  __ Ror(R0, R1, R2, true);

  // Register form with high registers.
  __ Lsl(R8, R1, R2, true);
  __ Lsr(R0, R8, R2, true);
  __ Asr(R0, R1, R8, true);

  size_t code_size = __ CodeSize();
  std::vector<uint8_t> buffer(code_size);
  MemoryRegion region(&buffer[0], buffer.size());
  __ FinalizeInstructions(region);
  dump(buffer, "Shifts");
  delete assembler;
}
1316 
// Register-offset addressing: plain form is 16 bit; a shifted index or any
// high register forces 32 bits.
TEST(Thumb2AssemblerTest, LoadStoreRegOffset) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  // No shift, all low registers: 16 bit.
  __ ldr(R0, Address(R1, R2));
  __ str(R0, Address(R1, R2));

  // Shifted index has no short encoding.
  __ ldr(R0, Address(R1, R2, LSL, 1));
  __ str(R0, Address(R1, R2, LSL, 1));

  __ ldr(R0, Address(R1, R2, LSL, 3));
  __ str(R0, Address(R1, R2, LSL, 3));

  // High register anywhere (data, base, or index) forces 32 bits.
  __ ldr(R8, Address(R1, R2));
  __ str(R8, Address(R1, R2));

  __ ldr(R1, Address(R8, R2));
  __ str(R2, Address(R8, R2));

  __ ldr(R0, Address(R1, R8));
  __ str(R0, Address(R1, R8));

  size_t code_size = __ CodeSize();
  std::vector<uint8_t> buffer(code_size);
  MemoryRegion region(&buffer[0], buffer.size());
  __ FinalizeInstructions(region);
  dump(buffer, "LoadStoreRegOffset");
  delete assembler;
}
1348 
// PC-relative (literal) loads and stores, including negative offsets and the
// encoding-size boundaries.
TEST(Thumb2AssemblerTest, LoadStoreLiteral) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ ldr(R0, Address(4));
  __ str(R0, Address(4));

  // Negative offsets require the 32-bit form.
  __ ldr(R0, Address(-8));
  __ str(R0, Address(-8));

  // Encoding-size boundaries.
  __ ldr(R0, Address(0x3ff));       // 10-bit offset: 16 bit.
  __ ldr(R0, Address(0x7ff));       // 11-bit offset: 32 bit.
  __ str(R0, Address(0x3ff));       // No 16-bit str(literal): 32 bit.
  __ str(R0, Address(0x7ff));       // 11-bit offset: 32 bit.

  size_t code_size = __ CodeSize();
  std::vector<uint8_t> buffer(code_size);
  MemoryRegion region(&buffer[0], buffer.size());
  __ FinalizeInstructions(region);
  dump(buffer, "LoadStoreLiteral");
  delete assembler;
}
1371 
// Immediate-offset limits for every load/store width: each pair straddles the
// 16-bit encoding's maximum offset (scaled by access size).
TEST(Thumb2AssemblerTest, LoadStoreLimits) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ ldr(R0, Address(R4, 124));     // Max word offset: 16 bit.
  __ ldr(R0, Address(R4, 128));     // Past the limit: 32 bit.

  __ ldrb(R0, Address(R4, 31));     // Max byte offset: 16 bit.
  __ ldrb(R0, Address(R4, 32));     // Past the limit: 32 bit.

  __ ldrh(R0, Address(R4, 62));     // Max halfword offset: 16 bit.
  __ ldrh(R0, Address(R4, 64));     // Past the limit: 32 bit.

  // Signed loads have no 16-bit immediate form at all.
  __ ldrsb(R0, Address(R4, 31));    // 32 bit.
  __ ldrsb(R0, Address(R4, 32));    // 32 bit.

  __ ldrsh(R0, Address(R4, 62));    // 32 bit.
  __ ldrsh(R0, Address(R4, 64));    // 32 bit.

  __ str(R0, Address(R4, 124));     // Max word offset: 16 bit.
  __ str(R0, Address(R4, 128));     // Past the limit: 32 bit.

  __ strb(R0, Address(R4, 31));     // Max byte offset: 16 bit.
  __ strb(R0, Address(R4, 32));     // Past the limit: 32 bit.

  __ strh(R0, Address(R4, 62));     // Max halfword offset: 16 bit.
  __ strh(R0, Address(R4, 64));     // Past the limit: 32 bit.

  size_t code_size = __ CodeSize();
  std::vector<uint8_t> buffer(code_size);
  MemoryRegion region(&buffer[0], buffer.size());
  __ FinalizeInstructions(region);
  dump(buffer, "LoadStoreLimits");
  delete assembler;
}
1406 
1407 #undef __
1408 }  // namespace arm
1409 }  // namespace art
1410