1 /*
2  * Copyright (C) 2014 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include <dirent.h>
18 #include <errno.h>
19 #include <fstream>
20 #include <map>
21 #include <string.h>
22 #include <sys/types.h>
23 
24 #include "gtest/gtest.h"
25 #include "utils/arm/assembler_thumb2.h"
26 #include "base/hex_dump.h"
27 #include "common_runtime_test.h"
28 
29 namespace art {
30 namespace arm {
31 
32 // Include results file (generated manually)
33 #include "assembler_thumb_test_expected.cc.inc"
34 
35 #ifndef __ANDROID__
36 // This controls whether the results are printed to the
37 // screen or compared against the expected output.
38 // To generate new expected output, set this to true and
39 // copy the output into the .cc.inc file in the form
40 // of the other results.
41 //
42 // When this is false, the results are not printed to the
43 // output, but are compared against the expected results
44 // in the .cc.inc file.
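//
// As a rough, hypothetical sketch (array name, addresses and encodings are
// invented purely for illustration), an entry in the .cc.inc file has this
// shape: the objdump lines wrapped by the sed pipeline in DumpAndCheck()
// below, terminated by a nullptr sentinel:
//
//   const char* const SimpleMovResults[] = {
//     "   0:   1c08        adds  r0, r1, #0\n",
//     "   2:   4608        mov   r0, r1\n",
//     nullptr
//   };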
45 static constexpr bool kPrintResults = false;
46 #endif
47 
48 void SetAndroidData() {
49   const char* data = getenv("ANDROID_DATA");
50   if (data == nullptr) {
51     setenv("ANDROID_DATA", "/tmp", 1);
52   }
53 }
54 
55 int CompareIgnoringSpace(const char* s1, const char* s2) {
56   while (*s1 != '\0') {
57     while (isspace(*s1)) ++s1;
58     while (isspace(*s2)) ++s2;
59     if (*s1 == '\0' || *s1 != *s2) {
60       break;
61     }
62     ++s1;
63     ++s2;
64   }
65   return *s1 - *s2;
66 }
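// For example, CompareIgnoringSpace("mov r0, r1", "  mov   r0, r1") returns 0,
// since both strings reduce to the same characters once whitespace is skipped.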
67 
68 void InitResults() {
69   if (test_results.empty()) {
70     setup_results();
71   }
72 }
73 
74 std::string GetToolsDir() {
75 #ifndef __ANDROID__
76   // This will only work on the host.  There is no as, objcopy or objdump on the device.
77   static std::string toolsdir;
78 
79   if (toolsdir.empty()) {
80     setup_results();
81     toolsdir = CommonRuntimeTest::GetAndroidTargetToolsDir(kThumb2);
82     SetAndroidData();
83   }
84 
85   return toolsdir;
86 #else
87   return std::string();
88 #endif
89 }
90 
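// Writes the generated code to a scratch .S file, assembles it with the host
// toolchain, strips the '$d' mapping symbols, disassembles the object with
// objdump, and then either prints the disassembly (kPrintResults == true) or
// compares it line by line against the expected `results` from the .cc.inc file.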
91 void DumpAndCheck(std::vector<uint8_t>& code, const char* testname, const char* const* results) {
92 #ifndef __ANDROID__
93   static std::string toolsdir = GetToolsDir();
94 
95   ScratchFile file;
96 
97   const char* filename = file.GetFilename().c_str();
98 
99   std::ofstream out(filename);
100   if (out) {
101     out << ".section \".text\"\n";
102     out << ".syntax unified\n";
103     out << ".arch armv7-a\n";
104     out << ".thumb\n";
105     out << ".thumb_func\n";
106     out << ".type " << testname << ", #function\n";
107     out << ".global " << testname << "\n";
108     out << testname << ":\n";
109     out << ".fnstart\n";
110 
111     for (uint32_t i = 0 ; i < code.size(); ++i) {
112       out << ".byte " << (static_cast<int>(code[i]) & 0xff) << "\n";
113     }
114     out << ".fnend\n";
115     out << ".size " << testname << ", .-" << testname << "\n";
116   }
117   out.close();
118 
119   char cmd[1024];
120 
121   // Assemble the .S
122   snprintf(cmd, sizeof(cmd), "%sas %s -o %s.o", toolsdir.c_str(), filename, filename);
123   int cmd_result = system(cmd);
124   ASSERT_EQ(cmd_result, 0) << strerror(errno);
125 
126   // Remove the $d symbols to prevent the disassembler from dumping the
127   // instructions as .word.
128   snprintf(cmd, sizeof(cmd), "%sobjcopy -N '$d' %s.o %s.oo", toolsdir.c_str(), filename, filename);
129   int cmd_result2 = system(cmd);
130   ASSERT_EQ(cmd_result2, 0) << strerror(errno);
131 
132   // Disassemble.
133 
134   snprintf(cmd, sizeof(cmd), "%sobjdump -d %s.oo | grep '^  *[0-9a-f][0-9a-f]*:'",
135     toolsdir.c_str(), filename);
136   if (kPrintResults) {
137     // Print the results only, don't check. This is used to generate new output for inserting
138     // into the .inc file, so let's add the appropriate prefix/suffix needed in the C++ code.
139     strcat(cmd, " | sed '-es/^/  \"/' | sed '-es/$/\\\\n\",/'");
140     int cmd_result3 = system(cmd);
141     ASSERT_EQ(cmd_result3, 0) << strerror(errno);
142   } else {
143     // Check the results match the appropriate results in the .inc file.
144     FILE *fp = popen(cmd, "r");
145     ASSERT_TRUE(fp != nullptr);
146 
147     uint32_t lineindex = 0;
148 
149     while (!feof(fp)) {
150       char testline[256];
151       char *s = fgets(testline, sizeof(testline), fp);
152       if (s == nullptr) {
153         break;
154       }
155       if (CompareIgnoringSpace(results[lineindex], testline) != 0) {
156         LOG(FATAL) << "Output is not as expected at line: " << lineindex
157           << results[lineindex] << "/" << testline;
158       }
159       ++lineindex;
160     }
161     // Check that we are at the end.
162     ASSERT_TRUE(results[lineindex] == nullptr);
163     fclose(fp);
164   }
165 
166   char buf[FILENAME_MAX];
167   snprintf(buf, sizeof(buf), "%s.o", filename);
168   unlink(buf);
169 
170   snprintf(buf, sizeof(buf), "%s.oo", filename);
171   unlink(buf);
172 #endif
173 }
174 
175 #define __ assembler->
176 
177 void EmitAndCheck(arm::Thumb2Assembler* assembler, const char* testname,
178                   const char* const* results) {
179   __ FinalizeCode();
180   size_t cs = __ CodeSize();
181   std::vector<uint8_t> managed_code(cs);
182   MemoryRegion code(&managed_code[0], managed_code.size());
183   __ FinalizeInstructions(code);
184 
185   DumpAndCheck(managed_code, testname, results);
186 }
187 
188 void EmitAndCheck(arm::Thumb2Assembler* assembler, const char* testname) {
189   InitResults();
190   std::map<std::string, const char* const*>::iterator results = test_results.find(testname);
191   ASSERT_NE(results, test_results.end());
192 
193   EmitAndCheck(assembler, testname, results->second);
194 }
195 
196 #undef __
197 
198 class Thumb2AssemblerTest : public ::testing::Test {
199  public:
200   Thumb2AssemblerTest() : pool(), arena(&pool), assembler(&arena) { }
201 
202   ArenaPool pool;
203   ArenaAllocator arena;
204   arm::Thumb2Assembler assembler;
205 };
206 
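// Each test below drives the fixture's assembler through the "__" shorthand
// defined next, then calls EmitAndCheck() with the test's own name so the
// generated code is compared against the matching entry in
// assembler_thumb_test_expected.cc.inc.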
207 #define __ assembler.
208 
209 TEST_F(Thumb2AssemblerTest, SimpleMov) {
210   __ movs(R0, ShifterOperand(R1));
211   __ mov(R0, ShifterOperand(R1));
212   __ mov(R8, ShifterOperand(R9));
213 
214   __ mov(R0, ShifterOperand(1));
215   __ mov(R8, ShifterOperand(9));
216 
217   EmitAndCheck(&assembler, "SimpleMov");
218 }
219 
220 TEST_F(Thumb2AssemblerTest, SimpleMov32) {
221   __ Force32Bit();
222 
223   __ mov(R0, ShifterOperand(R1));
224   __ mov(R8, ShifterOperand(R9));
225 
226   EmitAndCheck(&assembler, "SimpleMov32");
227 }
228 
229 TEST_F(Thumb2AssemblerTest, SimpleMovAdd) {
230   __ mov(R0, ShifterOperand(R1));
231   __ adds(R0, R1, ShifterOperand(R2));
232   __ add(R0, R1, ShifterOperand(0));
233 
234   EmitAndCheck(&assembler, "SimpleMovAdd");
235 }
236 
237 TEST_F(Thumb2AssemblerTest, DataProcessingRegister) {
238   // 32 bit variants using low registers.
239   __ mvn(R0, ShifterOperand(R1), AL, kCcKeep);
240   __ add(R0, R1, ShifterOperand(R2), AL, kCcKeep);
241   __ sub(R0, R1, ShifterOperand(R2), AL, kCcKeep);
242   __ and_(R0, R1, ShifterOperand(R2), AL, kCcKeep);
243   __ orr(R0, R1, ShifterOperand(R2), AL, kCcKeep);
244   __ orn(R0, R1, ShifterOperand(R2), AL, kCcKeep);
245   __ eor(R0, R1, ShifterOperand(R2), AL, kCcKeep);
246   __ bic(R0, R1, ShifterOperand(R2), AL, kCcKeep);
247   __ adc(R0, R1, ShifterOperand(R2), AL, kCcKeep);
248   __ sbc(R0, R1, ShifterOperand(R2), AL, kCcKeep);
249   __ rsb(R0, R1, ShifterOperand(R2), AL, kCcKeep);
250   __ teq(R0, ShifterOperand(R1));
251 
252   // 16 bit variants using low registers.
253   __ movs(R0, ShifterOperand(R1));
254   __ mov(R0, ShifterOperand(R1), AL, kCcKeep);
255   __ mvns(R0, ShifterOperand(R1));
256   __ add(R0, R0, ShifterOperand(R1), AL, kCcKeep);
257   __ adds(R0, R1, ShifterOperand(R2));
258   __ subs(R0, R1, ShifterOperand(R2));
259   __ adcs(R0, R0, ShifterOperand(R1));
260   __ sbcs(R0, R0, ShifterOperand(R1));
261   __ ands(R0, R0, ShifterOperand(R1));
262   __ orrs(R0, R0, ShifterOperand(R1));
263   __ eors(R0, R0, ShifterOperand(R1));
264   __ bics(R0, R0, ShifterOperand(R1));
265   __ tst(R0, ShifterOperand(R1));
266   __ cmp(R0, ShifterOperand(R1));
267   __ cmn(R0, ShifterOperand(R1));
268 
269   // 16-bit variants using high registers.
270   __ mov(R1, ShifterOperand(R8), AL, kCcKeep);
271   __ mov(R9, ShifterOperand(R0), AL, kCcKeep);
272   __ mov(R8, ShifterOperand(R9), AL, kCcKeep);
273   __ add(R1, R1, ShifterOperand(R8), AL, kCcKeep);
274   __ add(R9, R9, ShifterOperand(R0), AL, kCcKeep);
275   __ add(R8, R8, ShifterOperand(R9), AL, kCcKeep);
276   __ cmp(R0, ShifterOperand(R9));
277   __ cmp(R8, ShifterOperand(R1));
278   __ cmp(R9, ShifterOperand(R8));
279 
280   // The 16-bit RSBS Rd, Rn, #0, also known as NEGS Rd, Rn, is specified using
281   // an immediate (0) but emitted without one, so we test it here.
282   __ rsbs(R0, R1, ShifterOperand(0));
283   __ rsbs(R0, R0, ShifterOperand(0));  // Check Rd == Rn code path.
284 
285   // 32 bit variants using high registers that would be 16-bit if using low registers.
286   __ movs(R0, ShifterOperand(R8));
287   __ mvns(R0, ShifterOperand(R8));
288   __ add(R0, R1, ShifterOperand(R8), AL, kCcKeep);
289   __ adds(R0, R1, ShifterOperand(R8));
290   __ subs(R0, R1, ShifterOperand(R8));
291   __ adcs(R0, R0, ShifterOperand(R8));
292   __ sbcs(R0, R0, ShifterOperand(R8));
293   __ ands(R0, R0, ShifterOperand(R8));
294   __ orrs(R0, R0, ShifterOperand(R8));
295   __ eors(R0, R0, ShifterOperand(R8));
296   __ bics(R0, R0, ShifterOperand(R8));
297   __ tst(R0, ShifterOperand(R8));
298   __ cmn(R0, ShifterOperand(R8));
299   __ rsbs(R0, R8, ShifterOperand(0));  // Check that this is not emitted as 16-bit.
300   __ rsbs(R8, R8, ShifterOperand(0));  // Check that this is not emitted as 16-bit (Rd == Rn).
301 
302   // 32-bit variants of instructions that would be 16-bit outside IT block.
303   __ it(arm::EQ);
304   __ mvns(R0, ShifterOperand(R1), arm::EQ);
305   __ it(arm::EQ);
306   __ adds(R0, R1, ShifterOperand(R2), arm::EQ);
307   __ it(arm::EQ);
308   __ subs(R0, R1, ShifterOperand(R2), arm::EQ);
309   __ it(arm::EQ);
310   __ adcs(R0, R0, ShifterOperand(R1), arm::EQ);
311   __ it(arm::EQ);
312   __ sbcs(R0, R0, ShifterOperand(R1), arm::EQ);
313   __ it(arm::EQ);
314   __ ands(R0, R0, ShifterOperand(R1), arm::EQ);
315   __ it(arm::EQ);
316   __ orrs(R0, R0, ShifterOperand(R1), arm::EQ);
317   __ it(arm::EQ);
318   __ eors(R0, R0, ShifterOperand(R1), arm::EQ);
319   __ it(arm::EQ);
320   __ bics(R0, R0, ShifterOperand(R1), arm::EQ);
321 
322   // 16-bit variants of instructions that would be 32-bit outside IT block.
323   __ it(arm::EQ);
324   __ mvn(R0, ShifterOperand(R1), arm::EQ, kCcKeep);
325   __ it(arm::EQ);
326   __ add(R0, R1, ShifterOperand(R2), arm::EQ, kCcKeep);
327   __ it(arm::EQ);
328   __ sub(R0, R1, ShifterOperand(R2), arm::EQ, kCcKeep);
329   __ it(arm::EQ);
330   __ adc(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
331   __ it(arm::EQ);
332   __ sbc(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
333   __ it(arm::EQ);
334   __ and_(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
335   __ it(arm::EQ);
336   __ orr(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
337   __ it(arm::EQ);
338   __ eor(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
339   __ it(arm::EQ);
340   __ bic(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
341 
342   // 16 bit variants selected for the default kCcDontCare.
343   __ mov(R0, ShifterOperand(R1));
344   __ mvn(R0, ShifterOperand(R1));
345   __ add(R0, R0, ShifterOperand(R1));
346   __ add(R0, R1, ShifterOperand(R2));
347   __ sub(R0, R1, ShifterOperand(R2));
348   __ adc(R0, R0, ShifterOperand(R1));
349   __ sbc(R0, R0, ShifterOperand(R1));
350   __ and_(R0, R0, ShifterOperand(R1));
351   __ orr(R0, R0, ShifterOperand(R1));
352   __ eor(R0, R0, ShifterOperand(R1));
353   __ bic(R0, R0, ShifterOperand(R1));
354   __ mov(R1, ShifterOperand(R8));
355   __ mov(R9, ShifterOperand(R0));
356   __ mov(R8, ShifterOperand(R9));
357   __ add(R1, R1, ShifterOperand(R8));
358   __ add(R9, R9, ShifterOperand(R0));
359   __ add(R8, R8, ShifterOperand(R9));
360   __ rsb(R0, R1, ShifterOperand(0));
361   __ rsb(R0, R0, ShifterOperand(0));
362 
363   // And an arbitrary 32-bit instruction using IP.
364   __ add(R12, R1, ShifterOperand(R0), AL, kCcKeep);
365 
366   EmitAndCheck(&assembler, "DataProcessingRegister");
367 }
368 
369 TEST_F(Thumb2AssemblerTest, DataProcessingImmediate) {
370   __ mov(R0, ShifterOperand(0x55));
371   __ mvn(R0, ShifterOperand(0x55));
372   __ add(R0, R1, ShifterOperand(0x55));
373   __ sub(R0, R1, ShifterOperand(0x55));
374   __ and_(R0, R1, ShifterOperand(0x55));
375   __ orr(R0, R1, ShifterOperand(0x55));
376   __ orn(R0, R1, ShifterOperand(0x55));
377   __ eor(R0, R1, ShifterOperand(0x55));
378   __ bic(R0, R1, ShifterOperand(0x55));
379   __ adc(R0, R1, ShifterOperand(0x55));
380   __ sbc(R0, R1, ShifterOperand(0x55));
381   __ rsb(R0, R1, ShifterOperand(0x55));
382 
383   __ tst(R0, ShifterOperand(0x55));
384   __ teq(R0, ShifterOperand(0x55));
385   __ cmp(R0, ShifterOperand(0x55));
386   __ cmn(R0, ShifterOperand(0x55));
387 
388   __ add(R0, R1, ShifterOperand(5));
389   __ sub(R0, R1, ShifterOperand(5));
390 
391   __ movs(R0, ShifterOperand(0x55));
392   __ mvns(R0, ShifterOperand(0x55));
393 
394   __ adds(R0, R1, ShifterOperand(5));
395   __ subs(R0, R1, ShifterOperand(5));
396 
397   EmitAndCheck(&assembler, "DataProcessingImmediate");
398 }
399 
400 TEST_F(Thumb2AssemblerTest, DataProcessingModifiedImmediate) {
401   __ mov(R0, ShifterOperand(0x550055));
402   __ mvn(R0, ShifterOperand(0x550055));
403   __ add(R0, R1, ShifterOperand(0x550055));
404   __ sub(R0, R1, ShifterOperand(0x550055));
405   __ and_(R0, R1, ShifterOperand(0x550055));
406   __ orr(R0, R1, ShifterOperand(0x550055));
407   __ orn(R0, R1, ShifterOperand(0x550055));
408   __ eor(R0, R1, ShifterOperand(0x550055));
409   __ bic(R0, R1, ShifterOperand(0x550055));
410   __ adc(R0, R1, ShifterOperand(0x550055));
411   __ sbc(R0, R1, ShifterOperand(0x550055));
412   __ rsb(R0, R1, ShifterOperand(0x550055));
413 
414   __ tst(R0, ShifterOperand(0x550055));
415   __ teq(R0, ShifterOperand(0x550055));
416   __ cmp(R0, ShifterOperand(0x550055));
417   __ cmn(R0, ShifterOperand(0x550055));
418 
419   EmitAndCheck(&assembler, "DataProcessingModifiedImmediate");
420 }
421 
422 
423 TEST_F(Thumb2AssemblerTest, DataProcessingModifiedImmediates) {
424   __ mov(R0, ShifterOperand(0x550055));
425   __ mov(R0, ShifterOperand(0x55005500));
426   __ mov(R0, ShifterOperand(0x55555555));
427   __ mov(R0, ShifterOperand(0xd5000000));       // rotated to first position
428   __ mov(R0, ShifterOperand(0x6a000000));       // rotated to second position
429   __ mov(R0, ShifterOperand(0x350));            // rotated to 2nd last position
430   __ mov(R0, ShifterOperand(0x1a8));            // rotated to last position
431 
432   EmitAndCheck(&assembler, "DataProcessingModifiedImmediates");
433 }
434 
435 TEST_F(Thumb2AssemblerTest, DataProcessingShiftedRegister) {
436   // 16-bit variants.
437   __ movs(R3, ShifterOperand(R4, LSL, 4));
438   __ movs(R3, ShifterOperand(R4, LSR, 5));
439   __ movs(R3, ShifterOperand(R4, ASR, 6));
440 
441   // 32-bit ROR because ROR immediate doesn't have the same 16-bit version as other shifts.
442   __ movs(R3, ShifterOperand(R4, ROR, 7));
443 
444   // 32-bit RRX because RRX has no 16-bit version.
445   __ movs(R3, ShifterOperand(R4, RRX));
446 
447   // 32 bit variants (not setting condition codes).
448   __ mov(R3, ShifterOperand(R4, LSL, 4), AL, kCcKeep);
449   __ mov(R3, ShifterOperand(R4, LSR, 5), AL, kCcKeep);
450   __ mov(R3, ShifterOperand(R4, ASR, 6), AL, kCcKeep);
451   __ mov(R3, ShifterOperand(R4, ROR, 7), AL, kCcKeep);
452   __ mov(R3, ShifterOperand(R4, RRX), AL, kCcKeep);
453 
454   // 32 bit variants (high registers).
455   __ movs(R8, ShifterOperand(R4, LSL, 4));
456   __ movs(R8, ShifterOperand(R4, LSR, 5));
457   __ movs(R8, ShifterOperand(R4, ASR, 6));
458   __ movs(R8, ShifterOperand(R4, ROR, 7));
459   __ movs(R8, ShifterOperand(R4, RRX));
460 
461   EmitAndCheck(&assembler, "DataProcessingShiftedRegister");
462 }
463 
464 TEST_F(Thumb2AssemblerTest, ShiftImmediate) {
465   // Note: This test produces the same results as DataProcessingShiftedRegister
466   // but it does so using shift functions instead of mov().
467 
468   // 16-bit variants.
469   __ Lsl(R3, R4, 4);
470   __ Lsr(R3, R4, 5);
471   __ Asr(R3, R4, 6);
472 
473   // 32-bit ROR because ROR immediate doesn't have the same 16-bit version as other shifts.
474   __ Ror(R3, R4, 7);
475 
476   // 32-bit RRX because RRX has no 16-bit version.
477   __ Rrx(R3, R4);
478 
479   // 32 bit variants (not setting condition codes).
480   __ Lsl(R3, R4, 4, AL, kCcKeep);
481   __ Lsr(R3, R4, 5, AL, kCcKeep);
482   __ Asr(R3, R4, 6, AL, kCcKeep);
483   __ Ror(R3, R4, 7, AL, kCcKeep);
484   __ Rrx(R3, R4, AL, kCcKeep);
485 
486   // 32 bit variants (high registers).
487   __ Lsls(R8, R4, 4);
488   __ Lsrs(R8, R4, 5);
489   __ Asrs(R8, R4, 6);
490   __ Rors(R8, R4, 7);
491   __ Rrxs(R8, R4);
492 
493   EmitAndCheck(&assembler, "ShiftImmediate");
494 }
495 
496 TEST_F(Thumb2AssemblerTest, BasicLoad) {
497   __ ldr(R3, Address(R4, 24));
498   __ ldrb(R3, Address(R4, 24));
499   __ ldrh(R3, Address(R4, 24));
500   __ ldrsb(R3, Address(R4, 24));
501   __ ldrsh(R3, Address(R4, 24));
502 
503   __ ldr(R3, Address(SP, 24));
504 
505   // 32 bit variants
506   __ ldr(R8, Address(R4, 24));
507   __ ldrb(R8, Address(R4, 24));
508   __ ldrh(R8, Address(R4, 24));
509   __ ldrsb(R8, Address(R4, 24));
510   __ ldrsh(R8, Address(R4, 24));
511 
512   EmitAndCheck(&assembler, "BasicLoad");
513 }
514 
515 
516 TEST_F(Thumb2AssemblerTest, BasicStore) {
517   __ str(R3, Address(R4, 24));
518   __ strb(R3, Address(R4, 24));
519   __ strh(R3, Address(R4, 24));
520 
521   __ str(R3, Address(SP, 24));
522 
523   // 32 bit variants.
524   __ str(R8, Address(R4, 24));
525   __ strb(R8, Address(R4, 24));
526   __ strh(R8, Address(R4, 24));
527 
528   EmitAndCheck(&assembler, "BasicStore");
529 }
530 
531 TEST_F(Thumb2AssemblerTest, ComplexLoad) {
532   __ ldr(R3, Address(R4, 24, Address::Mode::Offset));
533   __ ldr(R3, Address(R4, 24, Address::Mode::PreIndex));
534   __ ldr(R3, Address(R4, 24, Address::Mode::PostIndex));
535   __ ldr(R3, Address(R4, 24, Address::Mode::NegOffset));
536   __ ldr(R3, Address(R4, 24, Address::Mode::NegPreIndex));
537   __ ldr(R3, Address(R4, 24, Address::Mode::NegPostIndex));
538 
539   __ ldrb(R3, Address(R4, 24, Address::Mode::Offset));
540   __ ldrb(R3, Address(R4, 24, Address::Mode::PreIndex));
541   __ ldrb(R3, Address(R4, 24, Address::Mode::PostIndex));
542   __ ldrb(R3, Address(R4, 24, Address::Mode::NegOffset));
543   __ ldrb(R3, Address(R4, 24, Address::Mode::NegPreIndex));
544   __ ldrb(R3, Address(R4, 24, Address::Mode::NegPostIndex));
545 
546   __ ldrh(R3, Address(R4, 24, Address::Mode::Offset));
547   __ ldrh(R3, Address(R4, 24, Address::Mode::PreIndex));
548   __ ldrh(R3, Address(R4, 24, Address::Mode::PostIndex));
549   __ ldrh(R3, Address(R4, 24, Address::Mode::NegOffset));
550   __ ldrh(R3, Address(R4, 24, Address::Mode::NegPreIndex));
551   __ ldrh(R3, Address(R4, 24, Address::Mode::NegPostIndex));
552 
553   __ ldrsb(R3, Address(R4, 24, Address::Mode::Offset));
554   __ ldrsb(R3, Address(R4, 24, Address::Mode::PreIndex));
555   __ ldrsb(R3, Address(R4, 24, Address::Mode::PostIndex));
556   __ ldrsb(R3, Address(R4, 24, Address::Mode::NegOffset));
557   __ ldrsb(R3, Address(R4, 24, Address::Mode::NegPreIndex));
558   __ ldrsb(R3, Address(R4, 24, Address::Mode::NegPostIndex));
559 
560   __ ldrsh(R3, Address(R4, 24, Address::Mode::Offset));
561   __ ldrsh(R3, Address(R4, 24, Address::Mode::PreIndex));
562   __ ldrsh(R3, Address(R4, 24, Address::Mode::PostIndex));
563   __ ldrsh(R3, Address(R4, 24, Address::Mode::NegOffset));
564   __ ldrsh(R3, Address(R4, 24, Address::Mode::NegPreIndex));
565   __ ldrsh(R3, Address(R4, 24, Address::Mode::NegPostIndex));
566 
567   EmitAndCheck(&assembler, "ComplexLoad");
568 }
569 
570 
571 TEST_F(Thumb2AssemblerTest, ComplexStore) {
572   __ str(R3, Address(R4, 24, Address::Mode::Offset));
573   __ str(R3, Address(R4, 24, Address::Mode::PreIndex));
574   __ str(R3, Address(R4, 24, Address::Mode::PostIndex));
575   __ str(R3, Address(R4, 24, Address::Mode::NegOffset));
576   __ str(R3, Address(R4, 24, Address::Mode::NegPreIndex));
577   __ str(R3, Address(R4, 24, Address::Mode::NegPostIndex));
578 
579   __ strb(R3, Address(R4, 24, Address::Mode::Offset));
580   __ strb(R3, Address(R4, 24, Address::Mode::PreIndex));
581   __ strb(R3, Address(R4, 24, Address::Mode::PostIndex));
582   __ strb(R3, Address(R4, 24, Address::Mode::NegOffset));
583   __ strb(R3, Address(R4, 24, Address::Mode::NegPreIndex));
584   __ strb(R3, Address(R4, 24, Address::Mode::NegPostIndex));
585 
586   __ strh(R3, Address(R4, 24, Address::Mode::Offset));
587   __ strh(R3, Address(R4, 24, Address::Mode::PreIndex));
588   __ strh(R3, Address(R4, 24, Address::Mode::PostIndex));
589   __ strh(R3, Address(R4, 24, Address::Mode::NegOffset));
590   __ strh(R3, Address(R4, 24, Address::Mode::NegPreIndex));
591   __ strh(R3, Address(R4, 24, Address::Mode::NegPostIndex));
592 
593   EmitAndCheck(&assembler, "ComplexStore");
594 }
595 
596 TEST_F(Thumb2AssemblerTest, NegativeLoadStore) {
597   __ ldr(R3, Address(R4, -24, Address::Mode::Offset));
598   __ ldr(R3, Address(R4, -24, Address::Mode::PreIndex));
599   __ ldr(R3, Address(R4, -24, Address::Mode::PostIndex));
600   __ ldr(R3, Address(R4, -24, Address::Mode::NegOffset));
601   __ ldr(R3, Address(R4, -24, Address::Mode::NegPreIndex));
602   __ ldr(R3, Address(R4, -24, Address::Mode::NegPostIndex));
603 
604   __ ldrb(R3, Address(R4, -24, Address::Mode::Offset));
605   __ ldrb(R3, Address(R4, -24, Address::Mode::PreIndex));
606   __ ldrb(R3, Address(R4, -24, Address::Mode::PostIndex));
607   __ ldrb(R3, Address(R4, -24, Address::Mode::NegOffset));
608   __ ldrb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
609   __ ldrb(R3, Address(R4, -24, Address::Mode::NegPostIndex));
610 
611   __ ldrh(R3, Address(R4, -24, Address::Mode::Offset));
612   __ ldrh(R3, Address(R4, -24, Address::Mode::PreIndex));
613   __ ldrh(R3, Address(R4, -24, Address::Mode::PostIndex));
614   __ ldrh(R3, Address(R4, -24, Address::Mode::NegOffset));
615   __ ldrh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
616   __ ldrh(R3, Address(R4, -24, Address::Mode::NegPostIndex));
617 
618   __ ldrsb(R3, Address(R4, -24, Address::Mode::Offset));
619   __ ldrsb(R3, Address(R4, -24, Address::Mode::PreIndex));
620   __ ldrsb(R3, Address(R4, -24, Address::Mode::PostIndex));
621   __ ldrsb(R3, Address(R4, -24, Address::Mode::NegOffset));
622   __ ldrsb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
623   __ ldrsb(R3, Address(R4, -24, Address::Mode::NegPostIndex));
624 
625   __ ldrsh(R3, Address(R4, -24, Address::Mode::Offset));
626   __ ldrsh(R3, Address(R4, -24, Address::Mode::PreIndex));
627   __ ldrsh(R3, Address(R4, -24, Address::Mode::PostIndex));
628   __ ldrsh(R3, Address(R4, -24, Address::Mode::NegOffset));
629   __ ldrsh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
630   __ ldrsh(R3, Address(R4, -24, Address::Mode::NegPostIndex));
631 
632   __ str(R3, Address(R4, -24, Address::Mode::Offset));
633   __ str(R3, Address(R4, -24, Address::Mode::PreIndex));
634   __ str(R3, Address(R4, -24, Address::Mode::PostIndex));
635   __ str(R3, Address(R4, -24, Address::Mode::NegOffset));
636   __ str(R3, Address(R4, -24, Address::Mode::NegPreIndex));
637   __ str(R3, Address(R4, -24, Address::Mode::NegPostIndex));
638 
639   __ strb(R3, Address(R4, -24, Address::Mode::Offset));
640   __ strb(R3, Address(R4, -24, Address::Mode::PreIndex));
641   __ strb(R3, Address(R4, -24, Address::Mode::PostIndex));
642   __ strb(R3, Address(R4, -24, Address::Mode::NegOffset));
643   __ strb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
644   __ strb(R3, Address(R4, -24, Address::Mode::NegPostIndex));
645 
646   __ strh(R3, Address(R4, -24, Address::Mode::Offset));
647   __ strh(R3, Address(R4, -24, Address::Mode::PreIndex));
648   __ strh(R3, Address(R4, -24, Address::Mode::PostIndex));
649   __ strh(R3, Address(R4, -24, Address::Mode::NegOffset));
650   __ strh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
651   __ strh(R3, Address(R4, -24, Address::Mode::NegPostIndex));
652 
653   EmitAndCheck(&assembler, "NegativeLoadStore");
654 }
655 
656 TEST_F(Thumb2AssemblerTest, SimpleLoadStoreDual) {
657   __ strd(R2, Address(R0, 24, Address::Mode::Offset));
658   __ ldrd(R2, Address(R0, 24, Address::Mode::Offset));
659 
660   EmitAndCheck(&assembler, "SimpleLoadStoreDual");
661 }
662 
663 TEST_F(Thumb2AssemblerTest, ComplexLoadStoreDual) {
664   __ strd(R2, Address(R0, 24, Address::Mode::Offset));
665   __ strd(R2, Address(R0, 24, Address::Mode::PreIndex));
666   __ strd(R2, Address(R0, 24, Address::Mode::PostIndex));
667   __ strd(R2, Address(R0, 24, Address::Mode::NegOffset));
668   __ strd(R2, Address(R0, 24, Address::Mode::NegPreIndex));
669   __ strd(R2, Address(R0, 24, Address::Mode::NegPostIndex));
670 
671   __ ldrd(R2, Address(R0, 24, Address::Mode::Offset));
672   __ ldrd(R2, Address(R0, 24, Address::Mode::PreIndex));
673   __ ldrd(R2, Address(R0, 24, Address::Mode::PostIndex));
674   __ ldrd(R2, Address(R0, 24, Address::Mode::NegOffset));
675   __ ldrd(R2, Address(R0, 24, Address::Mode::NegPreIndex));
676   __ ldrd(R2, Address(R0, 24, Address::Mode::NegPostIndex));
677 
678   EmitAndCheck(&assembler, "ComplexLoadStoreDual");
679 }
680 
681 TEST_F(Thumb2AssemblerTest, NegativeLoadStoreDual) {
682   __ strd(R2, Address(R0, -24, Address::Mode::Offset));
683   __ strd(R2, Address(R0, -24, Address::Mode::PreIndex));
684   __ strd(R2, Address(R0, -24, Address::Mode::PostIndex));
685   __ strd(R2, Address(R0, -24, Address::Mode::NegOffset));
686   __ strd(R2, Address(R0, -24, Address::Mode::NegPreIndex));
687   __ strd(R2, Address(R0, -24, Address::Mode::NegPostIndex));
688 
689   __ ldrd(R2, Address(R0, -24, Address::Mode::Offset));
690   __ ldrd(R2, Address(R0, -24, Address::Mode::PreIndex));
691   __ ldrd(R2, Address(R0, -24, Address::Mode::PostIndex));
692   __ ldrd(R2, Address(R0, -24, Address::Mode::NegOffset));
693   __ ldrd(R2, Address(R0, -24, Address::Mode::NegPreIndex));
694   __ ldrd(R2, Address(R0, -24, Address::Mode::NegPostIndex));
695 
696   EmitAndCheck(&assembler, "NegativeLoadStoreDual");
697 }
698 
699 TEST_F(Thumb2AssemblerTest, SimpleBranch) {
700   Label l1;
701   __ mov(R0, ShifterOperand(2));
702   __ Bind(&l1);
703   __ mov(R1, ShifterOperand(1));
704   __ b(&l1);
705   Label l2;
706   __ b(&l2);
707   __ mov(R1, ShifterOperand(2));
708   __ Bind(&l2);
709   __ mov(R0, ShifterOperand(3));
710 
711   Label l3;
712   __ mov(R0, ShifterOperand(2));
713   __ Bind(&l3);
714   __ mov(R1, ShifterOperand(1));
715   __ b(&l3, EQ);
716 
717   Label l4;
718   __ b(&l4, EQ);
719   __ mov(R1, ShifterOperand(2));
720   __ Bind(&l4);
721   __ mov(R0, ShifterOperand(3));
722 
723   // 2 linked labels.
724   Label l5;
725   __ b(&l5);
726   __ mov(R1, ShifterOperand(4));
727   __ b(&l5);
728   __ mov(R1, ShifterOperand(5));
729   __ Bind(&l5);
730   __ mov(R0, ShifterOperand(6));
731 
732   EmitAndCheck(&assembler, "SimpleBranch");
733 }
734 
735 TEST_F(Thumb2AssemblerTest, LongBranch) {
736   __ Force32Bit();
737   // 32 bit branches.
738   Label l1;
739   __ mov(R0, ShifterOperand(2));
740   __ Bind(&l1);
741   __ mov(R1, ShifterOperand(1));
742   __ b(&l1);
743 
744   Label l2;
745   __ b(&l2);
746   __ mov(R1, ShifterOperand(2));
747   __ Bind(&l2);
748   __ mov(R0, ShifterOperand(3));
749 
750   Label l3;
751   __ mov(R0, ShifterOperand(2));
752   __ Bind(&l3);
753   __ mov(R1, ShifterOperand(1));
754   __ b(&l3, EQ);
755 
756   Label l4;
757   __ b(&l4, EQ);
758   __ mov(R1, ShifterOperand(2));
759   __ Bind(&l4);
760   __ mov(R0, ShifterOperand(3));
761 
762   // 2 linked labels.
763   Label l5;
764   __ b(&l5);
765   __ mov(R1, ShifterOperand(4));
766   __ b(&l5);
767   __ mov(R1, ShifterOperand(5));
768   __ Bind(&l5);
769   __ mov(R0, ShifterOperand(6));
770 
771   EmitAndCheck(&assembler, "LongBranch");
772 }
773 
774 TEST_F(Thumb2AssemblerTest, LoadMultiple) {
775   // 16 bit.
776   __ ldm(DB_W, R4, (1 << R0 | 1 << R3));
777 
778   // 32 bit.
779   __ ldm(DB_W, R4, (1 << LR | 1 << R11));
780   __ ldm(DB, R4, (1 << LR | 1 << R11));
781 
782   // Single reg is converted to ldr
783   __ ldm(DB_W, R4, (1 << R5));
784 
785   EmitAndCheck(&assembler, "LoadMultiple");
786 }
787 
788 TEST_F(Thumb2AssemblerTest, StoreMultiple) {
789   // 16 bit.
790   __ stm(IA_W, R4, (1 << R0 | 1 << R3));
791 
792   // 32 bit.
793   __ stm(IA_W, R4, (1 << LR | 1 << R11));
794   __ stm(IA, R4, (1 << LR | 1 << R11));
795 
796   // Single reg is converted to str
797   __ stm(IA_W, R4, (1 << R5));
798   __ stm(IA, R4, (1 << R5));
799 
800   EmitAndCheck(&assembler, "StoreMultiple");
801 }
802 
803 TEST_F(Thumb2AssemblerTest, MovWMovT) {
804   // Always 32 bit.
805   __ movw(R4, 0);
806   __ movw(R4, 0x34);
807   __ movw(R9, 0x34);
808   __ movw(R3, 0x1234);
809   __ movw(R9, 0xffff);
810 
811   // Always 32 bit.
812   __ movt(R0, 0);
813   __ movt(R0, 0x1234);
814   __ movt(R1, 0xffff);
815 
816   EmitAndCheck(&assembler, "MovWMovT");
817 }
818 
819 TEST_F(Thumb2AssemblerTest, SpecialAddSub) {
820   __ add(R2, SP, ShifterOperand(0x50));   // 16 bit.
821   __ add(SP, SP, ShifterOperand(0x50));   // 16 bit.
822   __ add(R8, SP, ShifterOperand(0x50));   // 32 bit.
823 
824   __ add(R2, SP, ShifterOperand(0xf00));  // 32 bit due to imm size.
825   __ add(SP, SP, ShifterOperand(0xf00));  // 32 bit due to imm size.
826   __ add(SP, SP, ShifterOperand(0xffc));  // 32 bit due to imm size; encoding T4.
827 
828   __ sub(SP, SP, ShifterOperand(0x50));   // 16 bit
829   __ sub(R0, SP, ShifterOperand(0x50));   // 32 bit
830   __ sub(R8, SP, ShifterOperand(0x50));   // 32 bit.
831 
832   __ sub(SP, SP, ShifterOperand(0xf00));  // 32 bit due to imm size
833   __ sub(SP, SP, ShifterOperand(0xffc));  // 32 bit due to imm size; encoding T4.
834 
835   EmitAndCheck(&assembler, "SpecialAddSub");
836 }
837 
838 TEST_F(Thumb2AssemblerTest, LoadFromOffset) {
839   __ LoadFromOffset(kLoadWord, R2, R4, 12);
840   __ LoadFromOffset(kLoadWord, R2, R4, 0xfff);
841   __ LoadFromOffset(kLoadWord, R2, R4, 0x1000);
842   __ LoadFromOffset(kLoadWord, R2, R4, 0x1000a4);
843   __ LoadFromOffset(kLoadWord, R2, R4, 0x101000);
844   __ LoadFromOffset(kLoadWord, R4, R4, 0x101000);
845   __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 12);
846   __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0xfff);
847   __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x1000);
848   __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x1000a4);
849   __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x101000);
850   __ LoadFromOffset(kLoadUnsignedHalfword, R4, R4, 0x101000);
851   __ LoadFromOffset(kLoadWordPair, R2, R4, 12);
852   __ LoadFromOffset(kLoadWordPair, R2, R4, 0x3fc);
853   __ LoadFromOffset(kLoadWordPair, R2, R4, 0x400);
854   __ LoadFromOffset(kLoadWordPair, R2, R4, 0x400a4);
855   __ LoadFromOffset(kLoadWordPair, R2, R4, 0x40400);
856   __ LoadFromOffset(kLoadWordPair, R4, R4, 0x40400);
857 
858   __ LoadFromOffset(kLoadWord, R0, R12, 12);  // 32-bit because of R12.
859   __ LoadFromOffset(kLoadWord, R2, R4, 0xa4 - 0x100000);
860 
861   __ LoadFromOffset(kLoadSignedByte, R2, R4, 12);
862   __ LoadFromOffset(kLoadUnsignedByte, R2, R4, 12);
863   __ LoadFromOffset(kLoadSignedHalfword, R2, R4, 12);
864 
865   EmitAndCheck(&assembler, "LoadFromOffset");
866 }
867 
868 TEST_F(Thumb2AssemblerTest, StoreToOffset) {
869   __ StoreToOffset(kStoreWord, R2, R4, 12);
870   __ StoreToOffset(kStoreWord, R2, R4, 0xfff);
871   __ StoreToOffset(kStoreWord, R2, R4, 0x1000);
872   __ StoreToOffset(kStoreWord, R2, R4, 0x1000a4);
873   __ StoreToOffset(kStoreWord, R2, R4, 0x101000);
874   __ StoreToOffset(kStoreWord, R4, R4, 0x101000);
875   __ StoreToOffset(kStoreHalfword, R2, R4, 12);
876   __ StoreToOffset(kStoreHalfword, R2, R4, 0xfff);
877   __ StoreToOffset(kStoreHalfword, R2, R4, 0x1000);
878   __ StoreToOffset(kStoreHalfword, R2, R4, 0x1000a4);
879   __ StoreToOffset(kStoreHalfword, R2, R4, 0x101000);
880   __ StoreToOffset(kStoreHalfword, R4, R4, 0x101000);
881   __ StoreToOffset(kStoreWordPair, R2, R4, 12);
882   __ StoreToOffset(kStoreWordPair, R2, R4, 0x3fc);
883   __ StoreToOffset(kStoreWordPair, R2, R4, 0x400);
884   __ StoreToOffset(kStoreWordPair, R2, R4, 0x400a4);
885   __ StoreToOffset(kStoreWordPair, R2, R4, 0x40400);
886   __ StoreToOffset(kStoreWordPair, R4, R4, 0x40400);
887 
888   __ StoreToOffset(kStoreWord, R0, R12, 12);  // 32-bit because of R12.
889   __ StoreToOffset(kStoreWord, R2, R4, 0xa4 - 0x100000);
890 
891   __ StoreToOffset(kStoreByte, R2, R4, 12);
892 
893   EmitAndCheck(&assembler, "StoreToOffset");
894 }
895 
896 TEST_F(Thumb2AssemblerTest, IfThen) {
897   __ it(EQ);
898   __ mov(R1, ShifterOperand(1), EQ);
899 
900   __ it(EQ, kItThen);
901   __ mov(R1, ShifterOperand(1), EQ);
902   __ mov(R2, ShifterOperand(2), EQ);
903 
904   __ it(EQ, kItElse);
905   __ mov(R1, ShifterOperand(1), EQ);
906   __ mov(R2, ShifterOperand(2), NE);
907 
908   __ it(EQ, kItThen, kItElse);
909   __ mov(R1, ShifterOperand(1), EQ);
910   __ mov(R2, ShifterOperand(2), EQ);
911   __ mov(R3, ShifterOperand(3), NE);
912 
913   __ it(EQ, kItElse, kItElse);
914   __ mov(R1, ShifterOperand(1), EQ);
915   __ mov(R2, ShifterOperand(2), NE);
916   __ mov(R3, ShifterOperand(3), NE);
917 
918   __ it(EQ, kItThen, kItThen, kItElse);
919   __ mov(R1, ShifterOperand(1), EQ);
920   __ mov(R2, ShifterOperand(2), EQ);
921   __ mov(R3, ShifterOperand(3), EQ);
922   __ mov(R4, ShifterOperand(4), NE);
923 
924   EmitAndCheck(&assembler, "IfThen");
925 }
926 
927 TEST_F(Thumb2AssemblerTest, CbzCbnz) {
928   Label l1;
929   __ cbz(R2, &l1);
930   __ mov(R1, ShifterOperand(3));
931   __ mov(R2, ShifterOperand(3));
932   __ Bind(&l1);
933   __ mov(R2, ShifterOperand(4));
934 
935   Label l2;
936   __ cbnz(R2, &l2);
937   __ mov(R8, ShifterOperand(3));
938   __ mov(R2, ShifterOperand(3));
939   __ Bind(&l2);
940   __ mov(R2, ShifterOperand(4));
941 
942   EmitAndCheck(&assembler, "CbzCbnz");
943 }
944 
945 TEST_F(Thumb2AssemblerTest, Multiply) {
946   __ mul(R0, R1, R0);
947   __ mul(R0, R1, R2);
948   __ mul(R8, R9, R8);
949   __ mul(R8, R9, R10);
950 
951   __ mla(R0, R1, R2, R3);
952   __ mla(R8, R9, R8, R9);
953 
954   __ mls(R0, R1, R2, R3);
955   __ mls(R8, R9, R8, R9);
956 
957   __ umull(R0, R1, R2, R3);
958   __ umull(R8, R9, R10, R11);
959 
960   EmitAndCheck(&assembler, "Multiply");
961 }
962 
963 TEST_F(Thumb2AssemblerTest, Divide) {
964   __ sdiv(R0, R1, R2);
965   __ sdiv(R8, R9, R10);
966 
967   __ udiv(R0, R1, R2);
968   __ udiv(R8, R9, R10);
969 
970   EmitAndCheck(&assembler, "Divide");
971 }
972 
973 TEST_F(Thumb2AssemblerTest, VMov) {
974   __ vmovs(S1, 1.0);
975   __ vmovd(D1, 1.0);
976 
977   __ vmovs(S1, S2);
978   __ vmovd(D1, D2);
979 
980   EmitAndCheck(&assembler, "VMov");
981 }
982 
983 
984 TEST_F(Thumb2AssemblerTest, BasicFloatingPoint) {
985   __ vadds(S0, S1, S2);
986   __ vsubs(S0, S1, S2);
987   __ vmuls(S0, S1, S2);
988   __ vmlas(S0, S1, S2);
989   __ vmlss(S0, S1, S2);
990   __ vdivs(S0, S1, S2);
991   __ vabss(S0, S1);
992   __ vnegs(S0, S1);
993   __ vsqrts(S0, S1);
994 
995   __ vaddd(D0, D1, D2);
996   __ vsubd(D0, D1, D2);
997   __ vmuld(D0, D1, D2);
998   __ vmlad(D0, D1, D2);
999   __ vmlsd(D0, D1, D2);
1000   __ vdivd(D0, D1, D2);
1001   __ vabsd(D0, D1);
1002   __ vnegd(D0, D1);
1003   __ vsqrtd(D0, D1);
1004 
1005   EmitAndCheck(&assembler, "BasicFloatingPoint");
1006 }
1007 
1008 TEST_F(Thumb2AssemblerTest, FloatingPointConversions) {
1009   __ vcvtsd(S2, D2);
1010   __ vcvtds(D2, S2);
1011 
1012   __ vcvtis(S1, S2);
1013   __ vcvtsi(S1, S2);
1014 
1015   __ vcvtid(S1, D2);
1016   __ vcvtdi(D1, S2);
1017 
1018   __ vcvtus(S1, S2);
1019   __ vcvtsu(S1, S2);
1020 
1021   __ vcvtud(S1, D2);
1022   __ vcvtdu(D1, S2);
1023 
1024   EmitAndCheck(&assembler, "FloatingPointConversions");
1025 }
1026 
1027 TEST_F(Thumb2AssemblerTest, FloatingPointComparisons) {
1028   __ vcmps(S0, S1);
1029   __ vcmpd(D0, D1);
1030 
1031   __ vcmpsz(S2);
1032   __ vcmpdz(D2);
1033 
1034   EmitAndCheck(&assembler, "FloatingPointComparisons");
1035 }
1036 
1037 TEST_F(Thumb2AssemblerTest, Calls) {
1038   __ blx(LR);
1039   __ bx(LR);
1040 
1041   EmitAndCheck(&assembler, "Calls");
1042 }
1043 
1044 TEST_F(Thumb2AssemblerTest, Breakpoint) {
1045   __ bkpt(0);
1046 
1047   EmitAndCheck(&assembler, "Breakpoint");
1048 }
1049 
1050 TEST_F(Thumb2AssemblerTest, StrR1) {
1051   __ str(R1, Address(SP, 68));
1052   __ str(R1, Address(SP, 1068));
1053 
1054   EmitAndCheck(&assembler, "StrR1");
1055 }
1056 
1057 TEST_F(Thumb2AssemblerTest, VPushPop) {
1058   __ vpushs(S2, 4);
1059   __ vpushd(D2, 4);
1060 
1061   __ vpops(S2, 4);
1062   __ vpopd(D2, 4);
1063 
1064   EmitAndCheck(&assembler, "VPushPop");
1065 }
1066 
1067 TEST_F(Thumb2AssemblerTest, Max16BitBranch) {
1068   Label l1;
1069   __ b(&l1);
1070   for (int i = 0 ; i < (1 << 11) ; i += 2) {
1071     __ mov(R3, ShifterOperand(i & 0xff));
1072   }
1073   __ Bind(&l1);
1074   __ mov(R1, ShifterOperand(R2));
1075 
1076   EmitAndCheck(&assembler, "Max16BitBranch");
1077 }
1078 
1079 TEST_F(Thumb2AssemblerTest, Branch32) {
1080   Label l1;
1081   __ b(&l1);
1082   for (int i = 0 ; i < (1 << 11) + 2 ; i += 2) {
1083     __ mov(R3, ShifterOperand(i & 0xff));
1084   }
1085   __ Bind(&l1);
1086   __ mov(R1, ShifterOperand(R2));
1087 
1088   EmitAndCheck(&assembler, "Branch32");
1089 }
1090 
1091 TEST_F(Thumb2AssemblerTest, CompareAndBranchMax) {
1092   Label l1;
1093   __ cbz(R4, &l1);
1094   for (int i = 0 ; i < (1 << 7) ; i += 2) {
1095     __ mov(R3, ShifterOperand(i & 0xff));
1096   }
1097   __ Bind(&l1);
1098   __ mov(R1, ShifterOperand(R2));
1099 
1100   EmitAndCheck(&assembler, "CompareAndBranchMax");
1101 }
1102 
1103 TEST_F(Thumb2AssemblerTest, CompareAndBranchRelocation16) {
1104   Label l1;
1105   __ cbz(R4, &l1);
1106   for (int i = 0 ; i < (1 << 7) + 2 ; i += 2) {
1107     __ mov(R3, ShifterOperand(i & 0xff));
1108   }
1109   __ Bind(&l1);
1110   __ mov(R1, ShifterOperand(R2));
1111 
1112   EmitAndCheck(&assembler, "CompareAndBranchRelocation16");
1113 }
1114 
1115 TEST_F(Thumb2AssemblerTest, CompareAndBranchRelocation32) {
1116   Label l1;
1117   __ cbz(R4, &l1);
1118   for (int i = 0 ; i < (1 << 11) + 2 ; i += 2) {
1119     __ mov(R3, ShifterOperand(i & 0xff));
1120   }
1121   __ Bind(&l1);
1122   __ mov(R1, ShifterOperand(R2));
1123 
1124   EmitAndCheck(&assembler, "CompareAndBranchRelocation32");
1125 }
1126 
1127 TEST_F(Thumb2AssemblerTest, MixedBranch32) {
1128   Label l1;
1129   Label l2;
1130   __ b(&l1);      // Forwards.
1131   __ Bind(&l2);
1132 
1133   // Space to force relocation.
1134   for (int i = 0 ; i < (1 << 11) + 2 ; i += 2) {
1135     __ mov(R3, ShifterOperand(i & 0xff));
1136   }
1137   __ b(&l2);      // Backwards.
1138   __ Bind(&l1);
1139   __ mov(R1, ShifterOperand(R2));
1140 
1141   EmitAndCheck(&assembler, "MixedBranch32");
1142 }
1143 
1144 TEST_F(Thumb2AssemblerTest, Shifts) {
1145   // 16 bit selected for CcDontCare.
1146   __ Lsl(R0, R1, 5);
1147   __ Lsr(R0, R1, 5);
1148   __ Asr(R0, R1, 5);
1149 
1150   __ Lsl(R0, R0, R1);
1151   __ Lsr(R0, R0, R1);
1152   __ Asr(R0, R0, R1);
1153   __ Ror(R0, R0, R1);
1154 
1155   // 16 bit with kCcSet.
1156   __ Lsls(R0, R1, 5);
1157   __ Lsrs(R0, R1, 5);
1158   __ Asrs(R0, R1, 5);
1159 
1160   __ Lsls(R0, R0, R1);
1161   __ Lsrs(R0, R0, R1);
1162   __ Asrs(R0, R0, R1);
1163   __ Rors(R0, R0, R1);
1164 
1165   // 32-bit with kCcKeep.
1166   __ Lsl(R0, R1, 5, AL, kCcKeep);
1167   __ Lsr(R0, R1, 5, AL, kCcKeep);
1168   __ Asr(R0, R1, 5, AL, kCcKeep);
1169 
1170   __ Lsl(R0, R0, R1, AL, kCcKeep);
1171   __ Lsr(R0, R0, R1, AL, kCcKeep);
1172   __ Asr(R0, R0, R1, AL, kCcKeep);
1173   __ Ror(R0, R0, R1, AL, kCcKeep);
1174 
1175   // 32-bit because ROR immediate doesn't have a 16-bit version like the other shifts.
1176   __ Ror(R0, R1, 5);
1177   __ Rors(R0, R1, 5);
1178   __ Ror(R0, R1, 5, AL, kCcKeep);
1179 
1180   // 32 bit due to high registers.
1181   __ Lsl(R8, R1, 5);
1182   __ Lsr(R0, R8, 5);
1183   __ Asr(R8, R1, 5);
1184   __ Ror(R0, R8, 5);
1185 
1186   // 32 bit due to different Rd and Rn.
1187   __ Lsl(R0, R1, R2);
1188   __ Lsr(R0, R1, R2);
1189   __ Asr(R0, R1, R2);
1190   __ Ror(R0, R1, R2);
1191 
1192   // 32 bit due to use of high registers.
1193   __ Lsl(R8, R1, R2);
1194   __ Lsr(R0, R8, R2);
1195   __ Asr(R0, R1, R8);
1196 
1197   // S bit (all 32 bit)
1198 
1199   // 32 bit due to high registers.
1200   __ Lsls(R8, R1, 5);
1201   __ Lsrs(R0, R8, 5);
1202   __ Asrs(R8, R1, 5);
1203   __ Rors(R0, R8, 5);
1204 
1205   // 32 bit due to different Rd and Rn.
1206   __ Lsls(R0, R1, R2);
1207   __ Lsrs(R0, R1, R2);
1208   __ Asrs(R0, R1, R2);
1209   __ Rors(R0, R1, R2);
1210 
1211   // 32 bit due to use of high registers.
1212   __ Lsls(R8, R1, R2);
1213   __ Lsrs(R0, R8, R2);
1214   __ Asrs(R0, R1, R8);
1215 
1216   EmitAndCheck(&assembler, "Shifts");
1217 }
1218 
1219 TEST_F(Thumb2AssemblerTest, LoadStoreRegOffset) {
1220   // 16 bit.
1221   __ ldr(R0, Address(R1, R2));
1222   __ str(R0, Address(R1, R2));
1223 
1224   // 32 bit due to shift.
1225   __ ldr(R0, Address(R1, R2, LSL, 1));
1226   __ str(R0, Address(R1, R2, LSL, 1));
1227 
1228   __ ldr(R0, Address(R1, R2, LSL, 3));
1229   __ str(R0, Address(R1, R2, LSL, 3));
1230 
1231   // 32 bit due to high register use.
1232   __ ldr(R8, Address(R1, R2));
1233   __ str(R8, Address(R1, R2));
1234 
1235   __ ldr(R1, Address(R8, R2));
1236   __ str(R2, Address(R8, R2));
1237 
1238   __ ldr(R0, Address(R1, R8));
1239   __ str(R0, Address(R1, R8));
1240 
1241   EmitAndCheck(&assembler, "LoadStoreRegOffset");
1242 }
1243 
1244 TEST_F(Thumb2AssemblerTest, LoadStoreLiteral) {
1245   __ ldr(R0, Address(4));
1246   __ str(R0, Address(4));
1247 
1248   __ ldr(R0, Address(-8));
1249   __ str(R0, Address(-8));
1250 
1251   // Limits.
1252   __ ldr(R0, Address(0x3ff));       // 10 bits (16 bit).
1253   __ ldr(R0, Address(0x7ff));       // 11 bits (32 bit).
1254   __ str(R0, Address(0x3ff));       // 32 bit (no 16 bit str(literal)).
1255   __ str(R0, Address(0x7ff));       // 11 bits (32 bit).
1256 
1257   EmitAndCheck(&assembler, "LoadStoreLiteral");
1258 }
1259 
1260 TEST_F(Thumb2AssemblerTest, LoadStoreLimits) {
1261   __ ldr(R0, Address(R4, 124));     // 16 bit.
1262   __ ldr(R0, Address(R4, 128));     // 32 bit.
1263 
1264   __ ldrb(R0, Address(R4, 31));     // 16 bit.
1265   __ ldrb(R0, Address(R4, 32));     // 32 bit.
1266 
1267   __ ldrh(R0, Address(R4, 62));     // 16 bit.
1268   __ ldrh(R0, Address(R4, 64));     // 32 bit.
1269 
1270   __ ldrsb(R0, Address(R4, 31));     // 32 bit.
1271   __ ldrsb(R0, Address(R4, 32));     // 32 bit.
1272 
1273   __ ldrsh(R0, Address(R4, 62));     // 32 bit.
1274   __ ldrsh(R0, Address(R4, 64));     // 32 bit.
1275 
1276   __ str(R0, Address(R4, 124));     // 16 bit.
1277   __ str(R0, Address(R4, 128));     // 32 bit.
1278 
1279   __ strb(R0, Address(R4, 31));     // 16 bit.
1280   __ strb(R0, Address(R4, 32));     // 32 bit.
1281 
1282   __ strh(R0, Address(R4, 62));     // 16 bit.
1283   __ strh(R0, Address(R4, 64));     // 32 bit.
1284 
1285   EmitAndCheck(&assembler, "LoadStoreLimits");
1286 }
1287 
1288 TEST_F(Thumb2AssemblerTest, CompareAndBranch) {
1289   Label label;
1290   __ CompareAndBranchIfZero(arm::R0, &label);
1291   __ CompareAndBranchIfZero(arm::R11, &label);
1292   __ CompareAndBranchIfNonZero(arm::R0, &label);
1293   __ CompareAndBranchIfNonZero(arm::R11, &label);
1294   __ Bind(&label);
1295 
1296   EmitAndCheck(&assembler, "CompareAndBranch");
1297 }
1298 
1299 TEST_F(Thumb2AssemblerTest, AddConstant) {
1300   // Low registers, Rd != Rn.
1301   __ AddConstant(R0, R1, 0);                          // MOV.
1302   __ AddConstant(R0, R1, 1);                          // 16-bit ADDS, encoding T1.
1303   __ AddConstant(R0, R1, 7);                          // 16-bit ADDS, encoding T1.
1304   __ AddConstant(R0, R1, 8);                          // 32-bit ADD, encoding T3.
1305   __ AddConstant(R0, R1, 255);                        // 32-bit ADD, encoding T3.
1306   __ AddConstant(R0, R1, 256);                        // 32-bit ADD, encoding T3.
1307   __ AddConstant(R0, R1, 257);                        // 32-bit ADD, encoding T4.
1308   __ AddConstant(R0, R1, 0xfff);                      // 32-bit ADD, encoding T4.
1309   __ AddConstant(R0, R1, 0x1000);                     // 32-bit ADD, encoding T3.
1310   __ AddConstant(R0, R1, 0x1001);                     // MVN+SUB.
1311   __ AddConstant(R0, R1, 0x1002);                     // MOVW+ADD.
1312   __ AddConstant(R0, R1, 0xffff);                     // MOVW+ADD.
1313   __ AddConstant(R0, R1, 0x10000);                    // 32-bit ADD, encoding T3.
1314   __ AddConstant(R0, R1, 0x10001);                    // 32-bit ADD, encoding T3.
1315   __ AddConstant(R0, R1, 0x10002);                    // MVN+SUB.
1316   __ AddConstant(R0, R1, 0x10003);                    // MOVW+MOVT+ADD.
1317   __ AddConstant(R0, R1, -1);                         // 16-bit SUBS.
1318   __ AddConstant(R0, R1, -7);                         // 16-bit SUBS.
1319   __ AddConstant(R0, R1, -8);                         // 32-bit SUB, encoding T3.
1320   __ AddConstant(R0, R1, -255);                       // 32-bit SUB, encoding T3.
1321   __ AddConstant(R0, R1, -256);                       // 32-bit SUB, encoding T3.
1322   __ AddConstant(R0, R1, -257);                       // 32-bit SUB, encoding T4.
1323   __ AddConstant(R0, R1, -0xfff);                     // 32-bit SUB, encoding T4.
1324   __ AddConstant(R0, R1, -0x1000);                    // 32-bit SUB, encoding T3.
1325   __ AddConstant(R0, R1, -0x1001);                    // MVN+ADD.
1326   __ AddConstant(R0, R1, -0x1002);                    // MOVW+SUB.
1327   __ AddConstant(R0, R1, -0xffff);                    // MOVW+SUB.
1328   __ AddConstant(R0, R1, -0x10000);                   // 32-bit SUB, encoding T3.
1329   __ AddConstant(R0, R1, -0x10001);                   // 32-bit SUB, encoding T3.
1330   __ AddConstant(R0, R1, -0x10002);                   // MVN+ADD.
1331   __ AddConstant(R0, R1, -0x10003);                   // MOVW+MOVT+ADD.
1332 
1333   // Low registers, Rd == Rn.
1334   __ AddConstant(R0, R0, 0);                          // Nothing.
1335   __ AddConstant(R1, R1, 1);                          // 16-bit ADDS, encoding T2.
1336   __ AddConstant(R0, R0, 7);                          // 16-bit ADDS, encoding T2.
1337   __ AddConstant(R1, R1, 8);                          // 16-bit ADDS, encoding T2.
1338   __ AddConstant(R0, R0, 255);                        // 16-bit ADDS, encoding T2.
1339   __ AddConstant(R1, R1, 256);                        // 32-bit ADD, encoding T3.
1340   __ AddConstant(R0, R0, 257);                        // 32-bit ADD, encoding T4.
1341   __ AddConstant(R1, R1, 0xfff);                      // 32-bit ADD, encoding T4.
1342   __ AddConstant(R0, R0, 0x1000);                     // 32-bit ADD, encoding T3.
1343   __ AddConstant(R1, R1, 0x1001);                     // MVN+SUB.
1344   __ AddConstant(R0, R0, 0x1002);                     // MOVW+ADD.
1345   __ AddConstant(R1, R1, 0xffff);                     // MOVW+ADD.
1346   __ AddConstant(R0, R0, 0x10000);                    // 32-bit ADD, encoding T3.
1347   __ AddConstant(R1, R1, 0x10001);                    // 32-bit ADD, encoding T3.
1348   __ AddConstant(R0, R0, 0x10002);                    // MVN+SUB.
1349   __ AddConstant(R1, R1, 0x10003);                    // MOVW+MOVT+ADD.
1350   __ AddConstant(R0, R0, -1);                         // 16-bit SUBS, encoding T2.
1351   __ AddConstant(R1, R1, -7);                         // 16-bit SUBS, encoding T2.
1352   __ AddConstant(R0, R0, -8);                         // 16-bit SUBS, encoding T2.
1353   __ AddConstant(R1, R1, -255);                       // 16-bit SUBS, encoding T2.
1354   __ AddConstant(R0, R0, -256);                       // 32-bit SUB, encoding T3.
1355   __ AddConstant(R1, R1, -257);                       // 32-bit SUB, encoding T4.
1356   __ AddConstant(R0, R0, -0xfff);                     // 32-bit SUB, encoding T4.
1357   __ AddConstant(R1, R1, -0x1000);                    // 32-bit SUB, encoding T3.
1358   __ AddConstant(R0, R0, -0x1001);                    // MVN+ADD.
1359   __ AddConstant(R1, R1, -0x1002);                    // MOVW+SUB.
1360   __ AddConstant(R0, R0, -0xffff);                    // MOVW+SUB.
1361   __ AddConstant(R1, R1, -0x10000);                   // 32-bit SUB, encoding T3.
1362   __ AddConstant(R0, R0, -0x10001);                   // 32-bit SUB, encoding T3.
1363   __ AddConstant(R1, R1, -0x10002);                   // MVN+ADD.
1364   __ AddConstant(R0, R0, -0x10003);                   // MOVW+MOVT+ADD.
1365 
1366   // High registers.
1367   __ AddConstant(R8, R8, 0);                          // Nothing.
1368   __ AddConstant(R8, R1, 1);                          // 32-bit ADD, encoding T3.
1369   __ AddConstant(R0, R8, 7);                          // 32-bit ADD, encoding T3.
1370   __ AddConstant(R8, R8, 8);                          // 32-bit ADD, encoding T3.
1371   __ AddConstant(R8, R1, 255);                        // 32-bit ADD, encoding T3.
1372   __ AddConstant(R0, R8, 256);                        // 32-bit ADD, encoding T3.
1373   __ AddConstant(R8, R8, 257);                        // 32-bit ADD, encoding T4.
1374   __ AddConstant(R8, R1, 0xfff);                      // 32-bit ADD, encoding T4.
1375   __ AddConstant(R0, R8, 0x1000);                     // 32-bit ADD, encoding T3.
1376   __ AddConstant(R8, R8, 0x1001);                     // MVN+SUB.
1377   __ AddConstant(R0, R1, 0x1002);                     // MOVW+ADD.
1378   __ AddConstant(R0, R8, 0xffff);                     // MOVW+ADD.
1379   __ AddConstant(R8, R8, 0x10000);                    // 32-bit ADD, encoding T3.
1380   __ AddConstant(R8, R1, 0x10001);                    // 32-bit ADD, encoding T3.
1381   __ AddConstant(R0, R8, 0x10002);                    // MVN+SUB.
1382   __ AddConstant(R0, R8, 0x10003);                    // MOVW+MOVT+ADD.
1383   __ AddConstant(R8, R8, -1);                         // 32-bit ADD, encoding T3.
1384   __ AddConstant(R8, R1, -7);                         // 32-bit SUB, encoding T3.
1385   __ AddConstant(R0, R8, -8);                         // 32-bit SUB, encoding T3.
1386   __ AddConstant(R8, R8, -255);                       // 32-bit SUB, encoding T3.
1387   __ AddConstant(R8, R1, -256);                       // 32-bit SUB, encoding T3.
1388   __ AddConstant(R0, R8, -257);                       // 32-bit SUB, encoding T4.
1389   __ AddConstant(R8, R8, -0xfff);                     // 32-bit SUB, encoding T4.
1390   __ AddConstant(R8, R1, -0x1000);                    // 32-bit SUB, encoding T3.
1391   __ AddConstant(R0, R8, -0x1001);                    // MVN+ADD.
1392   __ AddConstant(R0, R1, -0x1002);                    // MOVW+SUB.
1393   __ AddConstant(R8, R1, -0xffff);                    // MOVW+SUB.
1394   __ AddConstant(R0, R8, -0x10000);                   // 32-bit SUB, encoding T3.
1395   __ AddConstant(R8, R8, -0x10001);                   // 32-bit SUB, encoding T3.
1396   __ AddConstant(R8, R1, -0x10002);                   // MVN+SUB.
1397   __ AddConstant(R0, R8, -0x10003);                   // MOVW+MOVT+ADD.
1398 
1399   // Low registers, Rd != Rn, kCcKeep.
1400   __ AddConstant(R0, R1, 0, AL, kCcKeep);             // MOV.
1401   __ AddConstant(R0, R1, 1, AL, kCcKeep);             // 32-bit ADD, encoding T3.
1402   __ AddConstant(R0, R1, 7, AL, kCcKeep);             // 32-bit ADD, encoding T3.
1403   __ AddConstant(R0, R1, 8, AL, kCcKeep);             // 32-bit ADD, encoding T3.
1404   __ AddConstant(R0, R1, 255, AL, kCcKeep);           // 32-bit ADD, encoding T3.
1405   __ AddConstant(R0, R1, 256, AL, kCcKeep);           // 32-bit ADD, encoding T3.
1406   __ AddConstant(R0, R1, 257, AL, kCcKeep);           // 32-bit ADD, encoding T4.
1407   __ AddConstant(R0, R1, 0xfff, AL, kCcKeep);         // 32-bit ADD, encoding T4.
1408   __ AddConstant(R0, R1, 0x1000, AL, kCcKeep);        // 32-bit ADD, encoding T3.
1409   __ AddConstant(R0, R1, 0x1001, AL, kCcKeep);        // MVN+SUB.
1410   __ AddConstant(R0, R1, 0x1002, AL, kCcKeep);        // MOVW+ADD.
1411   __ AddConstant(R0, R1, 0xffff, AL, kCcKeep);        // MOVW+ADD.
1412   __ AddConstant(R0, R1, 0x10000, AL, kCcKeep);       // 32-bit ADD, encoding T3.
1413   __ AddConstant(R0, R1, 0x10001, AL, kCcKeep);       // 32-bit ADD, encoding T3.
1414   __ AddConstant(R0, R1, 0x10002, AL, kCcKeep);       // MVN+SUB.
1415   __ AddConstant(R0, R1, 0x10003, AL, kCcKeep);       // MOVW+MOVT+ADD.
1416   __ AddConstant(R0, R1, -1, AL, kCcKeep);            // 32-bit ADD, encoding T3.
1417   __ AddConstant(R0, R1, -7, AL, kCcKeep);            // 32-bit SUB, encoding T3.
1418   __ AddConstant(R0, R1, -8, AL, kCcKeep);            // 32-bit SUB, encoding T3.
1419   __ AddConstant(R0, R1, -255, AL, kCcKeep);          // 32-bit SUB, encoding T3.
1420   __ AddConstant(R0, R1, -256, AL, kCcKeep);          // 32-bit SUB, encoding T3.
1421   __ AddConstant(R0, R1, -257, AL, kCcKeep);          // 32-bit SUB, encoding T4.
1422   __ AddConstant(R0, R1, -0xfff, AL, kCcKeep);        // 32-bit SUB, encoding T4.
1423   __ AddConstant(R0, R1, -0x1000, AL, kCcKeep);       // 32-bit SUB, encoding T3.
1424   __ AddConstant(R0, R1, -0x1001, AL, kCcKeep);       // MVN+ADD.
1425   __ AddConstant(R0, R1, -0x1002, AL, kCcKeep);       // MOVW+SUB.
1426   __ AddConstant(R0, R1, -0xffff, AL, kCcKeep);       // MOVW+SUB.
1427   __ AddConstant(R0, R1, -0x10000, AL, kCcKeep);      // 32-bit SUB, encoding T3.
1428   __ AddConstant(R0, R1, -0x10001, AL, kCcKeep);      // 32-bit SUB, encoding T3.
1429   __ AddConstant(R0, R1, -0x10002, AL, kCcKeep);      // MVN+ADD.
1430   __ AddConstant(R0, R1, -0x10003, AL, kCcKeep);      // MOVW+MOVT+ADD.
1431 
1432   // Low registers, Rd == Rn, kCcKeep.
1433   __ AddConstant(R0, R0, 0, AL, kCcKeep);             // Nothing.
1434   __ AddConstant(R1, R1, 1, AL, kCcKeep);             // 32-bit ADD, encoding T3.
1435   __ AddConstant(R0, R0, 7, AL, kCcKeep);             // 32-bit ADD, encoding T3.
1436   __ AddConstant(R1, R1, 8, AL, kCcKeep);             // 32-bit ADD, encoding T3.
1437   __ AddConstant(R0, R0, 255, AL, kCcKeep);           // 32-bit ADD, encoding T3.
1438   __ AddConstant(R1, R1, 256, AL, kCcKeep);           // 32-bit ADD, encoding T3.
1439   __ AddConstant(R0, R0, 257, AL, kCcKeep);           // 32-bit ADD, encoding T4.
1440   __ AddConstant(R1, R1, 0xfff, AL, kCcKeep);         // 32-bit ADD, encoding T4.
1441   __ AddConstant(R0, R0, 0x1000, AL, kCcKeep);        // 32-bit ADD, encoding T3.
1442   __ AddConstant(R1, R1, 0x1001, AL, kCcKeep);        // MVN+SUB.
1443   __ AddConstant(R0, R0, 0x1002, AL, kCcKeep);        // MOVW+ADD.
1444   __ AddConstant(R1, R1, 0xffff, AL, kCcKeep);        // MOVW+ADD.
1445   __ AddConstant(R0, R0, 0x10000, AL, kCcKeep);       // 32-bit ADD, encoding T3.
1446   __ AddConstant(R1, R1, 0x10001, AL, kCcKeep);       // 32-bit ADD, encoding T3.
1447   __ AddConstant(R0, R0, 0x10002, AL, kCcKeep);       // MVN+SUB.
1448   __ AddConstant(R1, R1, 0x10003, AL, kCcKeep);       // MOVW+MOVT+ADD.
1449   __ AddConstant(R0, R0, -1, AL, kCcKeep);            // 32-bit ADD, encoding T3.
1450   __ AddConstant(R1, R1, -7, AL, kCcKeep);            // 32-bit SUB, encoding T3.
1451   __ AddConstant(R0, R0, -8, AL, kCcKeep);            // 32-bit SUB, encoding T3.
1452   __ AddConstant(R1, R1, -255, AL, kCcKeep);          // 32-bit SUB, encoding T3.
1453   __ AddConstant(R0, R0, -256, AL, kCcKeep);          // 32-bit SUB, encoding T3.
1454   __ AddConstant(R1, R1, -257, AL, kCcKeep);          // 32-bit SUB, encoding T4.
1455   __ AddConstant(R0, R0, -0xfff, AL, kCcKeep);        // 32-bit SUB, encoding T4.
1456   __ AddConstant(R1, R1, -0x1000, AL, kCcKeep);       // 32-bit SUB, encoding T3.
1457   __ AddConstant(R0, R0, -0x1001, AL, kCcKeep);       // MVN+ADD.
1458   __ AddConstant(R1, R1, -0x1002, AL, kCcKeep);       // MOVW+SUB.
1459   __ AddConstant(R0, R0, -0xffff, AL, kCcKeep);       // MOVW+SUB.
1460   __ AddConstant(R1, R1, -0x10000, AL, kCcKeep);      // 32-bit SUB, encoding T3.
1461   __ AddConstant(R0, R0, -0x10001, AL, kCcKeep);      // 32-bit SUB, encoding T3.
1462   __ AddConstant(R1, R1, -0x10002, AL, kCcKeep);      // MVN+ADD.
1463   __ AddConstant(R0, R0, -0x10003, AL, kCcKeep);      // MOVW+MOVT+ADD.
1464 
1465   // Low registers, Rd != Rn, kCcSet.
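  // kCcSet requires flag-setting forms. Small immediates can use the 16-bit
  // ADDS/SUBS encodings; larger ones need the 32-bit ADDS/SUBS encoding T3 or,
  // because encoding T4 (ADDW/SUBW) has no flag-setting variant, a constant
  // materialized with MVN or MOVW(+MOVT) followed by ADDS/SUBS.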
1466   __ AddConstant(R0, R1, 0, AL, kCcSet);              // 16-bit ADDS.
1467   __ AddConstant(R0, R1, 1, AL, kCcSet);              // 16-bit ADDS.
1468   __ AddConstant(R0, R1, 7, AL, kCcSet);              // 16-bit ADDS.
1469   __ AddConstant(R0, R1, 8, AL, kCcSet);              // 32-bit ADDS, encoding T3.
1470   __ AddConstant(R0, R1, 255, AL, kCcSet);            // 32-bit ADDS, encoding T3.
1471   __ AddConstant(R0, R1, 256, AL, kCcSet);            // 32-bit ADDS, encoding T3.
1472   __ AddConstant(R0, R1, 257, AL, kCcSet);            // MVN+SUBS.
1473   __ AddConstant(R0, R1, 0xfff, AL, kCcSet);          // MOVW+ADDS.
1474   __ AddConstant(R0, R1, 0x1000, AL, kCcSet);         // 32-bit ADDS, encoding T3.
1475   __ AddConstant(R0, R1, 0x1001, AL, kCcSet);         // MVN+SUBS.
1476   __ AddConstant(R0, R1, 0x1002, AL, kCcSet);         // MOVW+ADDS.
1477   __ AddConstant(R0, R1, 0xffff, AL, kCcSet);         // MOVW+ADDS.
1478   __ AddConstant(R0, R1, 0x10000, AL, kCcSet);        // 32-bit ADDS, encoding T3.
1479   __ AddConstant(R0, R1, 0x10001, AL, kCcSet);        // 32-bit ADDS, encoding T3.
1480   __ AddConstant(R0, R1, 0x10002, AL, kCcSet);        // MVN+SUBS.
1481   __ AddConstant(R0, R1, 0x10003, AL, kCcSet);        // MOVW+MOVT+ADDS.
1482   __ AddConstant(R0, R1, -1, AL, kCcSet);             // 16-bit SUBS.
1483   __ AddConstant(R0, R1, -7, AL, kCcSet);             // 16-bit SUBS.
1484   __ AddConstant(R0, R1, -8, AL, kCcSet);             // 32-bit SUBS, encoding T3.
1485   __ AddConstant(R0, R1, -255, AL, kCcSet);           // 32-bit SUBS, encoding T3.
1486   __ AddConstant(R0, R1, -256, AL, kCcSet);           // 32-bit SUBS, encoding T3.
1487   __ AddConstant(R0, R1, -257, AL, kCcSet);           // MVN+ADDS.
1488   __ AddConstant(R0, R1, -0xfff, AL, kCcSet);         // MOVW+SUBS.
1489   __ AddConstant(R0, R1, -0x1000, AL, kCcSet);        // 32-bit SUBS, encoding T3.
1490   __ AddConstant(R0, R1, -0x1001, AL, kCcSet);        // MVN+ADDS.
1491   __ AddConstant(R0, R1, -0x1002, AL, kCcSet);        // MOVW+SUBS.
1492   __ AddConstant(R0, R1, -0xffff, AL, kCcSet);        // MOVW+SUBS.
1493   __ AddConstant(R0, R1, -0x10000, AL, kCcSet);       // 32-bit SUBS, encoding T3.
1494   __ AddConstant(R0, R1, -0x10001, AL, kCcSet);       // 32-bit SUBS, encoding T3.
1495   __ AddConstant(R0, R1, -0x10002, AL, kCcSet);       // MVN+ADDS.
1496   __ AddConstant(R0, R1, -0x10003, AL, kCcSet);       // MOVW+MOVT+ADDS.
1497 
1498   // Low registers, Rd == Rn, kCcSet.
1499   __ AddConstant(R0, R0, 0, AL, kCcSet);              // 16-bit ADDS, encoding T2.
1500   __ AddConstant(R1, R1, 1, AL, kCcSet);              // 16-bit ADDS, encoding T2.
1501   __ AddConstant(R0, R0, 7, AL, kCcSet);              // 16-bit ADDS, encoding T2.
1502   __ AddConstant(R1, R1, 8, AL, kCcSet);              // 16-bit ADDS, encoding T2.
1503   __ AddConstant(R0, R0, 255, AL, kCcSet);            // 16-bit ADDS, encoding T2.
1504   __ AddConstant(R1, R1, 256, AL, kCcSet);            // 32-bit ADDS, encoding T3.
1505   __ AddConstant(R0, R0, 257, AL, kCcSet);            // MVN+SUBS.
1506   __ AddConstant(R1, R1, 0xfff, AL, kCcSet);          // MOVW+ADDS.
1507   __ AddConstant(R0, R0, 0x1000, AL, kCcSet);         // 32-bit ADDS, encoding T3.
1508   __ AddConstant(R1, R1, 0x1001, AL, kCcSet);         // MVN+SUBS.
1509   __ AddConstant(R0, R0, 0x1002, AL, kCcSet);         // MOVW+ADDS.
1510   __ AddConstant(R1, R1, 0xffff, AL, kCcSet);         // MOVW+ADDS.
1511   __ AddConstant(R0, R0, 0x10000, AL, kCcSet);        // 32-bit ADDS, encoding T3.
1512   __ AddConstant(R1, R1, 0x10001, AL, kCcSet);        // 32-bit ADDS, encoding T3.
1513   __ AddConstant(R0, R0, 0x10002, AL, kCcSet);        // MVN+SUBS.
1514   __ AddConstant(R1, R1, 0x10003, AL, kCcSet);        // MOVW+MOVT+ADDS.
1515   __ AddConstant(R0, R0, -1, AL, kCcSet);             // 16-bit SUBS, encoding T2.
1516   __ AddConstant(R1, R1, -7, AL, kCcSet);             // 16-bit SUBS, encoding T2.
1517   __ AddConstant(R0, R0, -8, AL, kCcSet);             // 16-bit SUBS, encoding T2.
1518   __ AddConstant(R1, R1, -255, AL, kCcSet);           // 16-bit SUBS, encoding T2.
1519   __ AddConstant(R0, R0, -256, AL, kCcSet);           // 32-bit SUBS, encoding T3.
1520   __ AddConstant(R1, R1, -257, AL, kCcSet);           // MVN+ADDS.
1521   __ AddConstant(R0, R0, -0xfff, AL, kCcSet);         // MOVW+SUBS.
1522   __ AddConstant(R1, R1, -0x1000, AL, kCcSet);        // 32-bit SUBS, encoding T3.
1523   __ AddConstant(R0, R0, -0x1001, AL, kCcSet);        // MVN+ADDS.
1524   __ AddConstant(R1, R1, -0x1002, AL, kCcSet);        // MOVW+SUBS.
1525   __ AddConstant(R0, R0, -0xffff, AL, kCcSet);        // MOVW+SUBS.
1526   __ AddConstant(R1, R1, -0x10000, AL, kCcSet);       // 32-bit SUBS, encoding T3.
1527   __ AddConstant(R0, R0, -0x10001, AL, kCcSet);       // 32-bit SUBS, encoding T3.
1528   __ AddConstant(R1, R1, -0x10002, AL, kCcSet);       // MVN+ADDS.
1529   __ AddConstant(R0, R0, -0x10003, AL, kCcSet);       // MOVW+MOVT+ADDS.
1530 
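  // Conditional variants inside IT blocks. Within an IT block the 16-bit
  // encodings do not set flags, so kCcKeep can use them while kCcSet must
  // fall back to the 32-bit flag-setting encoding T3.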
1531   __ it(EQ);
1532   __ AddConstant(R0, R1, 1, EQ, kCcSet);              // 32-bit ADDS, encoding T3.
1533   __ it(NE);
1534   __ AddConstant(R0, R1, 1, NE, kCcKeep);             // 16-bit ADDS, encoding T1.
1535   __ it(GE);
1536   __ AddConstant(R0, R0, 1, GE, kCcSet);              // 32-bit ADDS, encoding T3.
1537   __ it(LE);
1538   __ AddConstant(R0, R0, 1, LE, kCcKeep);             // 16-bit ADDS, encoding T2.
1539 
1540   EmitAndCheck(&assembler, "AddConstant");
1541 }
1542 
1543 TEST_F(Thumb2AssemblerTest, CmpConstant) {
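  // Each case notes the expected sequence: a 16-bit CMP for a low register
  // with an 8-bit immediate, a 32-bit CMP for modified immediates, a CMN when
  // the negated value is a modified immediate, and otherwise a constant
  // materialized with MVN, MOVW or MOVW+MOVT followed by CMP or CMN.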
1544   __ CmpConstant(R0, 0);                              // 16-bit CMP.
1545   __ CmpConstant(R1, 1);                              // 16-bit CMP.
1546   __ CmpConstant(R0, 7);                              // 16-bit CMP.
1547   __ CmpConstant(R1, 8);                              // 16-bit CMP.
1548   __ CmpConstant(R0, 255);                            // 16-bit CMP.
1549   __ CmpConstant(R1, 256);                            // 32-bit CMP.
1550   __ CmpConstant(R0, 257);                            // MVN+CMN.
1551   __ CmpConstant(R1, 0xfff);                          // MOVW+CMP.
1552   __ CmpConstant(R0, 0x1000);                         // 32-bit CMP.
1553   __ CmpConstant(R1, 0x1001);                         // MVN+CMN.
1554   __ CmpConstant(R0, 0x1002);                         // MOVW+CMP.
1555   __ CmpConstant(R1, 0xffff);                         // MOVW+CMP.
1556   __ CmpConstant(R0, 0x10000);                        // 32-bit CMP.
1557   __ CmpConstant(R1, 0x10001);                        // 32-bit CMP.
1558   __ CmpConstant(R0, 0x10002);                        // MVN+CMN.
1559   __ CmpConstant(R1, 0x10003);                        // MOVW+MOVT+CMP.
1560   __ CmpConstant(R0, -1);                             // 32-bit CMP.
1561   __ CmpConstant(R1, -7);                             // CMN.
1562   __ CmpConstant(R0, -8);                             // CMN.
1563   __ CmpConstant(R1, -255);                           // CMN.
1564   __ CmpConstant(R0, -256);                           // CMN.
1565   __ CmpConstant(R1, -257);                           // MVN+CMP.
1566   __ CmpConstant(R0, -0xfff);                         // MOVW+CMN.
1567   __ CmpConstant(R1, -0x1000);                        // CMN.
1568   __ CmpConstant(R0, -0x1001);                        // MVN+CMP.
1569   __ CmpConstant(R1, -0x1002);                        // MOVW+CMN.
1570   __ CmpConstant(R0, -0xffff);                        // MOVW+CMN.
1571   __ CmpConstant(R1, -0x10000);                       // CMN.
1572   __ CmpConstant(R0, -0x10001);                       // CMN.
1573   __ CmpConstant(R1, -0x10002);                       // MVN+CMP.
1574   __ CmpConstant(R0, -0x10003);                       // MOVW+MOVT+CMP.
1575 
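  // High registers: the 16-bit CMP encoding only accepts R0-R7, so even small
  // immediates are expected to use the 32-bit encoding.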
1576   __ CmpConstant(R8, 0);                              // 32-bit CMP.
1577   __ CmpConstant(R9, 1);                              // 32-bit CMP.
1578   __ CmpConstant(R8, 7);                              // 32-bit CMP.
1579   __ CmpConstant(R9, 8);                              // 32-bit CMP.
1580   __ CmpConstant(R8, 255);                            // 32-bit CMP.
1581   __ CmpConstant(R9, 256);                            // 32-bit CMP.
1582   __ CmpConstant(R8, 257);                            // MVN+CMN.
1583   __ CmpConstant(R9, 0xfff);                          // MOVW+CMP.
1584   __ CmpConstant(R8, 0x1000);                         // 32-bit CMP.
1585   __ CmpConstant(R9, 0x1001);                         // MVN+CMN.
1586   __ CmpConstant(R8, 0x1002);                         // MOVW+CMP.
1587   __ CmpConstant(R9, 0xffff);                         // MOVW+CMP.
1588   __ CmpConstant(R8, 0x10000);                        // 32-bit CMP.
1589   __ CmpConstant(R9, 0x10001);                        // 32-bit CMP.
1590   __ CmpConstant(R8, 0x10002);                        // MVN+CMN.
1591   __ CmpConstant(R9, 0x10003);                        // MOVW+MOVT+CMP.
1592   __ CmpConstant(R8, -1);                             // 32-bit CMP.
1593   __ CmpConstant(R9, -7);                             // CMN.
1594   __ CmpConstant(R8, -8);                             // CMN.
1595   __ CmpConstant(R9, -255);                           // CMN.
1596   __ CmpConstant(R8, -256);                           // CMN.
1597   __ CmpConstant(R9, -257);                           // MVN+CMP.
1598   __ CmpConstant(R8, -0xfff);                         // MOVW+CMN.
1599   __ CmpConstant(R9, -0x1000);                        // CMN.
1600   __ CmpConstant(R8, -0x1001);                        // MVN+CMP.
1601   __ CmpConstant(R9, -0x1002);                        // MOVW+CMN.
1602   __ CmpConstant(R8, -0xffff);                        // MOVW+CMN.
1603   __ CmpConstant(R9, -0x10000);                       // CMN.
1604   __ CmpConstant(R8, -0x10001);                       // CMN.
1605   __ CmpConstant(R9, -0x10002);                       // MVN+CMP.
1606   __ CmpConstant(R8, -0x10003);                       // MOVW+MOVT+CMP.
1607 
1608   EmitAndCheck(&assembler, "CmpConstant");
1609 }
1610 
1611 #undef __
1612 }  // namespace arm
1613 }  // namespace art
1614