/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * This file is included by Codegen-armv5te-vfp.c, and implements architecture
 * variant-specific code.
 */

#define USE_IN_CACHE_HANDLER 1

/*
 * Determine the initial instruction set to be used for this trace.
 * Later components may decide to change this.
 */
JitInstructionSetType dvmCompilerInstructionSet(CompilationUnit *cUnit)
{
    return DALVIK_JIT_THUMB;
}

/*
 * Jump to the out-of-line handler in ARM mode to finish executing the
 * remainder of the more complex instructions.
 */
static void genDispatchToHandler(CompilationUnit *cUnit, TemplateOpCode opCode)
{
#if USE_IN_CACHE_HANDLER
    /*
     * NOTE - In practice BLX only needs one operand, but since the assembler
     * may abort itself and retry due to other out-of-range conditions we
     * cannot really use operand[0] to store the absolute target address, as
     * it may get clobbered by the final relative offset. Therefore, we
     * pretend that BLX_1 is a two-operand instruction and store the absolute
     * target address in operand[1] as well.
     */
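    /*
     * Assumption about the surrounding assembler (not stated here):
     * THUMB_BLX_1/THUMB_BLX_2 are the two 16-bit halves of the Thumb BL/BLX
     * pair, and the final assembly pass is expected to rewrite the absolute
     * target below into the PC-relative offset split across the two halves.
     */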
    newLIR2(cUnit, THUMB_BLX_1,
            (int) gDvmJit.codeCache + templateEntryOffsets[opCode],
            (int) gDvmJit.codeCache + templateEntryOffsets[opCode]);
    newLIR2(cUnit, THUMB_BLX_2,
            (int) gDvmJit.codeCache + templateEntryOffsets[opCode],
            (int) gDvmJit.codeCache + templateEntryOffsets[opCode]);
#else
    /*
     * In case we want to access the statically compiled handlers for
     * debugging purposes, define USE_IN_CACHE_HANDLER to 0
     */
    void *templatePtr;

#define JIT_TEMPLATE(X) extern void dvmCompiler_TEMPLATE_##X();
#include "../../../template/armv5te-vfp/TemplateOpList.h"
#undef JIT_TEMPLATE
    switch (opCode) {
#define JIT_TEMPLATE(X) \
        case TEMPLATE_##X: { templatePtr = dvmCompiler_TEMPLATE_##X; break; }
#include "../../../template/armv5te-vfp/TemplateOpList.h"
#undef JIT_TEMPLATE
        default: templatePtr = NULL;
    }
    loadConstant(cUnit, r7, (int) templatePtr);
    newLIR1(cUnit, THUMB_BLX_R, r7);
#endif
}

/* Architecture-specific initializations and checks go here */
bool dvmCompilerArchInit(void)
{
    /* First, declare dvmCompiler_TEMPLATE_XXX for each template */
#define JIT_TEMPLATE(X) extern void dvmCompiler_TEMPLATE_##X();
#include "../../../template/armv5te-vfp/TemplateOpList.h"
#undef JIT_TEMPLATE

    int i = 0;
    extern void dvmCompilerTemplateStart(void);

    /*
     * Then, populate the templateEntryOffsets array with the offsets from
     * the dvmCompilerTemplateStart symbol for each template.
     */
#define JIT_TEMPLATE(X) templateEntryOffsets[i++] = \
        (intptr_t) dvmCompiler_TEMPLATE_##X - (intptr_t) dvmCompilerTemplateStart;
#include "../../../template/armv5te-vfp/TemplateOpList.h"
#undef JIT_TEMPLATE

    /* Codegen-specific assumptions */
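    /*
     * Presumed rationale for the bounds below: a Thumb LDR/STR with a 5-bit
     * immediate only reaches word-aligned offsets up to 31 * 4 = 124 bytes,
     * hence the "< 128 and word-aligned" checks; the contents offset is
     * assumed to only need to fit in an 8-bit immediate, hence "< 256".
     */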
    assert(offsetof(ClassObject, vtable) < 128 &&
           (offsetof(ClassObject, vtable) & 0x3) == 0);
    assert(offsetof(ArrayObject, length) < 128 &&
           (offsetof(ArrayObject, length) & 0x3) == 0);
    assert(offsetof(ArrayObject, contents) < 256);

    /* Up to 5 args are pushed on top of FP - sizeofStackSaveArea */
    assert(sizeof(StackSaveArea) < 236);
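    /*
     * Presumed rationale for 236 (an assumption; the bound is not derived
     * here): sizeof(StackSaveArea) plus up to 5 * 4 bytes of pushed arguments
     * should stay below 256 so the resulting frame offsets remain encodable
     * by the Thumb codegen.
     */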

    /*
     * The EA is calculated as "Rn + (imm5 << 2)". There are 5 entry points
     * that codegen may access, so make sure that the offset of the
     * jitToInterpEntries field from the top of the struct is less than 108.
     */
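    /*
     * Illustrative check, assuming 4-byte entries: imm5 tops out at 31, so
     * the largest reachable byte offset is 31 * 4 = 124; the fifth entry sits
     * 16 bytes past jitToInterpEntries, and 108 + 16 = 124.
     */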
    assert(offsetof(InterpState, jitToInterpEntries) < 108);
    return true;
}

static bool genInlineSqrt(CompilationUnit *cUnit, MIR *mir)
{
    int offset = offsetof(InterpState, retval);
    OpCode opCode = mir->dalvikInsn.opCode;
    int vSrc = mir->dalvikInsn.vA;
    loadValueAddress(cUnit, vSrc, r2);
    genDispatchToHandler(cUnit, TEMPLATE_SQRT_DOUBLE_VFP);
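    /*
     * The handler is assumed to return the double result in r0/r1; store the
     * two words into InterpState.retval through rGLUE. THUMB_STR_RRI5 takes
     * a word offset, hence offset >> 2 and (offset >> 2) + 1.
     */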
    newLIR3(cUnit, THUMB_STR_RRI5, r0, rGLUE, offset >> 2);
    newLIR3(cUnit, THUMB_STR_RRI5, r1, rGLUE, (offset >> 2) + 1);
    return false;
}

static bool genInlineCos(CompilationUnit *cUnit, MIR *mir)
{
    return false;
}

static bool genInlineSin(CompilationUnit *cUnit, MIR *mir)
{
    return false;
}

static bool genArithOpFloat(CompilationUnit *cUnit, MIR *mir, int vDest,
                            int vSrc1, int vSrc2)
{
    TemplateOpCode opCode;

    /*
     * Don't attempt to optimize register usage since these opcodes call out to
     * the handlers.
     */
    switch (mir->dalvikInsn.opCode) {
        case OP_ADD_FLOAT_2ADDR:
        case OP_ADD_FLOAT:
            opCode = TEMPLATE_ADD_FLOAT_VFP;
            break;
        case OP_SUB_FLOAT_2ADDR:
        case OP_SUB_FLOAT:
            opCode = TEMPLATE_SUB_FLOAT_VFP;
            break;
        case OP_DIV_FLOAT_2ADDR:
        case OP_DIV_FLOAT:
            opCode = TEMPLATE_DIV_FLOAT_VFP;
            break;
        case OP_MUL_FLOAT_2ADDR:
        case OP_MUL_FLOAT:
            opCode = TEMPLATE_MUL_FLOAT_VFP;
            break;
        case OP_REM_FLOAT_2ADDR:
        case OP_REM_FLOAT:
        case OP_NEG_FLOAT: {
            return genArithOpFloatPortable(cUnit, mir, vDest,
                                           vSrc1, vSrc2);
        }
        default:
            return true;
    }
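    /*
     * Assumed calling convention for the VFP arithmetic templates: r0, r1 and
     * r2 carry the addresses of the Dalvik registers holding the destination
     * and the two sources; the handler loads the operands, computes, and
     * writes the result back through r0.
     */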
    loadValueAddress(cUnit, vDest, r0);
    loadValueAddress(cUnit, vSrc1, r1);
    loadValueAddress(cUnit, vSrc2, r2);
    genDispatchToHandler(cUnit, opCode);
    return false;
}

static bool genArithOpDouble(CompilationUnit *cUnit, MIR *mir, int vDest,
                             int vSrc1, int vSrc2)
{
    TemplateOpCode opCode;

    /*
     * Don't attempt to optimize register usage since these opcodes call out to
     * the handlers.
     */
    switch (mir->dalvikInsn.opCode) {
        case OP_ADD_DOUBLE_2ADDR:
        case OP_ADD_DOUBLE:
            opCode = TEMPLATE_ADD_DOUBLE_VFP;
            break;
        case OP_SUB_DOUBLE_2ADDR:
        case OP_SUB_DOUBLE:
            opCode = TEMPLATE_SUB_DOUBLE_VFP;
            break;
        case OP_DIV_DOUBLE_2ADDR:
        case OP_DIV_DOUBLE:
            opCode = TEMPLATE_DIV_DOUBLE_VFP;
            break;
        case OP_MUL_DOUBLE_2ADDR:
        case OP_MUL_DOUBLE:
            opCode = TEMPLATE_MUL_DOUBLE_VFP;
            break;
        case OP_REM_DOUBLE_2ADDR:
        case OP_REM_DOUBLE:
        case OP_NEG_DOUBLE: {
            return genArithOpDoublePortable(cUnit, mir, vDest,
                                            vSrc1, vSrc2);
        }
        default:
            return true;
    }
    loadValueAddress(cUnit, vDest, r0);
    loadValueAddress(cUnit, vSrc1, r1);
    loadValueAddress(cUnit, vSrc2, r2);
    genDispatchToHandler(cUnit, opCode);
    return false;
}

static bool genConversion(CompilationUnit *cUnit, MIR *mir)
{
    OpCode opCode = mir->dalvikInsn.opCode;
    int vSrc1Dest = mir->dalvikInsn.vA;
    int vSrc2 = mir->dalvikInsn.vB;
    TemplateOpCode template;

    switch (opCode) {
        case OP_INT_TO_FLOAT:
            template = TEMPLATE_INT_TO_FLOAT_VFP;
            break;
        case OP_FLOAT_TO_INT:
            template = TEMPLATE_FLOAT_TO_INT_VFP;
            break;
        case OP_DOUBLE_TO_FLOAT:
            template = TEMPLATE_DOUBLE_TO_FLOAT_VFP;
            break;
        case OP_FLOAT_TO_DOUBLE:
            template = TEMPLATE_FLOAT_TO_DOUBLE_VFP;
            break;
        case OP_INT_TO_DOUBLE:
            template = TEMPLATE_INT_TO_DOUBLE_VFP;
            break;
        case OP_DOUBLE_TO_INT:
            template = TEMPLATE_DOUBLE_TO_INT_VFP;
            break;
        case OP_FLOAT_TO_LONG:
        case OP_LONG_TO_FLOAT:
        case OP_DOUBLE_TO_LONG:
        case OP_LONG_TO_DOUBLE:
            return genConversionPortable(cUnit, mir);
        default:
            return true;
    }
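    /*
     * Assumed template convention: r0 holds the address of vA (the
     * destination register) and r1 the address of vB (the source); the
     * handler reads through r1 and writes the converted value through r0.
     */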
    loadValueAddress(cUnit, vSrc1Dest, r0);
    loadValueAddress(cUnit, vSrc2, r1);
    genDispatchToHandler(cUnit, template);
    return false;
}

static bool genCmpX(CompilationUnit *cUnit, MIR *mir, int vDest, int vSrc1,
                    int vSrc2)
{
    TemplateOpCode template;

    /*
     * Don't attempt to optimize register usage since these opcodes call out to
     * the handlers.
     */
    switch (mir->dalvikInsn.opCode) {
        case OP_CMPL_FLOAT:
            template = TEMPLATE_CMPL_FLOAT_VFP;
            break;
        case OP_CMPG_FLOAT:
            template = TEMPLATE_CMPG_FLOAT_VFP;
            break;
        case OP_CMPL_DOUBLE:
            template = TEMPLATE_CMPL_DOUBLE_VFP;
            break;
        case OP_CMPG_DOUBLE:
            template = TEMPLATE_CMPG_DOUBLE_VFP;
            break;
        default:
            return true;
    }
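    /*
     * Assumed convention: the compare templates take the addresses of the two
     * source values in r0/r1 and return the integer result (-1, 0, or 1) in
     * r0, which storeValue then writes back to vDest using r1 as scratch.
     */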
    loadValueAddress(cUnit, vSrc1, r0);
    loadValueAddress(cUnit, vSrc2, r1);
    genDispatchToHandler(cUnit, template);
    storeValue(cUnit, r0, vDest, r1);
    return false;
}