1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "local_value_numbering.h"
18 #include "dataflow_iterator-inl.h"
19 
20 namespace art {
21 
22 /*
23  * Main table containing data flow attributes for each bytecode. The
24  * first kNumPackedOpcodes entries are for Dalvik bytecode
25  * instructions; the extended opcodes at the MIR level are appended
26  * afterwards.
27  *
28  * TODO - many optimization flags are incomplete - they will only limit the
29  * scope of optimizations but will not cause mis-optimizations.
30  */
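// Quick reference for the flags that the liveness and SSA passes in this file
// consume (see the DF_* definitions for the complete set):
//   DF_DA                 - instruction defines vA.
//   DF_UA / DF_UB / DF_UC - instruction uses vA / vB / vC.
//   DF_A_WIDE (_B_, _C_)  - that operand is a wide value occupying vX and vX+1.
//   DF_FORMAT_35C         - operands are the vA registers listed in arg[].
//   DF_FORMAT_3RC         - operands are the contiguous range vC .. vC+vA-1.
//   DF_FORMAT_EXTENDED    - operands are decoded by HandleExtended() and
//                           DataFlowSSAFormatExtended() below.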
31 const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
32   // 00 NOP
33   DF_NOP,
34 
35   // 01 MOVE vA, vB
36   DF_DA | DF_UB | DF_IS_MOVE,
37 
38   // 02 MOVE_FROM16 vAA, vBBBB
39   DF_DA | DF_UB | DF_IS_MOVE,
40 
41   // 03 MOVE_16 vAAAA, vBBBB
42   DF_DA | DF_UB | DF_IS_MOVE,
43 
44   // 04 MOVE_WIDE vA, vB
45   DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
46 
47   // 05 MOVE_WIDE_FROM16 vAA, vBBBB
48   DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
49 
50   // 06 MOVE_WIDE_16 vAAAA, vBBBB
51   DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
52 
53   // 07 MOVE_OBJECT vA, vB
54   DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
55 
56   // 08 MOVE_OBJECT_FROM16 vAA, vBBBB
57   DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
58 
59   // 09 MOVE_OBJECT_16 vAAAA, vBBBB
60   DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
61 
62   // 0A MOVE_RESULT vAA
63   DF_DA,
64 
65   // 0B MOVE_RESULT_WIDE vAA
66   DF_DA | DF_A_WIDE,
67 
68   // 0C MOVE_RESULT_OBJECT vAA
69   DF_DA | DF_REF_A,
70 
71   // 0D MOVE_EXCEPTION vAA
72   DF_DA | DF_REF_A | DF_NON_NULL_DST,
73 
74   // 0E RETURN_VOID
75   DF_NOP,
76 
77   // 0F RETURN vAA
78   DF_UA,
79 
80   // 10 RETURN_WIDE vAA
81   DF_UA | DF_A_WIDE,
82 
83   // 11 RETURN_OBJECT vAA
84   DF_UA | DF_REF_A,
85 
86   // 12 CONST_4 vA, #+B
87   DF_DA | DF_SETS_CONST,
88 
89   // 13 CONST_16 vAA, #+BBBB
90   DF_DA | DF_SETS_CONST,
91 
92   // 14 CONST vAA, #+BBBBBBBB
93   DF_DA | DF_SETS_CONST,
94 
95   // 15 CONST_HIGH16 vAA, #+BBBB0000
96   DF_DA | DF_SETS_CONST,
97 
98   // 16 CONST_WIDE_16 vAA, #+BBBB
99   DF_DA | DF_A_WIDE | DF_SETS_CONST,
100 
101   // 17 CONST_WIDE_32 vAA, #+BBBBBBBB
102   DF_DA | DF_A_WIDE | DF_SETS_CONST,
103 
104   // 18 CONST_WIDE vAA, #+BBBBBBBBBBBBBBBB
105   DF_DA | DF_A_WIDE | DF_SETS_CONST,
106 
107   // 19 CONST_WIDE_HIGH16 vAA, #+BBBB000000000000
108   DF_DA | DF_A_WIDE | DF_SETS_CONST,
109 
110   // 1A CONST_STRING vAA, string@BBBB
111   DF_DA | DF_REF_A | DF_NON_NULL_DST,
112 
113   // 1B CONST_STRING_JUMBO vAA, string@BBBBBBBB
114   DF_DA | DF_REF_A | DF_NON_NULL_DST,
115 
116   // 1C CONST_CLASS vAA, type@BBBB
117   DF_DA | DF_REF_A | DF_NON_NULL_DST,
118 
119   // 1D MONITOR_ENTER vAA
120   DF_UA | DF_NULL_CHK_A | DF_REF_A,
121 
122   // 1E MONITOR_EXIT vAA
123   DF_UA | DF_NULL_CHK_A | DF_REF_A,
124 
125   // 1F CHK_CAST vAA, type@BBBB
126   DF_UA | DF_REF_A | DF_CHK_CAST | DF_UMS,
127 
128   // 20 INSTANCE_OF vA, vB, type@CCCC
129   DF_DA | DF_UB | DF_CORE_A | DF_REF_B | DF_UMS,
130 
131   // 21 ARRAY_LENGTH vA, vB
132   DF_DA | DF_UB | DF_NULL_CHK_B | DF_CORE_A | DF_REF_B,
133 
134   // 22 NEW_INSTANCE vAA, type@BBBB
135   DF_DA | DF_NON_NULL_DST | DF_REF_A | DF_UMS,
136 
137   // 23 NEW_ARRAY vA, vB, type@CCCC
138   DF_DA | DF_UB | DF_NON_NULL_DST | DF_REF_A | DF_CORE_B | DF_UMS,
139 
140   // 24 FILLED_NEW_ARRAY {vD, vE, vF, vG, vA}
141   DF_FORMAT_35C | DF_NON_NULL_RET | DF_UMS,
142 
143   // 25 FILLED_NEW_ARRAY_RANGE {vCCCC .. vNNNN}, type@BBBB
144   DF_FORMAT_3RC | DF_NON_NULL_RET | DF_UMS,
145 
146   // 26 FILL_ARRAY_DATA vAA, +BBBBBBBB
147   DF_UA | DF_REF_A | DF_UMS,
148 
149   // 27 THROW vAA
150   DF_UA | DF_REF_A | DF_UMS,
151 
152   // 28 GOTO
153   DF_NOP,
154 
155   // 29 GOTO_16
156   DF_NOP,
157 
158   // 2A GOTO_32
159   DF_NOP,
160 
161   // 2B PACKED_SWITCH vAA, +BBBBBBBB
162   DF_UA | DF_CORE_A,
163 
164   // 2C SPARSE_SWITCH vAA, +BBBBBBBB
165   DF_UA | DF_CORE_A,
166 
167   // 2D CMPL_FLOAT vAA, vBB, vCC
168   DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
169 
170   // 2E CMPG_FLOAT vAA, vBB, vCC
171   DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
172 
173   // 2F CMPL_DOUBLE vAA, vBB, vCC
174   DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_B | DF_FP_C | DF_CORE_A,
175 
176   // 30 CMPG_DOUBLE vAA, vBB, vCC
177   DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_B | DF_FP_C | DF_CORE_A,
178 
179   // 31 CMP_LONG vAA, vBB, vCC
180   DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
181 
182   // 32 IF_EQ vA, vB, +CCCC
183   DF_UA | DF_UB | DF_SAME_TYPE_AB,
184 
185   // 33 IF_NE vA, vB, +CCCC
186   DF_UA | DF_UB | DF_SAME_TYPE_AB,
187 
188   // 34 IF_LT vA, vB, +CCCC
189   DF_UA | DF_UB | DF_SAME_TYPE_AB,
190 
191   // 35 IF_GE vA, vB, +CCCC
192   DF_UA | DF_UB | DF_SAME_TYPE_AB,
193 
194   // 36 IF_GT vA, vB, +CCCC
195   DF_UA | DF_UB | DF_SAME_TYPE_AB,
196 
197   // 37 IF_LE vA, vB, +CCCC
198   DF_UA | DF_UB | DF_SAME_TYPE_AB,
199 
200   // 38 IF_EQZ vAA, +BBBB
201   DF_UA,
202 
203   // 39 IF_NEZ vAA, +BBBB
204   DF_UA,
205 
206   // 3A IF_LTZ vAA, +BBBB
207   DF_UA,
208 
209   // 3B IF_GEZ vAA, +BBBB
210   DF_UA,
211 
212   // 3C IF_GTZ vAA, +BBBB
213   DF_UA,
214 
215   // 3D IF_LEZ vAA, +BBBB
216   DF_UA,
217 
218   // 3E UNUSED_3E
219   DF_NOP,
220 
221   // 3F UNUSED_3F
222   DF_NOP,
223 
224   // 40 UNUSED_40
225   DF_NOP,
226 
227   // 41 UNUSED_41
228   DF_NOP,
229 
230   // 42 UNUSED_42
231   DF_NOP,
232 
233   // 43 UNUSED_43
234   DF_NOP,
235 
236   // 44 AGET vAA, vBB, vCC
237   DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
238 
239   // 45 AGET_WIDE vAA, vBB, vCC
240   DF_DA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
241 
242   // 46 AGET_OBJECT vAA, vBB, vCC
243   DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_A | DF_REF_B | DF_CORE_C | DF_LVN,
244 
245   // 47 AGET_BOOLEAN vAA, vBB, vCC
246   DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
247 
248   // 48 AGET_BYTE vAA, vBB, vCC
249   DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
250 
251   // 49 AGET_CHAR vAA, vBB, vCC
252   DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
253 
254   // 4A AGET_SHORT vAA, vBB, vCC
255   DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
256 
257   // 4B APUT vAA, vBB, vCC
258   DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
259 
260   // 4C APUT_WIDE vAA, vBB, vCC
261   DF_UA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
262 
263   // 4D APUT_OBJECT vAA, vBB, vCC
264   DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_A | DF_REF_B | DF_CORE_C | DF_LVN,
265 
266   // 4E APUT_BOOLEAN vAA, vBB, vCC
267   DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
268 
269   // 4F APUT_BYTE vAA, vBB, vCC
270   DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
271 
272   // 50 APUT_CHAR vAA, vBB, vCC
273   DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
274 
275   // 51 APUT_SHORT vAA, vBB, vCC
276   DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
277 
278   // 52 IGET vA, vB, field@CCCC
279   DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
280 
281   // 53 IGET_WIDE vA, vB, field@CCCC
282   DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
283 
284   // 54 IGET_OBJECT vA, vB, field@CCCC
285   DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
286 
287   // 55 IGET_BOOLEAN vA, vB, field@CCCC
288   DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
289 
290   // 56 IGET_BYTE vA, vB, field@CCCC
291   DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
292 
293   // 57 IGET_CHAR vA, vB, field@CCCC
294   DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
295 
296   // 58 IGET_SHORT vA, vB, field@CCCC
297   DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
298 
299   // 59 IPUT vA, vB, field@CCCC
300   DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
301 
302   // 5A IPUT_WIDE vA, vB, field@CCCC
303   DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
304 
305   // 5B IPUT_OBJECT vA, vB, field@CCCC
306   DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
307 
308   // 5C IPUT_BOOLEAN vA, vB, field@CCCC
309   DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
310 
311   // 5D IPUT_BYTE vA, vB, field@CCCC
312   DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
313 
314   // 5E IPUT_CHAR vA, vB, field@CCCC
315   DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
316 
317   // 5F IPUT_SHORT vA, vB, field@CCCC
318   DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
319 
320   // 60 SGET vAA, field@BBBB
321   DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
322 
323   // 61 SGET_WIDE vAA, field@BBBB
324   DF_DA | DF_A_WIDE | DF_SFIELD | DF_CLINIT | DF_UMS,
325 
326   // 62 SGET_OBJECT vAA, field@BBBB
327   DF_DA | DF_REF_A | DF_SFIELD | DF_CLINIT | DF_UMS,
328 
329   // 63 SGET_BOOLEAN vAA, field@BBBB
330   DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
331 
332   // 64 SGET_BYTE vAA, field@BBBB
333   DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
334 
335   // 65 SGET_CHAR vAA, field@BBBB
336   DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
337 
338   // 66 SGET_SHORT vAA, field@BBBB
339   DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
340 
341   // 67 SPUT vAA, field@BBBB
342   DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
343 
344   // 68 SPUT_WIDE vAA, field@BBBB
345   DF_UA | DF_A_WIDE | DF_SFIELD | DF_CLINIT | DF_UMS,
346 
347   // 69 SPUT_OBJECT vAA, field@BBBB
348   DF_UA | DF_REF_A | DF_SFIELD | DF_CLINIT | DF_UMS,
349 
350   // 6A SPUT_BOOLEAN vAA, field@BBBB
351   DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
352 
353   // 6B SPUT_BYTE vAA, field@BBBB
354   DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
355 
356   // 6C SPUT_CHAR vAA, field@BBBB
357   DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
358 
359   // 6D SPUT_SHORT vAA, field@BBBB
360   DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
361 
362   // 6E INVOKE_VIRTUAL {vD, vE, vF, vG, vA}
363   DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
364 
365   // 6F INVOKE_SUPER {vD, vE, vF, vG, vA}
366   DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
367 
368   // 70 INVOKE_DIRECT {vD, vE, vF, vG, vA}
369   DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
370 
371   // 71 INVOKE_STATIC {vD, vE, vF, vG, vA}
372   DF_FORMAT_35C | DF_CLINIT | DF_UMS,
373 
374   // 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
375   DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
376 
377   // 73 RETURN_VOID_NO_BARRIER
378   DF_NOP,
379 
380   // 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
381   DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
382 
383   // 75 INVOKE_SUPER_RANGE {vCCCC .. vNNNN}
384   DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
385 
386   // 76 INVOKE_DIRECT_RANGE {vCCCC .. vNNNN}
387   DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
388 
389   // 77 INVOKE_STATIC_RANGE {vCCCC .. vNNNN}
390   DF_FORMAT_3RC | DF_CLINIT | DF_UMS,
391 
392   // 78 INVOKE_INTERFACE_RANGE {vCCCC .. vNNNN}
393   DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
394 
395   // 79 UNUSED_79
396   DF_NOP,
397 
398   // 7A UNUSED_7A
399   DF_NOP,
400 
401   // 7B NEG_INT vA, vB
402   DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
403 
404   // 7C NOT_INT vA, vB
405   DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
406 
407   // 7D NEG_LONG vA, vB
408   DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
409 
410   // 7E NOT_LONG vA, vB
411   DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
412 
413   // 7F NEG_FLOAT vA, vB
414   DF_DA | DF_UB | DF_FP_A | DF_FP_B,
415 
416   // 80 NEG_DOUBLE vA, vB
417   DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
418 
419   // 81 INT_TO_LONG vA, vB
420   DF_DA | DF_A_WIDE | DF_UB | DF_CORE_A | DF_CORE_B,
421 
422   // 82 INT_TO_FLOAT vA, vB
423   DF_DA | DF_UB | DF_FP_A | DF_CORE_B,
424 
425   // 83 INT_TO_DOUBLE vA, vB
426   DF_DA | DF_A_WIDE | DF_UB | DF_FP_A | DF_CORE_B,
427 
428   // 84 LONG_TO_INT vA, vB
429   DF_DA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
430 
431   // 85 LONG_TO_FLOAT vA, vB
432   DF_DA | DF_UB | DF_B_WIDE | DF_FP_A | DF_CORE_B,
433 
434   // 86 LONG_TO_DOUBLE vA, vB
435   DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_CORE_B,
436 
437   // 87 FLOAT_TO_INT vA, vB
438   DF_DA | DF_UB | DF_FP_B | DF_CORE_A,
439 
440   // 88 FLOAT_TO_LONG vA, vB
441   DF_DA | DF_A_WIDE | DF_UB | DF_FP_B | DF_CORE_A,
442 
443   // 89 FLOAT_TO_DOUBLE vA, vB
444   DF_DA | DF_A_WIDE | DF_UB | DF_FP_A | DF_FP_B,
445 
446   // 8A DOUBLE_TO_INT vA, vB
447   DF_DA | DF_UB | DF_B_WIDE | DF_FP_B | DF_CORE_A,
448 
449   // 8B DOUBLE_TO_LONG vA, vB
450   DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_B | DF_CORE_A,
451 
452   // 8C DOUBLE_TO_FLOAT vA, vB
453   DF_DA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
454 
455   // 8D INT_TO_BYTE vA, vB
456   DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
457 
458   // 8E INT_TO_CHAR vA, vB
459   DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
460 
461   // 8F INT_TO_SHORT vA, vB
462   DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
463 
464   // 90 ADD_INT vAA, vBB, vCC
465   DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
466 
467   // 91 SUB_INT vAA, vBB, vCC
468   DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
469 
470   // 92 MUL_INT vAA, vBB, vCC
471   DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
472 
473   // 93 DIV_INT vAA, vBB, vCC
474   DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
475 
476   // 94 REM_INT vAA, vBB, vCC
477   DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
478 
479   // 95 AND_INT vAA, vBB, vCC
480   DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
481 
482   // 96 OR_INT vAA, vBB, vCC
483   DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
484 
485   // 97 XOR_INT vAA, vBB, vCC
486   DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
487 
488   // 98 SHL_INT vAA, vBB, vCC
489   DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
490 
491   // 99 SHR_INT vAA, vBB, vCC
492   DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
493 
494   // 9A USHR_INT vAA, vBB, vCC
495   DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
496 
497   // 9B ADD_LONG vAA, vBB, vCC
498   DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
499 
500   // 9C SUB_LONG vAA, vBB, vCC
501   DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
502 
503   // 9D MUL_LONG vAA, vBB, vCC
504   DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
505 
506   // 9E DIV_LONG vAA, vBB, vCC
507   DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
508 
509   // 9F REM_LONG vAA, vBB, vCC
510   DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
511 
512   // A0 AND_LONG vAA, vBB, vCC
513   DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
514 
515   // A1 OR_LONG vAA, vBB, vCC
516   DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
517 
518   // A2 XOR_LONG vAA, vBB, vCC
519   DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
520 
521   // A3 SHL_LONG vAA, vBB, vCC
522   DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
523 
524   // A4 SHR_LONG vAA, vBB, vCC
525   DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
526 
527   // A5 USHR_LONG vAA, vBB, vCC
528   DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
529 
530   // A6 ADD_FLOAT vAA, vBB, vCC
531   DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
532 
533   // A7 SUB_FLOAT vAA, vBB, vCC
534   DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
535 
536   // A8 MUL_FLOAT vAA, vBB, vCC
537   DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
538 
539   // A9 DIV_FLOAT vAA, vBB, vCC
540   DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
541 
542   // AA REM_FLOAT vAA, vBB, vCC
543   DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
544 
545   // AB ADD_DOUBLE vAA, vBB, vCC
546   DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
547 
548   // AC SUB_DOUBLE vAA, vBB, vCC
549   DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
550 
551   // AD MUL_DOUBLE vAA, vBB, vCC
552   DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
553 
554   // AE DIV_DOUBLE vAA, vBB, vCC
555   DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
556 
557   // AF REM_DOUBLE vAA, vBB, vCC
558   DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
559 
560   // B0 ADD_INT_2ADDR vA, vB
561   DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
562 
563   // B1 SUB_INT_2ADDR vA, vB
564   DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
565 
566   // B2 MUL_INT_2ADDR vA, vB
567   DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
568 
569   // B3 DIV_INT_2ADDR vA, vB
570   DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
571 
572   // B4 REM_INT_2ADDR vA, vB
573   DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
574 
575   // B5 AND_INT_2ADDR vA, vB
576   DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
577 
578   // B6 OR_INT_2ADDR vA, vB
579   DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
580 
581   // B7 XOR_INT_2ADDR vA, vB
582   DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
583 
584   // B8 SHL_INT_2ADDR vA, vB
585   DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
586 
587   // B9 SHR_INT_2ADDR vA, vB
588   DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
589 
590   // BA USHR_INT_2ADDR vA, vB
591   DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
592 
593   // BB ADD_LONG_2ADDR vA, vB
594   DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
595 
596   // BC SUB_LONG_2ADDR vA, vB
597   DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
598 
599   // BD MUL_LONG_2ADDR vA, vB
600   DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
601 
602   // BE DIV_LONG_2ADDR vA, vB
603   DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
604 
605   // BF REM_LONG_2ADDR vA, vB
606   DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
607 
608   // C0 AND_LONG_2ADDR vA, vB
609   DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
610 
611   // C1 OR_LONG_2ADDR vA, vB
612   DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
613 
614   // C2 XOR_LONG_2ADDR vA, vB
615   DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
616 
617   // C3 SHL_LONG_2ADDR vA, vB
618   DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
619 
620   // C4 SHR_LONG_2ADDR vA, vB
621   DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
622 
623   // C5 USHR_LONG_2ADDR vA, vB
624   DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
625 
626   // C6 ADD_FLOAT_2ADDR vA, vB
627   DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
628 
629   // C7 SUB_FLOAT_2ADDR vA, vB
630   DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
631 
632   // C8 MUL_FLOAT_2ADDR vA, vB
633   DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
634 
635   // C9 DIV_FLOAT_2ADDR vA, vB
636   DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
637 
638   // CA REM_FLOAT_2ADDR vA, vB
639   DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
640 
641   // CB ADD_DOUBLE_2ADDR vA, vB
642   DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
643 
644   // CC SUB_DOUBLE_2ADDR vA, vB
645   DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
646 
647   // CD MUL_DOUBLE_2ADDR vA, vB
648   DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
649 
650   // CE DIV_DOUBLE_2ADDR vA, vB
651   DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
652 
653   // CF REM_DOUBLE_2ADDR vA, vB
654   DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
655 
656   // D0 ADD_INT_LIT16 vA, vB, #+CCCC
657   DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
658 
659   // D1 RSUB_INT vA, vB, #+CCCC
660   DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
661 
662   // D2 MUL_INT_LIT16 vA, vB, #+CCCC
663   DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
664 
665   // D3 DIV_INT_LIT16 vA, vB, #+CCCC
666   DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
667 
668   // D4 REM_INT_LIT16 vA, vB, #+CCCC
669   DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
670 
671   // D5 AND_INT_LIT16 vA, vB, #+CCCC
672   DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
673 
674   // D6 OR_INT_LIT16 vA, vB, #+CCCC
675   DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
676 
677   // D7 XOR_INT_LIT16 vA, vB, #+CCCC
678   DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
679 
680   // D8 ADD_INT_LIT8 vAA, vBB, #+CC
681   DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
682 
683   // D9 RSUB_INT_LIT8 vAA, vBB, #+CC
684   DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
685 
686   // DA MUL_INT_LIT8 vAA, vBB, #+CC
687   DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
688 
689   // DB DIV_INT_LIT8 vAA, vBB, #+CC
690   DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
691 
692   // DC REM_INT_LIT8 vAA, vBB, #+CC
693   DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
694 
695   // DD AND_INT_LIT8 vAA, vBB, #+CC
696   DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
697 
698   // DE OR_INT_LIT8 vAA, vBB, #+CC
699   DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
700 
701   // DF XOR_INT_LIT8 vAA, vBB, #+CC
702   DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
703 
704   // E0 SHL_INT_LIT8 vAA, vBB, #+CC
705   DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
706 
707   // E1 SHR_INT_LIT8 vAA, vBB, #+CC
708   DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
709 
710   // E2 USHR_INT_LIT8 vAA, vBB, #+CC
711   DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
712 
713   // E3 IGET_QUICK
714   DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
715 
716   // E4 IGET_WIDE_QUICK
717   DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
718 
719   // E5 IGET_OBJECT_QUICK
720   DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
721 
722   // E6 IPUT_QUICK
723   DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
724 
725   // E7 IPUT_WIDE_QUICK
726   DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
727 
728   // E8 IPUT_OBJECT_QUICK
729   DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
730 
731   // E9 INVOKE_VIRTUAL_QUICK
732   DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
733 
734   // EA INVOKE_VIRTUAL_RANGE_QUICK
735   DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
736 
737   // EB IPUT_BOOLEAN_QUICK vA, vB, index
738   DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
739 
740   // EC IPUT_BYTE_QUICK vA, vB, index
741   DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
742 
743   // ED IPUT_CHAR_QUICK vA, vB, index
744   DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
745 
746   // EE IPUT_SHORT_QUICK vA, vB, index
747   DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
748 
749   // EF IGET_BOOLEAN_QUICK vA, vB, index
750   DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
751 
752   // F0 IGET_BYTE_QUICK vA, vB, index
753   DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
754 
755   // F1 IGET_CHAR_QUICK vA, vB, index
756   DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
757 
758   // F2 IGET_SHORT_QUICK vA, vB, index
759   DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
760 
761   // F3 UNUSED_F3
762   DF_NOP,
763 
764   // F4 UNUSED_F4
765   DF_NOP,
766 
767   // F5 UNUSED_F5
768   DF_NOP,
769 
770   // F6 UNUSED_F6
771   DF_NOP,
772 
773   // F7 UNUSED_F7
774   DF_NOP,
775 
776   // F8 UNUSED_F8
777   DF_NOP,
778 
779   // F9 UNUSED_F9
780   DF_NOP,
781 
782   // FA UNUSED_FA
783   DF_NOP,
784 
785   // FB UNUSED_FB
786   DF_NOP,
787 
788   // FC UNUSED_FC
789   DF_NOP,
790 
791   // FD UNUSED_FD
792   DF_NOP,
793 
794   // FE UNUSED_FE
795   DF_NOP,
796 
797   // FF UNUSED_FF
798   DF_NOP,
799 
800   // Beginning of extended MIR opcodes
801   // 100 MIR_PHI
802   DF_DA | DF_NULL_TRANSFER_N,
803 
804   // 101 MIR_COPY
805   DF_DA | DF_UB | DF_IS_MOVE,
806 
807   // 102 MIR_FUSED_CMPL_FLOAT
808   DF_UA | DF_UB | DF_FP_A | DF_FP_B,
809 
810   // 103 MIR_FUSED_CMPG_FLOAT
811   DF_UA | DF_UB | DF_FP_A | DF_FP_B,
812 
813   // 104 MIR_FUSED_CMPL_DOUBLE
814   DF_UA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
815 
816   // 105 MIR_FUSED_CMPG_DOUBLE
817   DF_UA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
818 
819   // 106 MIR_FUSED_CMP_LONG
820   DF_UA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
821 
822   // 107 MIR_NOP
823   DF_NOP,
824 
825   // 108 MIR_NULL_CHECK
826   DF_UA | DF_REF_A | DF_NULL_CHK_A | DF_LVN,
827 
828   // 109 MIR_RANGE_CHECK
829   0,
830 
831   // 10A MIR_DIV_ZERO_CHECK
832   0,
833 
834   // 10B MIR_CHECK
835   0,
836 
837   // 10D MIR_SELECT
838   DF_DA | DF_UB,
839 
840   // 10E MirOpConstVector
841   0,
842 
843   // 10F MirOpMoveVector
844   0,
845 
846   // 110 MirOpPackedMultiply
847   0,
848 
849   // 111 MirOpPackedAddition
850   0,
851 
852   // 112 MirOpPackedSubtract
853   0,
854 
855   // 113 MirOpPackedShiftLeft
856   0,
857 
858   // 114 MirOpPackedSignedShiftRight
859   0,
860 
861   // 115 MirOpPackedUnsignedShiftRight
862   0,
863 
864   // 116 MirOpPackedAnd
865   0,
866 
867   // 117 MirOpPackedOr
868   0,
869 
870   // 118 MirOpPackedXor
871   0,
872 
873   // 119 MirOpPackedAddReduce
874   DF_FORMAT_EXTENDED,
875 
876   // 11A MirOpPackedReduce
877   DF_FORMAT_EXTENDED,
878 
879   // 11B MirOpPackedSet
880   DF_FORMAT_EXTENDED,
881 
882   // 11C MirOpReserveVectorRegisters
883   0,
884 
885   // 11D MirOpReturnVectorRegisters
886   0,
887 
888   // 11E MirOpMemBarrier
889   0,
890 
891   // 11F MirOpPackedArrayGet
892   DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
893 
894   // 120 MirOpPackedArrayPut
895   DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
896 
897   // 121 MirOpMaddInt
898   DF_FORMAT_EXTENDED,
899 
900   // 122 MirOpMsubInt
901   DF_FORMAT_EXTENDED,
902 
903   // 123 MirOpMaddLong
904   DF_FORMAT_EXTENDED,
905 
906   // 124 MirOpMsubLong
907   DF_FORMAT_EXTENDED,
908 };
909 
910 /* Any register that is used before being defined is considered live-in */
911 void MIRGraph::HandleLiveInUse(ArenaBitVector* use_v, ArenaBitVector* def_v,
912                                ArenaBitVector* live_in_v, int dalvik_reg_id) {
913   use_v->SetBit(dalvik_reg_id);
914   if (!def_v->IsBitSet(dalvik_reg_id)) {
915     live_in_v->SetBit(dalvik_reg_id);
916   }
917 }
918 
919 /* Mark a reg as being defined */
920 void MIRGraph::HandleDef(ArenaBitVector* def_v, int dalvik_reg_id) {
921   def_v->SetBit(dalvik_reg_id);
922 }
923 
924 void MIRGraph::HandleExtended(ArenaBitVector* use_v, ArenaBitVector* def_v,
925                               ArenaBitVector* live_in_v,
926                               const MIR::DecodedInstruction& d_insn) {
927   // For vector MIRs, vC contains type information
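  // The element type is packed into the upper 16 bits of vC; 64-bit element
  // types (k64, kDouble) make the scalar operand a VR pair, so vX+1 is
  // tracked as well.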
928   bool is_vector_type_wide = false;
929   int type_size = d_insn.vC >> 16;
930   if (type_size == k64 || type_size == kDouble) {
931     is_vector_type_wide = true;
932   }
933 
934   switch (static_cast<int>(d_insn.opcode)) {
935     case kMirOpPackedAddReduce:
936       HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vA);
937       if (is_vector_type_wide == true) {
938         HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vA + 1);
939       }
940       HandleDef(def_v, d_insn.vA);
941       if (is_vector_type_wide == true) {
942         HandleDef(def_v, d_insn.vA + 1);
943       }
944       break;
945     case kMirOpPackedReduce:
946       HandleDef(def_v, d_insn.vA);
947       if (is_vector_type_wide == true) {
948         HandleDef(def_v, d_insn.vA + 1);
949       }
950       break;
951     case kMirOpPackedSet:
952       HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vB);
953       if (is_vector_type_wide == true) {
954         HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vB + 1);
955       }
956       break;
957     case kMirOpMaddInt:
958     case kMirOpMsubInt:
959       HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vB);
960       HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vC);
961       HandleLiveInUse(use_v, def_v, live_in_v, d_insn.arg[0]);
962       HandleDef(def_v, d_insn.vA);
963       break;
964     case kMirOpMaddLong:
965     case kMirOpMsubLong:
966       HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vB);
967       HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vB + 1);
968       HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vC);
969       HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vC + 1);
970       HandleLiveInUse(use_v, def_v, live_in_v, d_insn.arg[0]);
971       HandleLiveInUse(use_v, def_v, live_in_v, d_insn.arg[0] + 1);
972       HandleDef(def_v, d_insn.vA);
973       HandleDef(def_v, d_insn.vA + 1);
974       break;
975     default:
976       LOG(ERROR) << "Unexpected Extended Opcode " << d_insn.opcode;
977       break;
978   }
979 }
980 
981 /*
982  * Determine the live-in variables for natural loops. Variables that are live-in
983  * to the main loop body are considered to be defined in the entry block.
984  */
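/*
 * Per-block vectors computed here:
 *   use_v     - VRs read anywhere in this block,
 *   def_v     - VRs written in this block,
 *   live_in_v - VRs read before any local write, i.e. live on entry.
 */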
985 bool MIRGraph::FindLocalLiveIn(BasicBlock* bb) {
986   MIR* mir;
987   ArenaBitVector *use_v, *def_v, *live_in_v;
988 
989   if (bb->data_flow_info == nullptr) return false;
990 
991   use_v = bb->data_flow_info->use_v =
992       new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false, kBitMapUse);
993   def_v = bb->data_flow_info->def_v =
994       new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false, kBitMapDef);
995   live_in_v = bb->data_flow_info->live_in_v =
996       new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false, kBitMapLiveIn);
997 
998   for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
999     uint64_t df_attributes = GetDataFlowAttributes(mir);
1000     MIR::DecodedInstruction* d_insn = &mir->dalvikInsn;
1001 
1002     if (df_attributes & DF_HAS_USES) {
1003       if (df_attributes & DF_UA) {
1004         HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vA);
1005         if (df_attributes & DF_A_WIDE) {
1006           HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vA+1);
1007         }
1008       }
1009       if (df_attributes & DF_UB) {
1010         HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vB);
1011         if (df_attributes & DF_B_WIDE) {
1012           HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vB+1);
1013         }
1014       }
1015       if (df_attributes & DF_UC) {
1016         HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vC);
1017         if (df_attributes & DF_C_WIDE) {
1018           HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vC+1);
1019         }
1020       }
1021     }
1022     if (df_attributes & DF_FORMAT_35C) {
1023       for (unsigned int i = 0; i < d_insn->vA; i++) {
1024         HandleLiveInUse(use_v, def_v, live_in_v, d_insn->arg[i]);
1025       }
1026     }
1027     if (df_attributes & DF_FORMAT_3RC) {
1028       for (unsigned int i = 0; i < d_insn->vA; i++) {
1029         HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vC+i);
1030       }
1031     }
1032     if (df_attributes & DF_HAS_DEFS) {
1033       HandleDef(def_v, d_insn->vA);
1034       if (df_attributes & DF_A_WIDE) {
1035         HandleDef(def_v, d_insn->vA+1);
1036       }
1037     }
1038     if (df_attributes & DF_FORMAT_EXTENDED) {
1039       HandleExtended(use_v, def_v, live_in_v, mir->dalvikInsn);
1040     }
1041   }
1042   return true;
1043 }
1044 
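/*
 * Create a new SSA name for v_reg: bump its subscript and append the
 * (base vreg, subscript) pair to the parallel ssa_base_vregs_/ssa_subscripts_
 * arrays. Returns the new SSA register number.
 */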
1045 int MIRGraph::AddNewSReg(int v_reg) {
1046   int subscript = ++ssa_last_defs_[v_reg];
1047   uint32_t ssa_reg = GetNumSSARegs();
1048   SetNumSSARegs(ssa_reg + 1);
1049   ssa_base_vregs_.push_back(v_reg);
1050   ssa_subscripts_.push_back(subscript);
1051   DCHECK_EQ(ssa_base_vregs_.size(), ssa_subscripts_.size());
1052   // If we are expanding very late, update use counts too.
1053   if (ssa_reg > 0 && use_counts_.size() == ssa_reg) {
1054     // Need to expand the counts.
1055     use_counts_.push_back(0);
1056     raw_use_counts_.push_back(0);
1057   }
1058   return ssa_reg;
1059 }
1060 
1061 /* Find out the latest SSA register for a given Dalvik register */
1062 void MIRGraph::HandleSSAUse(int* uses, int dalvik_reg, int reg_index) {
1063   DCHECK((dalvik_reg >= 0) && (dalvik_reg < static_cast<int>(GetNumOfCodeAndTempVRs())));
1064   uses[reg_index] = vreg_to_ssa_map_[dalvik_reg];
1065 }
1066 
1067 /* Setup a new SSA register for a given Dalvik register */
1068 void MIRGraph::HandleSSADef(int* defs, int dalvik_reg, int reg_index) {
1069   DCHECK((dalvik_reg >= 0) && (dalvik_reg < static_cast<int>(GetNumOfCodeAndTempVRs())));
1070   int ssa_reg = AddNewSReg(dalvik_reg);
1071   vreg_to_ssa_map_[dalvik_reg] = ssa_reg;
1072   defs[reg_index] = ssa_reg;
1073 }
1074 
1075 void MIRGraph::AllocateSSAUseData(MIR *mir, int num_uses) {
1076   mir->ssa_rep->num_uses = num_uses;
1077 
1078   if (mir->ssa_rep->num_uses_allocated < num_uses) {
1079     mir->ssa_rep->uses = arena_->AllocArray<int32_t>(num_uses, kArenaAllocDFInfo);
1080   }
1081 }
1082 
1083 void MIRGraph::AllocateSSADefData(MIR *mir, int num_defs) {
1084   mir->ssa_rep->num_defs = num_defs;
1085 
1086   if (mir->ssa_rep->num_defs_allocated < num_defs) {
1087     mir->ssa_rep->defs = arena_->AllocArray<int32_t>(num_defs, kArenaAllocDFInfo);
1088   }
1089 }
1090 
1091 /* Look up new SSA names for format_35c instructions */
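/* (vA holds the argument count; the argument VRs themselves are in arg[0..vA-1].) */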
1092 void MIRGraph::DataFlowSSAFormat35C(MIR* mir) {
1093   MIR::DecodedInstruction* d_insn = &mir->dalvikInsn;
1094   int num_uses = d_insn->vA;
1095   int i;
1096 
1097   AllocateSSAUseData(mir, num_uses);
1098 
1099   for (i = 0; i < num_uses; i++) {
1100     HandleSSAUse(mir->ssa_rep->uses, d_insn->arg[i], i);
1101   }
1102 }
1103 
1104 /* Look up new SSA names for format_3rc instructions */
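/* (vA holds the argument count; the arguments are the contiguous VRs vC..vC+vA-1.) */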
1105 void MIRGraph::DataFlowSSAFormat3RC(MIR* mir) {
1106   MIR::DecodedInstruction* d_insn = &mir->dalvikInsn;
1107   int num_uses = d_insn->vA;
1108   int i;
1109 
1110   AllocateSSAUseData(mir, num_uses);
1111 
1112   for (i = 0; i < num_uses; i++) {
1113     HandleSSAUse(mir->ssa_rep->uses, d_insn->vC+i, i);
1114   }
1115 }
1116 
1117 void MIRGraph::DataFlowSSAFormatExtended(MIR* mir) {
1118   const MIR::DecodedInstruction& d_insn = mir->dalvikInsn;
1119   // For vector MIRs, vC contains type information
1120   bool is_vector_type_wide = false;
1121   int type_size = d_insn.vC >> 16;
1122   if (type_size == k64 || type_size == kDouble) {
1123     is_vector_type_wide = true;
1124   }
1125 
1126   switch (static_cast<int>(mir->dalvikInsn.opcode)) {
1127     case kMirOpPackedAddReduce:
1128       // We have one use, plus one more for wide
1129       AllocateSSAUseData(mir, is_vector_type_wide ? 2 : 1);
1130       HandleSSAUse(mir->ssa_rep->uses, d_insn.vA, 0);
1131       if (is_vector_type_wide == true) {
1132         HandleSSAUse(mir->ssa_rep->uses, d_insn.vA + 1, 1);
1133       }
1134 
1135       // We have a def, plus one more for wide
1136       AllocateSSADefData(mir, is_vector_type_wide ? 2 : 1);
1137       HandleSSADef(mir->ssa_rep->defs, d_insn.vA, 0);
1138       if (is_vector_type_wide == true) {
1139         HandleSSADef(mir->ssa_rep->defs, d_insn.vA + 1, 1);
1140       }
1141       break;
1142     case kMirOpPackedReduce:
1143       // We have a def, plus one more for wide
1144       AllocateSSADefData(mir, is_vector_type_wide ? 2 : 1);
1145       HandleSSADef(mir->ssa_rep->defs, d_insn.vA, 0);
1146       if (is_vector_type_wide == true) {
1147         HandleSSADef(mir->ssa_rep->defs, d_insn.vA + 1, 1);
1148       }
1149       break;
1150     case kMirOpPackedSet:
1151       // We have one use, plus one more for wide
1152       AllocateSSAUseData(mir, is_vector_type_wide ? 2 : 1);
1153       HandleSSAUse(mir->ssa_rep->uses, d_insn.vB, 0);
1154       if (is_vector_type_wide == true) {
1155         HandleSSAUse(mir->ssa_rep->uses, d_insn.vB + 1, 1);
1156       }
1157       break;
1158     case kMirOpMaddInt:
1159     case kMirOpMsubInt:
1160       AllocateSSAUseData(mir, 3);
1161       HandleSSAUse(mir->ssa_rep->uses, d_insn.vB, 0);
1162       HandleSSAUse(mir->ssa_rep->uses, d_insn.vC, 1);
1163       HandleSSAUse(mir->ssa_rep->uses, d_insn.arg[0], 2);
1164       AllocateSSADefData(mir, 1);
1165       HandleSSADef(mir->ssa_rep->defs, d_insn.vA, 0);
1166       break;
1167     case kMirOpMaddLong:
1168     case kMirOpMsubLong:
1169       AllocateSSAUseData(mir, 6);
1170       HandleSSAUse(mir->ssa_rep->uses, d_insn.vB, 0);
1171       HandleSSAUse(mir->ssa_rep->uses, d_insn.vB + 1, 1);
1172       HandleSSAUse(mir->ssa_rep->uses, d_insn.vC, 2);
1173       HandleSSAUse(mir->ssa_rep->uses, d_insn.vC + 1, 3);
1174       HandleSSAUse(mir->ssa_rep->uses, d_insn.arg[0], 4);
1175       HandleSSAUse(mir->ssa_rep->uses, d_insn.arg[0] + 1, 5);
1176       AllocateSSADefData(mir, 2);
1177       HandleSSADef(mir->ssa_rep->defs, d_insn.vA, 0);
1178       HandleSSADef(mir->ssa_rep->defs, d_insn.vA + 1, 1);
1179       break;
1180     default:
1181       LOG(ERROR) << "Missing case for extended MIR: " << mir->dalvikInsn.opcode;
1182       break;
1183   }
1184 }
1185 
1186 /* Entry function to convert a block into SSA representation */
1187 bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
1188   if (bb->data_flow_info == nullptr) return false;
1189 
1190   /*
1191    * Pruned SSA form: Insert phi nodes for each dalvik register marked in phi_node_blocks
1192    * only if the dalvik register is in the live-in set.
1193    */
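  // Iterate from the highest VR down: each phi is prepended, leaving the phi
  // nodes in ascending VR order at the head of the block.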
1194   BasicBlockId bb_id = bb->id;
1195   for (int dalvik_reg = GetNumOfCodeAndTempVRs() - 1; dalvik_reg >= 0; dalvik_reg--) {
1196     if (temp_.ssa.phi_node_blocks[dalvik_reg]->IsBitSet(bb_id)) {
1197       if (!bb->data_flow_info->live_in_v->IsBitSet(dalvik_reg)) {
1198         /* Variable will be clobbered before being used - no need for phi */
1199         vreg_to_ssa_map_[dalvik_reg] = INVALID_SREG;
1200         continue;
1201       }
1202       MIR *phi = NewMIR();
1203       phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpPhi);
1204       phi->dalvikInsn.vA = dalvik_reg;
1205       phi->offset = bb->start_offset;
1206       phi->m_unit_index = 0;  // Arbitrarily assign all Phi nodes to outermost method.
1207       bb->PrependMIR(phi);
1208     }
1209   }
1210 
1211   for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
1212     mir->ssa_rep =
1213         static_cast<struct SSARepresentation *>(arena_->Alloc(sizeof(SSARepresentation),
1214                                                               kArenaAllocDFInfo));
1215     memset(mir->ssa_rep, 0, sizeof(*mir->ssa_rep));
1216 
1217     uint64_t df_attributes = GetDataFlowAttributes(mir);
1218 
1219     // If not a pseudo-op, check for an invoke, which makes the method a non-leaf.
1220     if (!MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
1221       int flags = mir->dalvikInsn.FlagsOf();
1222 
1223       if ((flags & Instruction::kInvoke) != 0) {
1224         attributes_ &= ~METHOD_IS_LEAF;
1225       }
1226     }
1227 
1228     int num_uses = 0;
1229 
1230     if (df_attributes & DF_FORMAT_35C) {
1231       DataFlowSSAFormat35C(mir);
1232       continue;
1233     }
1234 
1235     if (df_attributes & DF_FORMAT_3RC) {
1236       DataFlowSSAFormat3RC(mir);
1237       continue;
1238     }
1239 
1240     if (df_attributes & DF_FORMAT_EXTENDED) {
1241       DataFlowSSAFormatExtended(mir);
1242       continue;
1243     }
1244 
1245     if (df_attributes & DF_HAS_USES) {
1246       if (df_attributes & DF_UA) {
1247         num_uses++;
1248         if (df_attributes & DF_A_WIDE) {
1249           num_uses++;
1250         }
1251       }
1252       if (df_attributes & DF_UB) {
1253         num_uses++;
1254         if (df_attributes & DF_B_WIDE) {
1255           num_uses++;
1256         }
1257       }
1258       if (df_attributes & DF_UC) {
1259         num_uses++;
1260         if (df_attributes & DF_C_WIDE) {
1261           num_uses++;
1262         }
1263       }
1264     }
1265 
1266     AllocateSSAUseData(mir, num_uses);
1267 
1268     int num_defs = 0;
1269 
1270     if (df_attributes & DF_HAS_DEFS) {
1271       num_defs++;
1272       if (df_attributes & DF_A_WIDE) {
1273         num_defs++;
1274       }
1275     }
1276 
1277     AllocateSSADefData(mir, num_defs);
1278 
1279     MIR::DecodedInstruction* d_insn = &mir->dalvikInsn;
1280 
1281     if (df_attributes & DF_HAS_USES) {
1282       num_uses = 0;
1283       if (df_attributes & DF_UA) {
1284         HandleSSAUse(mir->ssa_rep->uses, d_insn->vA, num_uses++);
1285         if (df_attributes & DF_A_WIDE) {
1286           HandleSSAUse(mir->ssa_rep->uses, d_insn->vA+1, num_uses++);
1287         }
1288       }
1289       if (df_attributes & DF_UB) {
1290         HandleSSAUse(mir->ssa_rep->uses, d_insn->vB, num_uses++);
1291         if (df_attributes & DF_B_WIDE) {
1292           HandleSSAUse(mir->ssa_rep->uses, d_insn->vB+1, num_uses++);
1293         }
1294       }
1295       if (df_attributes & DF_UC) {
1296         HandleSSAUse(mir->ssa_rep->uses, d_insn->vC, num_uses++);
1297         if (df_attributes & DF_C_WIDE) {
1298           HandleSSAUse(mir->ssa_rep->uses, d_insn->vC+1, num_uses++);
1299         }
1300       }
1301     }
1302     if (df_attributes & DF_HAS_DEFS) {
1303       HandleSSADef(mir->ssa_rep->defs, d_insn->vA, 0);
1304       if (df_attributes & DF_A_WIDE) {
1305         HandleSSADef(mir->ssa_rep->defs, d_insn->vA+1, 1);
1306       }
1307     }
1308   }
1309 
1310   /*
1311    * Take a snapshot of Dalvik->SSA mapping at the end of each block. The
1312    * input to PHI nodes can be derived from the snapshot of all
1313    * predecessor blocks.
1314    */
1315   bb->data_flow_info->vreg_to_ssa_map_exit =
1316       arena_->AllocArray<int32_t>(GetNumOfCodeAndTempVRs(), kArenaAllocDFInfo);
1317 
1318   memcpy(bb->data_flow_info->vreg_to_ssa_map_exit, vreg_to_ssa_map_,
1319          sizeof(int) * GetNumOfCodeAndTempVRs());
1320   return true;
1321 }
1322 
1323 void MIRGraph::InitializeBasicBlockDataFlow() {
1324   /*
1325    * Allocate the BasicBlockDataFlow structure for the entry, exit, and code blocks.
1326    */
1327   for (BasicBlock* bb : block_list_) {
1328     if (bb->hidden == true) continue;
1329     if (bb->block_type == kDalvikByteCode ||
1330         bb->block_type == kEntryBlock ||
1331         bb->block_type == kExitBlock) {
1332       bb->data_flow_info =
1333           static_cast<BasicBlockDataFlow*>(arena_->Alloc(sizeof(BasicBlockDataFlow),
1334                                                          kArenaAllocDFInfo));
1335     }
1336   }
1337 }
1338 
1339 /* Setup the basic data structures for SSA conversion */
1340 void MIRGraph::CompilerInitializeSSAConversion() {
1341   size_t num_reg = GetNumOfCodeAndTempVRs();
1342 
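  // Reserve room for the initial VRs plus the expected number of SSA defs,
  // with some slack so that later AddNewSReg() pushes rarely reallocate.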
1343   ssa_base_vregs_.clear();
1344   ssa_base_vregs_.reserve(num_reg + GetDefCount() + 128);
1345   ssa_subscripts_.clear();
1346   ssa_subscripts_.reserve(num_reg + GetDefCount() + 128);
1347 
1348   /*
1349    * Initial number of SSA registers is equal to the number of Dalvik
1350    * registers.
1351    */
1352   SetNumSSARegs(num_reg);
1353 
1354   /*
1355    * Initialize the SSA2Dalvik map list. For the first num_reg elements,
1356    * the subscript is 0 so we use the ENCODE_REG_SUB macro to encode the value
1357    * into "(0 << 16) | i"
1358    */
1359   for (unsigned int i = 0; i < num_reg; i++) {
1360     ssa_base_vregs_.push_back(i);
1361     ssa_subscripts_.push_back(0);
1362   }
1363 
1364   /*
1365    * Initialize the DalvikToSSAMap map. There is one entry for each
1366    * Dalvik register, and the SSA names for those are the same.
1367    */
1368   vreg_to_ssa_map_ = arena_->AllocArray<int32_t>(num_reg, kArenaAllocDFInfo);
1369   /* Keep track of the highest def for each dalvik reg */
1370   ssa_last_defs_ = arena_->AllocArray<int>(num_reg, kArenaAllocDFInfo);
1371 
1372   for (unsigned int i = 0; i < num_reg; i++) {
1373     vreg_to_ssa_map_[i] = i;
1374     ssa_last_defs_[i] = 0;
1375   }
1376 
1377   // Create a compiler temporary for Method*. This is done after SSA initialization.
1378   CompilerTemp* method_temp = GetNewCompilerTemp(kCompilerTempSpecialMethodPtr, false);
1379   // The MIR graph keeps track of the sreg for method pointer specially, so record that now.
1380   method_sreg_ = method_temp->s_reg_low;
1381 
1382   InitializeBasicBlockDataFlow();
1383 }
1384 
1385 uint32_t MIRGraph::GetUseCountWeight(BasicBlock* bb) const {
1386   // Each level of nesting adds *100 to count, up to 3 levels deep.
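  // e.g. depth 0 -> weight 1, depth 1 -> 100, depth 2 -> 200, depth >= 3 -> 300.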
1387   uint32_t depth = std::min(3U, static_cast<uint32_t>(bb->nesting_depth));
1388   uint32_t weight = std::max(1U, depth * 100);
1389   return weight;
1390 }
1391 
1392 /*
1393  * Count uses, weighting by loop nesting depth.  This code only
1394  * counts explicitly used s_regs.  A later phase will add implicit
1395  * counts for things such as Method*, null-checked references, etc.
1396  */
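/* (raw_use_counts_ gets +1 per use; use_counts_ gets +GetUseCountWeight(bb).) */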
1397 void MIRGraph::CountUses(BasicBlock* bb) {
1398   if (bb->block_type != kDalvikByteCode) {
1399     return;
1400   }
1401   uint32_t weight = GetUseCountWeight(bb);
1402   for (MIR* mir = bb->first_mir_insn; (mir != nullptr); mir = mir->next) {
1403     if (mir->ssa_rep == nullptr) {
1404       continue;
1405     }
1406     for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
1407       int s_reg = mir->ssa_rep->uses[i];
1408       raw_use_counts_[s_reg] += 1u;
1409       use_counts_[s_reg] += weight;
1410     }
1411   }
1412 }
1413 
1414 /* Verify that each claimed predecessor actually lists this block as a successor */
1415 bool MIRGraph::VerifyPredInfo(BasicBlock* bb) {
1416   for (BasicBlockId pred_id : bb->predecessors) {
1417     BasicBlock* pred_bb = GetBasicBlock(pred_id);
1418     DCHECK(pred_bb != nullptr);
1419     bool found = false;
1420     if (pred_bb->taken == bb->id) {
1421       found = true;
1422     } else if (pred_bb->fall_through == bb->id) {
1423       found = true;
1424     } else if (pred_bb->successor_block_list_type != kNotUsed) {
1425       for (SuccessorBlockInfo* successor_block_info : pred_bb->successor_blocks) {
1426         BasicBlockId succ_bb = successor_block_info->block;
1427         if (succ_bb == bb->id) {
1428           found = true;
1429           break;
1430         }
1431       }
1432     }
1433     if (found == false) {
1434       char block_name1[BLOCK_NAME_LEN], block_name2[BLOCK_NAME_LEN];
1435       GetBlockName(bb, block_name1);
1436       GetBlockName(pred_bb, block_name2);
1437       DumpCFG("/sdcard/cfg/", false);
1438       LOG(FATAL) << "Successor " << block_name1 << " not found from "
1439                  << block_name2;
1440     }
1441   }
1442   return true;
1443 }
1444 
1445 void MIRGraph::VerifyDataflow() {
1446   /* Verify that all blocks are connected as claimed */
1447   AllNodesIterator iter(this);
1448   for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
1449     VerifyPredInfo(bb);
1450   }
1451 }
1452 
1453 }  // namespace art
1454