/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <algorithm>
#include <memory>

#include "base/logging.h"
#include "base/scoped_arena_containers.h"
#include "dataflow_iterator-inl.h"
#include "compiler_ir.h"
#include "dex_flags.h"
#include "dex_instruction-inl.h"
#include "dex/mir_field_info.h"
#include "dex/verified_method.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
#include "driver/dex_compilation_unit.h"
#include "utils.h"

namespace art {

enum InstructionAnalysisAttributeOps : uint8_t {
  kUninterestingOp = 0,
  kArithmeticOp,
  kFpOp,
  kSingleOp,
  kDoubleOp,
  kIntOp,
  kLongOp,
  kBranchOp,
  kInvokeOp,
  kArrayOp,
  kHeavyweightOp,
  kSimpleConstOp,
  kMoveOp,
  kSwitch
};

enum InstructionAnalysisAttributeMasks : uint16_t {
  kAnNone = 1 << kUninterestingOp,
  kAnMath = 1 << kArithmeticOp,
  kAnFp = 1 << kFpOp,
  kAnLong = 1 << kLongOp,
  kAnInt = 1 << kIntOp,
  kAnSingle = 1 << kSingleOp,
  kAnDouble = 1 << kDoubleOp,
  kAnFloatMath = 1 << kFpOp,
  kAnBranch = 1 << kBranchOp,
  kAnInvoke = 1 << kInvokeOp,
  kAnArrayOp = 1 << kArrayOp,
  kAnHeavyWeight = 1 << kHeavyweightOp,
  kAnSimpleConst = 1 << kSimpleConstOp,
  kAnMove = 1 << kMoveOp,
  kAnSwitch = 1 << kSwitch,
  kAnComputational = kAnMath | kAnArrayOp | kAnMove | kAnSimpleConst,
};

// Instruction characteristics used to statically identify computation-intensive methods.
static const uint16_t kAnalysisAttributes[kMirOpLast] = {
  // 00 NOP
  kAnNone,

  // 01 MOVE vA, vB
  kAnMove,

  // 02 MOVE_FROM16 vAA, vBBBB
  kAnMove,

  // 03 MOVE_16 vAAAA, vBBBB
  kAnMove,

  // 04 MOVE_WIDE vA, vB
  kAnMove,

  // 05 MOVE_WIDE_FROM16 vAA, vBBBB
  kAnMove,

  // 06 MOVE_WIDE_16 vAAAA, vBBBB
  kAnMove,

  // 07 MOVE_OBJECT vA, vB
  kAnMove,

  // 08 MOVE_OBJECT_FROM16 vAA, vBBBB
  kAnMove,

  // 09 MOVE_OBJECT_16 vAAAA, vBBBB
  kAnMove,

  // 0A MOVE_RESULT vAA
  kAnMove,

  // 0B MOVE_RESULT_WIDE vAA
  kAnMove,

  // 0C MOVE_RESULT_OBJECT vAA
  kAnMove,

  // 0D MOVE_EXCEPTION vAA
  kAnMove,

  // 0E RETURN_VOID
  kAnBranch,

  // 0F RETURN vAA
  kAnBranch,

  // 10 RETURN_WIDE vAA
  kAnBranch,

  // 11 RETURN_OBJECT vAA
  kAnBranch,

  // 12 CONST_4 vA, #+B
  kAnSimpleConst,

  // 13 CONST_16 vAA, #+BBBB
  kAnSimpleConst,

  // 14 CONST vAA, #+BBBBBBBB
  kAnSimpleConst,

  // 15 CONST_HIGH16 vAA, #+BBBB0000
  kAnSimpleConst,

  // 16 CONST_WIDE_16 vAA, #+BBBB
  kAnSimpleConst,

  // 17 CONST_WIDE_32 vAA, #+BBBBBBBB
  kAnSimpleConst,

  // 18 CONST_WIDE vAA, #+BBBBBBBBBBBBBBBB
  kAnSimpleConst,

  // 19 CONST_WIDE_HIGH16 vAA, #+BBBB000000000000
  kAnSimpleConst,

  // 1A CONST_STRING vAA, string@BBBB
  kAnNone,

  // 1B CONST_STRING_JUMBO vAA, string@BBBBBBBB
  kAnNone,

  // 1C CONST_CLASS vAA, type@BBBB
  kAnNone,

  // 1D MONITOR_ENTER vAA
  kAnNone,

  // 1E MONITOR_EXIT vAA
  kAnNone,

  // 1F CHECK_CAST vAA, type@BBBB
  kAnNone,

  // 20 INSTANCE_OF vA, vB, type@CCCC
  kAnNone,

  // 21 ARRAY_LENGTH vA, vB
  kAnArrayOp,

  // 22 NEW_INSTANCE vAA, type@BBBB
  kAnHeavyWeight,

  // 23 NEW_ARRAY vA, vB, type@CCCC
  kAnHeavyWeight,

  // 24 FILLED_NEW_ARRAY {vD, vE, vF, vG, vA}
  kAnHeavyWeight,

  // 25 FILLED_NEW_ARRAY_RANGE {vCCCC .. vNNNN}, type@BBBB
  kAnHeavyWeight,

  // 26 FILL_ARRAY_DATA vAA, +BBBBBBBB
  kAnNone,

  // 27 THROW vAA
  kAnHeavyWeight | kAnBranch,

  // 28 GOTO
  kAnBranch,

  // 29 GOTO_16
  kAnBranch,

  // 2A GOTO_32
  kAnBranch,

  // 2B PACKED_SWITCH vAA, +BBBBBBBB
  kAnSwitch,

  // 2C SPARSE_SWITCH vAA, +BBBBBBBB
  kAnSwitch,

  // 2D CMPL_FLOAT vAA, vBB, vCC
  kAnMath | kAnFp | kAnSingle,

  // 2E CMPG_FLOAT vAA, vBB, vCC
  kAnMath | kAnFp | kAnSingle,

  // 2F CMPL_DOUBLE vAA, vBB, vCC
  kAnMath | kAnFp | kAnDouble,

  // 30 CMPG_DOUBLE vAA, vBB, vCC
  kAnMath | kAnFp | kAnDouble,

  // 31 CMP_LONG vAA, vBB, vCC
  kAnMath | kAnLong,

  // 32 IF_EQ vA, vB, +CCCC
  kAnMath | kAnBranch | kAnInt,

  // 33 IF_NE vA, vB, +CCCC
  kAnMath | kAnBranch | kAnInt,

  // 34 IF_LT vA, vB, +CCCC
  kAnMath | kAnBranch | kAnInt,

  // 35 IF_GE vA, vB, +CCCC
  kAnMath | kAnBranch | kAnInt,

  // 36 IF_GT vA, vB, +CCCC
  kAnMath | kAnBranch | kAnInt,

  // 37 IF_LE vA, vB, +CCCC
  kAnMath | kAnBranch | kAnInt,

  // 38 IF_EQZ vAA, +BBBB
  kAnMath | kAnBranch | kAnInt,

  // 39 IF_NEZ vAA, +BBBB
  kAnMath | kAnBranch | kAnInt,

  // 3A IF_LTZ vAA, +BBBB
  kAnMath | kAnBranch | kAnInt,

  // 3B IF_GEZ vAA, +BBBB
  kAnMath | kAnBranch | kAnInt,

  // 3C IF_GTZ vAA, +BBBB
  kAnMath | kAnBranch | kAnInt,

  // 3D IF_LEZ vAA, +BBBB
  kAnMath | kAnBranch | kAnInt,

  // 3E UNUSED_3E
  kAnNone,

  // 3F UNUSED_3F
  kAnNone,

  // 40 UNUSED_40
  kAnNone,

  // 41 UNUSED_41
  kAnNone,

  // 42 UNUSED_42
  kAnNone,

  // 43 UNUSED_43
  kAnNone,

  // 44 AGET vAA, vBB, vCC
  kAnArrayOp,

  // 45 AGET_WIDE vAA, vBB, vCC
  kAnArrayOp,

  // 46 AGET_OBJECT vAA, vBB, vCC
  kAnArrayOp,

  // 47 AGET_BOOLEAN vAA, vBB, vCC
  kAnArrayOp,

  // 48 AGET_BYTE vAA, vBB, vCC
  kAnArrayOp,

  // 49 AGET_CHAR vAA, vBB, vCC
  kAnArrayOp,

  // 4A AGET_SHORT vAA, vBB, vCC
  kAnArrayOp,

  // 4B APUT vAA, vBB, vCC
  kAnArrayOp,

  // 4C APUT_WIDE vAA, vBB, vCC
  kAnArrayOp,

  // 4D APUT_OBJECT vAA, vBB, vCC
  kAnArrayOp,

  // 4E APUT_BOOLEAN vAA, vBB, vCC
  kAnArrayOp,

  // 4F APUT_BYTE vAA, vBB, vCC
  kAnArrayOp,

  // 50 APUT_CHAR vAA, vBB, vCC
  kAnArrayOp,

  // 51 APUT_SHORT vAA, vBB, vCC
  kAnArrayOp,

  // 52 IGET vA, vB, field@CCCC
  kAnNone,

  // 53 IGET_WIDE vA, vB, field@CCCC
  kAnNone,

  // 54 IGET_OBJECT vA, vB, field@CCCC
  kAnNone,

  // 55 IGET_BOOLEAN vA, vB, field@CCCC
  kAnNone,

  // 56 IGET_BYTE vA, vB, field@CCCC
  kAnNone,

  // 57 IGET_CHAR vA, vB, field@CCCC
  kAnNone,

  // 58 IGET_SHORT vA, vB, field@CCCC
  kAnNone,

  // 59 IPUT vA, vB, field@CCCC
  kAnNone,

  // 5A IPUT_WIDE vA, vB, field@CCCC
  kAnNone,

  // 5B IPUT_OBJECT vA, vB, field@CCCC
  kAnNone,

  // 5C IPUT_BOOLEAN vA, vB, field@CCCC
  kAnNone,

  // 5D IPUT_BYTE vA, vB, field@CCCC
  kAnNone,

  // 5E IPUT_CHAR vA, vB, field@CCCC
  kAnNone,

  // 5F IPUT_SHORT vA, vB, field@CCCC
  kAnNone,

  // 60 SGET vAA, field@BBBB
  kAnNone,

  // 61 SGET_WIDE vAA, field@BBBB
  kAnNone,

  // 62 SGET_OBJECT vAA, field@BBBB
  kAnNone,

  // 63 SGET_BOOLEAN vAA, field@BBBB
  kAnNone,

  // 64 SGET_BYTE vAA, field@BBBB
  kAnNone,

  // 65 SGET_CHAR vAA, field@BBBB
  kAnNone,

  // 66 SGET_SHORT vAA, field@BBBB
  kAnNone,

  // 67 SPUT vAA, field@BBBB
  kAnNone,

  // 68 SPUT_WIDE vAA, field@BBBB
  kAnNone,

  // 69 SPUT_OBJECT vAA, field@BBBB
  kAnNone,

  // 6A SPUT_BOOLEAN vAA, field@BBBB
  kAnNone,

  // 6B SPUT_BYTE vAA, field@BBBB
  kAnNone,

  // 6C SPUT_CHAR vAA, field@BBBB
  kAnNone,

  // 6D SPUT_SHORT vAA, field@BBBB
  kAnNone,

  // 6E INVOKE_VIRTUAL {vD, vE, vF, vG, vA}
  kAnInvoke | kAnHeavyWeight,

  // 6F INVOKE_SUPER {vD, vE, vF, vG, vA}
  kAnInvoke | kAnHeavyWeight,

  // 70 INVOKE_DIRECT {vD, vE, vF, vG, vA}
  kAnInvoke | kAnHeavyWeight,

  // 71 INVOKE_STATIC {vD, vE, vF, vG, vA}
  kAnInvoke | kAnHeavyWeight,

  // 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
  kAnInvoke | kAnHeavyWeight,

  // 73 RETURN_VOID_NO_BARRIER
  kAnBranch,

  // 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
  kAnInvoke | kAnHeavyWeight,

  // 75 INVOKE_SUPER_RANGE {vCCCC .. vNNNN}
  kAnInvoke | kAnHeavyWeight,

  // 76 INVOKE_DIRECT_RANGE {vCCCC .. vNNNN}
  kAnInvoke | kAnHeavyWeight,

  // 77 INVOKE_STATIC_RANGE {vCCCC .. vNNNN}
  kAnInvoke | kAnHeavyWeight,

  // 78 INVOKE_INTERFACE_RANGE {vCCCC .. vNNNN}
  kAnInvoke | kAnHeavyWeight,

  // 79 UNUSED_79
  kAnNone,

  // 7A UNUSED_7A
  kAnNone,

  // 7B NEG_INT vA, vB
  kAnMath | kAnInt,

  // 7C NOT_INT vA, vB
  kAnMath | kAnInt,

  // 7D NEG_LONG vA, vB
  kAnMath | kAnLong,

  // 7E NOT_LONG vA, vB
  kAnMath | kAnLong,

  // 7F NEG_FLOAT vA, vB
  kAnMath | kAnFp | kAnSingle,

  // 80 NEG_DOUBLE vA, vB
  kAnMath | kAnFp | kAnDouble,

  // 81 INT_TO_LONG vA, vB
  kAnMath | kAnInt | kAnLong,

  // 82 INT_TO_FLOAT vA, vB
  kAnMath | kAnFp | kAnInt | kAnSingle,

  // 83 INT_TO_DOUBLE vA, vB
  kAnMath | kAnFp | kAnInt | kAnDouble,

  // 84 LONG_TO_INT vA, vB
  kAnMath | kAnInt | kAnLong,

  // 85 LONG_TO_FLOAT vA, vB
  kAnMath | kAnFp | kAnLong | kAnSingle,

  // 86 LONG_TO_DOUBLE vA, vB
  kAnMath | kAnFp | kAnLong | kAnDouble,

  // 87 FLOAT_TO_INT vA, vB
  kAnMath | kAnFp | kAnInt | kAnSingle,

  // 88 FLOAT_TO_LONG vA, vB
  kAnMath | kAnFp | kAnLong | kAnSingle,

  // 89 FLOAT_TO_DOUBLE vA, vB
  kAnMath | kAnFp | kAnSingle | kAnDouble,

  // 8A DOUBLE_TO_INT vA, vB
  kAnMath | kAnFp | kAnInt | kAnDouble,

  // 8B DOUBLE_TO_LONG vA, vB
  kAnMath | kAnFp | kAnLong | kAnDouble,

  // 8C DOUBLE_TO_FLOAT vA, vB
  kAnMath | kAnFp | kAnSingle | kAnDouble,

  // 8D INT_TO_BYTE vA, vB
  kAnMath | kAnInt,

  // 8E INT_TO_CHAR vA, vB
  kAnMath | kAnInt,

  // 8F INT_TO_SHORT vA, vB
  kAnMath | kAnInt,

  // 90 ADD_INT vAA, vBB, vCC
  kAnMath | kAnInt,

  // 91 SUB_INT vAA, vBB, vCC
  kAnMath | kAnInt,

  // 92 MUL_INT vAA, vBB, vCC
  kAnMath | kAnInt,

  // 93 DIV_INT vAA, vBB, vCC
  kAnMath | kAnInt,

  // 94 REM_INT vAA, vBB, vCC
  kAnMath | kAnInt,

  // 95 AND_INT vAA, vBB, vCC
  kAnMath | kAnInt,

  // 96 OR_INT vAA, vBB, vCC
  kAnMath | kAnInt,

  // 97 XOR_INT vAA, vBB, vCC
  kAnMath | kAnInt,

  // 98 SHL_INT vAA, vBB, vCC
  kAnMath | kAnInt,

  // 99 SHR_INT vAA, vBB, vCC
  kAnMath | kAnInt,

  // 9A USHR_INT vAA, vBB, vCC
  kAnMath | kAnInt,

  // 9B ADD_LONG vAA, vBB, vCC
  kAnMath | kAnLong,

  // 9C SUB_LONG vAA, vBB, vCC
  kAnMath | kAnLong,

  // 9D MUL_LONG vAA, vBB, vCC
  kAnMath | kAnLong,

  // 9E DIV_LONG vAA, vBB, vCC
  kAnMath | kAnLong,

  // 9F REM_LONG vAA, vBB, vCC
  kAnMath | kAnLong,

  // A0 AND_LONG vAA, vBB, vCC
  kAnMath | kAnLong,

  // A1 OR_LONG vAA, vBB, vCC
  kAnMath | kAnLong,

  // A2 XOR_LONG vAA, vBB, vCC
  kAnMath | kAnLong,

  // A3 SHL_LONG vAA, vBB, vCC
  kAnMath | kAnLong,

  // A4 SHR_LONG vAA, vBB, vCC
  kAnMath | kAnLong,

  // A5 USHR_LONG vAA, vBB, vCC
  kAnMath | kAnLong,

  // A6 ADD_FLOAT vAA, vBB, vCC
  kAnMath | kAnFp | kAnSingle,

  // A7 SUB_FLOAT vAA, vBB, vCC
  kAnMath | kAnFp | kAnSingle,

  // A8 MUL_FLOAT vAA, vBB, vCC
  kAnMath | kAnFp | kAnSingle,

  // A9 DIV_FLOAT vAA, vBB, vCC
  kAnMath | kAnFp | kAnSingle,

  // AA REM_FLOAT vAA, vBB, vCC
  kAnMath | kAnFp | kAnSingle,

  // AB ADD_DOUBLE vAA, vBB, vCC
  kAnMath | kAnFp | kAnDouble,

  // AC SUB_DOUBLE vAA, vBB, vCC
  kAnMath | kAnFp | kAnDouble,

  // AD MUL_DOUBLE vAA, vBB, vCC
  kAnMath | kAnFp | kAnDouble,

  // AE DIV_DOUBLE vAA, vBB, vCC
  kAnMath | kAnFp | kAnDouble,

  // AF REM_DOUBLE vAA, vBB, vCC
  kAnMath | kAnFp | kAnDouble,

  // B0 ADD_INT_2ADDR vA, vB
  kAnMath | kAnInt,

  // B1 SUB_INT_2ADDR vA, vB
  kAnMath | kAnInt,

  // B2 MUL_INT_2ADDR vA, vB
  kAnMath | kAnInt,

  // B3 DIV_INT_2ADDR vA, vB
  kAnMath | kAnInt,

  // B4 REM_INT_2ADDR vA, vB
  kAnMath | kAnInt,

  // B5 AND_INT_2ADDR vA, vB
  kAnMath | kAnInt,

  // B6 OR_INT_2ADDR vA, vB
  kAnMath | kAnInt,

  // B7 XOR_INT_2ADDR vA, vB
  kAnMath | kAnInt,

  // B8 SHL_INT_2ADDR vA, vB
  kAnMath | kAnInt,

  // B9 SHR_INT_2ADDR vA, vB
  kAnMath | kAnInt,

  // BA USHR_INT_2ADDR vA, vB
  kAnMath | kAnInt,

  // BB ADD_LONG_2ADDR vA, vB
  kAnMath | kAnLong,

  // BC SUB_LONG_2ADDR vA, vB
  kAnMath | kAnLong,

  // BD MUL_LONG_2ADDR vA, vB
  kAnMath | kAnLong,

  // BE DIV_LONG_2ADDR vA, vB
  kAnMath | kAnLong,

  // BF REM_LONG_2ADDR vA, vB
  kAnMath | kAnLong,

  // C0 AND_LONG_2ADDR vA, vB
  kAnMath | kAnLong,

  // C1 OR_LONG_2ADDR vA, vB
  kAnMath | kAnLong,

  // C2 XOR_LONG_2ADDR vA, vB
  kAnMath | kAnLong,

  // C3 SHL_LONG_2ADDR vA, vB
  kAnMath | kAnLong,

  // C4 SHR_LONG_2ADDR vA, vB
  kAnMath | kAnLong,

  // C5 USHR_LONG_2ADDR vA, vB
  kAnMath | kAnLong,

  // C6 ADD_FLOAT_2ADDR vA, vB
  kAnMath | kAnFp | kAnSingle,

  // C7 SUB_FLOAT_2ADDR vA, vB
  kAnMath | kAnFp | kAnSingle,

  // C8 MUL_FLOAT_2ADDR vA, vB
  kAnMath | kAnFp | kAnSingle,

  // C9 DIV_FLOAT_2ADDR vA, vB
  kAnMath | kAnFp | kAnSingle,

  // CA REM_FLOAT_2ADDR vA, vB
  kAnMath | kAnFp | kAnSingle,

  // CB ADD_DOUBLE_2ADDR vA, vB
  kAnMath | kAnFp | kAnDouble,

  // CC SUB_DOUBLE_2ADDR vA, vB
  kAnMath | kAnFp | kAnDouble,

  // CD MUL_DOUBLE_2ADDR vA, vB
  kAnMath | kAnFp | kAnDouble,

  // CE DIV_DOUBLE_2ADDR vA, vB
  kAnMath | kAnFp | kAnDouble,

  // CF REM_DOUBLE_2ADDR vA, vB
  kAnMath | kAnFp | kAnDouble,

  // D0 ADD_INT_LIT16 vA, vB, #+CCCC
  kAnMath | kAnInt,

  // D1 RSUB_INT vA, vB, #+CCCC
  kAnMath | kAnInt,

  // D2 MUL_INT_LIT16 vA, vB, #+CCCC
  kAnMath | kAnInt,

  // D3 DIV_INT_LIT16 vA, vB, #+CCCC
  kAnMath | kAnInt,

  // D4 REM_INT_LIT16 vA, vB, #+CCCC
  kAnMath | kAnInt,

  // D5 AND_INT_LIT16 vA, vB, #+CCCC
  kAnMath | kAnInt,

  // D6 OR_INT_LIT16 vA, vB, #+CCCC
  kAnMath | kAnInt,

  // D7 XOR_INT_LIT16 vA, vB, #+CCCC
  kAnMath | kAnInt,

  // D8 ADD_INT_LIT8 vAA, vBB, #+CC
  kAnMath | kAnInt,

  // D9 RSUB_INT_LIT8 vAA, vBB, #+CC
  kAnMath | kAnInt,

  // DA MUL_INT_LIT8 vAA, vBB, #+CC
  kAnMath | kAnInt,

  // DB DIV_INT_LIT8 vAA, vBB, #+CC
  kAnMath | kAnInt,

  // DC REM_INT_LIT8 vAA, vBB, #+CC
  kAnMath | kAnInt,

  // DD AND_INT_LIT8 vAA, vBB, #+CC
  kAnMath | kAnInt,

  // DE OR_INT_LIT8 vAA, vBB, #+CC
  kAnMath | kAnInt,

  // DF XOR_INT_LIT8 vAA, vBB, #+CC
  kAnMath | kAnInt,

  // E0 SHL_INT_LIT8 vAA, vBB, #+CC
  kAnMath | kAnInt,

  // E1 SHR_INT_LIT8 vAA, vBB, #+CC
  kAnMath | kAnInt,

  // E2 USHR_INT_LIT8 vAA, vBB, #+CC
  kAnMath | kAnInt,

  // E3 IGET_QUICK
  kAnNone,

  // E4 IGET_WIDE_QUICK
  kAnNone,

  // E5 IGET_OBJECT_QUICK
  kAnNone,

  // E6 IPUT_QUICK
  kAnNone,

  // E7 IPUT_WIDE_QUICK
  kAnNone,

  // E8 IPUT_OBJECT_QUICK
  kAnNone,

  // E9 INVOKE_VIRTUAL_QUICK
  kAnInvoke | kAnHeavyWeight,

  // EA INVOKE_VIRTUAL_RANGE_QUICK
  kAnInvoke | kAnHeavyWeight,

  // EB IPUT_BOOLEAN_QUICK
  kAnNone,

  // EC IPUT_BYTE_QUICK
  kAnNone,

  // ED IPUT_CHAR_QUICK
  kAnNone,

  // EE IPUT_SHORT_QUICK
  kAnNone,

  // EF IGET_BOOLEAN_QUICK
  kAnNone,

  // F0 IGET_BYTE_QUICK
  kAnNone,

  // F1 IGET_CHAR_QUICK
  kAnNone,

  // F2 IGET_SHORT_QUICK
  kAnNone,

  // F3 UNUSED_F3
  kAnNone,

  // F4 UNUSED_F4
  kAnNone,

  // F5 UNUSED_F5
  kAnNone,

  // F6 UNUSED_F6
  kAnNone,

  // F7 UNUSED_F7
  kAnNone,

  // F8 UNUSED_F8
  kAnNone,

  // F9 UNUSED_F9
  kAnNone,

  // FA UNUSED_FA
  kAnNone,

  // FB UNUSED_FB
  kAnNone,

  // FC UNUSED_FC
  kAnNone,

  // FD UNUSED_FD
  kAnNone,

  // FE UNUSED_FE
  kAnNone,

  // FF UNUSED_FF
  kAnNone,

  // Beginning of extended MIR opcodes
  // 100 MIR_PHI
  kAnNone,

  // 101 MIR_COPY
  kAnNone,

  // 102 MIR_FUSED_CMPL_FLOAT
  kAnNone,

  // 103 MIR_FUSED_CMPG_FLOAT
  kAnNone,

  // 104 MIR_FUSED_CMPL_DOUBLE
  kAnNone,

  // 105 MIR_FUSED_CMPG_DOUBLE
  kAnNone,

  // 106 MIR_FUSED_CMP_LONG
  kAnNone,

  // 107 MIR_NOP
  kAnNone,

  // 108 MIR_NULL_CHECK
  kAnNone,

  // 109 MIR_RANGE_CHECK
  kAnNone,

  // 10A MIR_DIV_ZERO_CHECK
  kAnNone,

  // 10B MIR_CHECK
  kAnNone,

  // 10C MIR_CHECKPART2
  kAnNone,

  // 10D MIR_SELECT
  kAnNone,

  // 10E MirOpConstVector
  kAnNone,

  // 10F MirOpMoveVector
  kAnNone,

  // 110 MirOpPackedMultiply
  kAnNone,

  // 111 MirOpPackedAddition
  kAnNone,

  // 112 MirOpPackedSubtract
  kAnNone,

  // 113 MirOpPackedShiftLeft
  kAnNone,

  // 114 MirOpPackedSignedShiftRight
  kAnNone,

  // 115 MirOpPackedUnsignedShiftRight
  kAnNone,

  // 116 MirOpPackedAnd
  kAnNone,

  // 117 MirOpPackedOr
  kAnNone,

  // 118 MirOpPackedXor
  kAnNone,

  // 119 MirOpPackedAddReduce
  kAnNone,

  // 11A MirOpPackedReduce
  kAnNone,

  // 11B MirOpPackedSet
  kAnNone,

  // 11C MirOpReserveVectorRegisters
  kAnNone,

  // 11D MirOpReturnVectorRegisters
  kAnNone,

  // 11E MirOpMemBarrier
  kAnNone,

  // 11F MirOpPackedArrayGet
  kAnArrayOp,

  // 120 MirOpPackedArrayPut
  kAnArrayOp,
};

struct MethodStats {
  int dex_instructions;
  int math_ops;
  int fp_ops;
  int array_ops;
  int branch_ops;
  int heavyweight_ops;
  bool has_computational_loop;
  bool has_switch;
  float math_ratio;
  float fp_ratio;
  float array_ratio;
  float branch_ratio;
  float heavyweight_ratio;
};

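// Note: AnalyzeBlock() fills the counter fields with loop-weighted values - instructions
// in a detected simple loop are counted loop_scale_factor (25) times each - and
// ComputeSkipCompilation() then derives each *_ratio field as counter / dex_instructions.
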
void MIRGraph::AnalyzeBlock(BasicBlock* bb, MethodStats* stats) {
  if (bb->visited || (bb->block_type != kDalvikByteCode)) {
    return;
  }
  bool computational_block = true;
  bool has_math = false;
  /*
   * For the purposes of this scan, we want to treat the set of basic blocks broken
   * by an exception edge as a single basic block.  We'll scan forward along the fallthrough
   * edges until we reach an explicit branch or return.
   */
  BasicBlock* ending_bb = bb;
  if (ending_bb->last_mir_insn != nullptr) {
    uint32_t ending_flags = kAnalysisAttributes[ending_bb->last_mir_insn->dalvikInsn.opcode];
    while ((ending_flags & kAnBranch) == 0) {
      ending_bb = GetBasicBlock(ending_bb->fall_through);
      ending_flags = kAnalysisAttributes[ending_bb->last_mir_insn->dalvikInsn.opcode];
    }
  }
  /*
   * Ideally, we'd weight the operations by loop nesting level, but to do so we'd
   * first need to do some expensive loop detection - and the point of this is to make
   * an informed guess before investing in computation.  However, we can cheaply detect
   * many simple loop forms without having to do full dataflow analysis.
   */
  int loop_scale_factor = 1;
  // Simple for and while loops
  if ((ending_bb->taken != NullBasicBlockId) && (ending_bb->fall_through == NullBasicBlockId)) {
    if ((GetBasicBlock(ending_bb->taken)->taken == bb->id) ||
        (GetBasicBlock(ending_bb->taken)->fall_through == bb->id)) {
      loop_scale_factor = 25;
    }
  }
  // Simple do-while loop
  if ((ending_bb->taken != NullBasicBlockId) && (ending_bb->taken == bb->id)) {
    loop_scale_factor = 25;
  }

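  // The two shapes detected above, sketched (the fall-through chain scanned here acts as
  // one extended block):
  //
  //   for/while:  bb -> ... -> ending_bb --taken--> loop_head --taken/fall_through--> bb
  //   do-while:   bb -> ... -> ending_bb --taken--> bb
  //
  // Either back edge to this scan's starting block scales the block's counts by 25 below.
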
  BasicBlock* tbb = bb;
  bool done = false;
  while (!done) {
    tbb->visited = true;
    for (MIR* mir = tbb->first_mir_insn; mir != nullptr; mir = mir->next) {
      if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
        // Skip any MIR pseudo-op.
        continue;
      }
      uint16_t flags = kAnalysisAttributes[mir->dalvikInsn.opcode];
      stats->dex_instructions += loop_scale_factor;
      if ((flags & kAnBranch) == 0) {
        computational_block &= ((flags & kAnComputational) != 0);
      } else {
        stats->branch_ops += loop_scale_factor;
      }
      if ((flags & kAnMath) != 0) {
        stats->math_ops += loop_scale_factor;
        has_math = true;
      }
      if ((flags & kAnFp) != 0) {
        stats->fp_ops += loop_scale_factor;
      }
      if ((flags & kAnArrayOp) != 0) {
        stats->array_ops += loop_scale_factor;
      }
      if ((flags & kAnHeavyWeight) != 0) {
        stats->heavyweight_ops += loop_scale_factor;
      }
      if ((flags & kAnSwitch) != 0) {
        stats->has_switch = true;
      }
    }
    if (tbb == ending_bb) {
      done = true;
    } else {
      tbb = GetBasicBlock(tbb->fall_through);
    }
  }
  if (has_math && computational_block && (loop_scale_factor > 1)) {
    stats->has_computational_loop = true;
  }
}

bool MIRGraph::ComputeSkipCompilation(MethodStats* stats, bool skip_default,
                                      std::string* skip_message) {
  float count = stats->dex_instructions;
  stats->math_ratio = stats->math_ops / count;
  stats->fp_ratio = stats->fp_ops / count;
  stats->branch_ratio = stats->branch_ops / count;
  stats->array_ratio = stats->array_ops / count;
  stats->heavyweight_ratio = stats->heavyweight_ops / count;

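  // Worked example (illustrative): dex_instructions == 200 with math_ops == 70 yields
  // math_ratio == 0.35, which alone clears the 0.3 threshold below and keeps the method
  // compiled (this function returns false).
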
  if (cu_->enable_debug & (1 << kDebugShowFilterStats)) {
    LOG(INFO) << "STATS " << stats->dex_instructions << ", math:"
              << stats->math_ratio << ", fp:"
              << stats->fp_ratio << ", br:"
              << stats->branch_ratio << ", hw:"
              << stats->heavyweight_ratio << ", arr:"
              << stats->array_ratio << ", hot:"
              << stats->has_computational_loop << ", "
              << PrettyMethod(cu_->method_idx, *cu_->dex_file);
  }

  // Computation intensive?
  if (stats->has_computational_loop && (stats->heavyweight_ratio < 0.04)) {
    return false;
  }

  // Complex, logic-intensive?
  if (cu_->compiler_driver->GetCompilerOptions().IsSmallMethod(GetNumDalvikInsns()) &&
      stats->branch_ratio > 0.3) {
    return false;
  }

  // Significant floating point?
  if (stats->fp_ratio > 0.05) {
    return false;
  }

  // Significant generic math?
  if (stats->math_ratio > 0.3) {
    return false;
  }

  // If array-intensive, compiling is probably worthwhile.
  if (stats->array_ratio > 0.1) {
    return false;
  }

  // Switch operations benefit greatly from compilation, so go ahead and spend the cycles.
  if (stats->has_switch) {
    return false;
  }

  // If small in size but with a high proportion of expensive operations, skip.
  if (cu_->compiler_driver->GetCompilerOptions().IsSmallMethod(GetNumDalvikInsns()) &&
      (stats->heavyweight_ratio > 0.3)) {
    *skip_message = "Is a small method with heavyweight ratio " +
                    std::to_string(stats->heavyweight_ratio);
    return true;
  }

  return skip_default;
}

 /*
  * Will eventually want this to be a bit more sophisticated and happen at verification time.
  */
bool MIRGraph::SkipCompilation(std::string* skip_message) {
  const CompilerOptions& compiler_options = cu_->compiler_driver->GetCompilerOptions();
  CompilerOptions::CompilerFilter compiler_filter = compiler_options.GetCompilerFilter();
  if (compiler_filter == CompilerOptions::kEverything) {
    return false;
  }

  // Contains a pattern we don't want to compile?
  if (PuntToInterpreter()) {
    *skip_message = "Punt to interpreter set";
    return true;
  }

  DCHECK(compiler_options.IsCompilationEnabled());

  // Set up compilation cutoffs based on current filter mode.
  size_t small_cutoff;
  size_t default_cutoff;
  switch (compiler_filter) {
    case CompilerOptions::kBalanced:
      small_cutoff = compiler_options.GetSmallMethodThreshold();
      default_cutoff = compiler_options.GetLargeMethodThreshold();
      break;
    case CompilerOptions::kSpace:
      small_cutoff = compiler_options.GetTinyMethodThreshold();
      default_cutoff = compiler_options.GetSmallMethodThreshold();
      break;
    case CompilerOptions::kSpeed:
    case CompilerOptions::kTime:
      small_cutoff = compiler_options.GetHugeMethodThreshold();
      default_cutoff = compiler_options.GetHugeMethodThreshold();
      break;
    default:
      LOG(FATAL) << "Unexpected compiler_filter_: " << compiler_filter;
      UNREACHABLE();
  }

  // If size < cutoff, assume we'll compile - but allow removal.
  bool skip_compilation = (GetNumDalvikInsns() >= default_cutoff);
  if (skip_compilation) {
    *skip_message = "#Insns >= default_cutoff: " + std::to_string(GetNumDalvikInsns());
  }

  /*
   * Filter 1: Huge methods are likely to be machine generated, but some aren't.
   * If huge, assume we won't compile, but allow further analysis to turn it back on.
   */
  if (compiler_options.IsHugeMethod(GetNumDalvikInsns())) {
    skip_compilation = true;
    *skip_message = "Huge method: " + std::to_string(GetNumDalvikInsns());
    // If we've got a huge number of basic blocks, don't bother with further analysis.
    if (static_cast<size_t>(GetNumBlocks()) > (compiler_options.GetHugeMethodThreshold() / 2)) {
      return true;
    }
  } else if (compiler_options.IsLargeMethod(GetNumDalvikInsns()) &&
    /* If it's large and contains no branches, it's likely machine-generated initialization. */
      (GetBranchCount() == 0)) {
    *skip_message = "Large method with no branches";
    return true;
  } else if (compiler_filter == CompilerOptions::kSpeed) {
    // If not huge, compile.
    return false;
  }

  // Filter 2: Skip class initializers.
  if (((cu_->access_flags & kAccConstructor) != 0) && ((cu_->access_flags & kAccStatic) != 0)) {
    *skip_message = "Class initializer";
    return true;
  }

  // Filter 3: if this method is a special pattern, go ahead and emit the canned pattern.
  if (cu_->compiler_driver->GetMethodInlinerMap() != nullptr &&
      cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
          ->IsSpecial(cu_->method_idx)) {
    return false;
  }

  // Filter 4: if small, just compile.
  if (GetNumDalvikInsns() < small_cutoff) {
    return false;
  }

  // Analyze graph for:
  //  o floating point computation
  //  o basic blocks contained in loop with heavy arithmetic.
  //  o proportion of conditional branches.

  MethodStats stats;
  memset(&stats, 0, sizeof(stats));

  ClearAllVisitedFlags();
  AllNodesIterator iter(this);
  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
    AnalyzeBlock(bb, &stats);
  }

  return ComputeSkipCompilation(&stats, skip_compilation, skip_message);
}

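// Illustrative call site (a sketch assuming a populated MIRGraph `mir_graph` and its
// CompilationUnit `cu`; not a quote from the compiler driver):
//
//   std::string skip_message;
//   if (mir_graph->SkipCompilation(&skip_message)) {
//     VLOG(compiler) << "Skipping " << PrettyMethod(cu->method_idx, *cu->dex_file)
//                    << ": " << skip_message;
//   }
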
void MIRGraph::DoCacheFieldLoweringInfo() {
  static constexpr uint32_t kFieldIndexFlagQuickened = 0x80000000;
  // All IGET/IPUT/SGET/SPUT instructions take 2 code units and there must also be a RETURN.
  const uint32_t max_refs = (GetNumDalvikInsns() - 1u) / 2u;
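  // E.g. a 41-code-unit method can contain at most (41 - 1) / 2 == 20 two-unit field
  // accesses once the mandatory one-unit RETURN is accounted for.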
  ScopedArenaAllocator allocator(&cu_->arena_stack);
  auto* field_idxs = allocator.AllocArray<uint32_t>(max_refs, kArenaAllocMisc);
  DexMemAccessType* field_types = allocator.AllocArray<DexMemAccessType>(
      max_refs, kArenaAllocMisc);
  // Find IGET/IPUT/SGET/SPUT insns, store IGET/IPUT fields at the beginning, SGET/SPUT at the end.
  size_t ifield_pos = 0u;
  size_t sfield_pos = max_refs;
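  // Layout sketch (illustrative): with max_refs == 8, after three unique instance fields
  // and two unique static fields have been recorded:
  //
  //   field_idxs: [ i0, i1, i2, --, --, --, s1, s0 ]
  //                 0   1   2   3   4   5   6   7
  //
  // ifield_pos == 3 grows upward and sfield_pos == 6 grows downward, so the DCHECK_LE
  // in the loop below guarantees the two regions never overlap.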
  AllNodesIterator iter(this);
  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
    if (bb->block_type != kDalvikByteCode) {
      continue;
    }
    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
      // Get the field index and try to find it among existing indexes. If found, it's usually
      // among the last few added, so we'll start the search from ifield_pos/sfield_pos. Though
      // this is a linear search, it actually performs much better than a map-based approach.
      const bool is_iget_or_iput = IsInstructionIGetOrIPut(mir->dalvikInsn.opcode);
      const bool is_iget_or_iput_quick = IsInstructionIGetQuickOrIPutQuick(mir->dalvikInsn.opcode);
      if (is_iget_or_iput || is_iget_or_iput_quick) {
        uint32_t field_idx;
        DexMemAccessType access_type;
        if (is_iget_or_iput) {
          field_idx = mir->dalvikInsn.vC;
          access_type = IGetOrIPutMemAccessType(mir->dalvikInsn.opcode);
        } else {
          DCHECK(is_iget_or_iput_quick);
          // Set kFieldIndexFlagQuickened so that we don't deduplicate against non-quickened
          // field indexes.
          field_idx = mir->offset | kFieldIndexFlagQuickened;
          access_type = IGetQuickOrIPutQuickMemAccessType(mir->dalvikInsn.opcode);
        }
        size_t i = ifield_pos;
        while (i != 0u && field_idxs[i - 1] != field_idx) {
          --i;
        }
        if (i != 0u) {
          mir->meta.ifield_lowering_info = i - 1;
          DCHECK_EQ(field_types[i - 1], access_type);
        } else {
          mir->meta.ifield_lowering_info = ifield_pos;
          field_idxs[ifield_pos] = field_idx;
          field_types[ifield_pos] = access_type;
          ++ifield_pos;
        }
      } else if (IsInstructionSGetOrSPut(mir->dalvikInsn.opcode)) {
        auto field_idx = mir->dalvikInsn.vB;
        size_t i = sfield_pos;
        while (i != max_refs && field_idxs[i] != field_idx) {
          ++i;
        }
        if (i != max_refs) {
          mir->meta.sfield_lowering_info = max_refs - i - 1u;
          DCHECK_EQ(field_types[i], SGetOrSPutMemAccessType(mir->dalvikInsn.opcode));
        } else {
          mir->meta.sfield_lowering_info = max_refs - sfield_pos;
          --sfield_pos;
          field_idxs[sfield_pos] = field_idx;
          field_types[sfield_pos] = SGetOrSPutMemAccessType(mir->dalvikInsn.opcode);
        }
      }
      DCHECK_LE(ifield_pos, sfield_pos);
    }
  }

  if (ifield_pos != 0u) {
    // Resolve instance field infos.
    DCHECK_EQ(ifield_lowering_infos_.size(), 0u);
    ifield_lowering_infos_.reserve(ifield_pos);
    for (size_t pos = 0u; pos != ifield_pos; ++pos) {
      const uint32_t field_idx = field_idxs[pos];
      const bool is_quickened = (field_idx & kFieldIndexFlagQuickened) != 0;
      const uint32_t masked_field_idx = field_idx & ~kFieldIndexFlagQuickened;
      CHECK_LT(masked_field_idx, 1u << 16);
      ifield_lowering_infos_.push_back(
          MirIFieldLoweringInfo(masked_field_idx, field_types[pos], is_quickened));
    }
    MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
                                   ifield_lowering_infos_.data(), ifield_pos);
  }

  if (sfield_pos != max_refs) {
    // Resolve static field infos.
    DCHECK_EQ(sfield_lowering_infos_.size(), 0u);
    sfield_lowering_infos_.reserve(max_refs - sfield_pos);
    for (size_t pos = max_refs; pos != sfield_pos;) {
      --pos;
      sfield_lowering_infos_.push_back(MirSFieldLoweringInfo(field_idxs[pos], field_types[pos]));
    }
    MirSFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
                                   sfield_lowering_infos_.data(), max_refs - sfield_pos);
  }
}

void MIRGraph::DoCacheMethodLoweringInfo() {
  static constexpr uint16_t invoke_types[] = { kVirtual, kSuper, kDirect, kStatic, kInterface };
  static constexpr uint32_t kMethodIdxFlagQuickened = 0x80000000;

  // Embed the map value in the entry to avoid extra padding in 64-bit builds.
  struct MapEntry {
    // Map key: target_method_idx, invoke_type, devirt_target. Ordered to avoid padding.
    const MethodReference* devirt_target;
    uint32_t target_method_idx;
    uint32_t vtable_idx;
    uint16_t invoke_type;
    // Map value.
    uint32_t lowering_info_index;
  };

  struct MapEntryComparator {
    bool operator()(const MapEntry& lhs, const MapEntry& rhs) const {
      if (lhs.target_method_idx != rhs.target_method_idx) {
        return lhs.target_method_idx < rhs.target_method_idx;
      }
      if (lhs.invoke_type != rhs.invoke_type) {
        return lhs.invoke_type < rhs.invoke_type;
      }
      if (lhs.vtable_idx != rhs.vtable_idx) {
        return lhs.vtable_idx < rhs.vtable_idx;
      }
      if (lhs.devirt_target != rhs.devirt_target) {
        if (lhs.devirt_target == nullptr) {
          return true;
        }
        if (rhs.devirt_target == nullptr) {
          return false;
        }
        return devirt_cmp(*lhs.devirt_target, *rhs.devirt_target);
      }
      return false;
    }
    MethodReferenceComparator devirt_cmp;
  };

  ScopedArenaAllocator allocator(&cu_->arena_stack);

  // All INVOKE instructions take 3 code units and there must also be a RETURN.
  const uint32_t max_refs = (GetNumDalvikInsns() - 1u) / 3u;
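  // E.g. a 31-code-unit method can contain at most (31 - 1) / 3 == 10 three-unit invokes
  // plus the mandatory RETURN.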

  // Map invoke key (see MapEntry) to lowering info index and vice versa.
  // The invoke_map and sequential entries are essentially equivalent to Boost.MultiIndex's
  // multi_index_container with one ordered index and one sequential index.
  ScopedArenaSet<MapEntry, MapEntryComparator> invoke_map(MapEntryComparator(),
                                                          allocator.Adapter());
  const MapEntry** sequential_entries =
      allocator.AllocArray<const MapEntry*>(max_refs, kArenaAllocMisc);

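  // The two views cooperate: invoke_map deduplicates invokes by (target_method_idx,
  // invoke_type, vtable_idx, devirt_target), while each entry's lowering_info_index
  // records first-seen order, so sequential_entries can hand back the pos-th unique
  // invoke in the final pass below.
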
  // Find INVOKE insns and their devirtualization targets.
  const VerifiedMethod* verified_method = GetCurrentDexCompilationUnit()->GetVerifiedMethod();
  AllNodesIterator iter(this);
  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
    if (bb->block_type != kDalvikByteCode) {
      continue;
    }
    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
      const bool is_quick_invoke = IsInstructionQuickInvoke(mir->dalvikInsn.opcode);
      const bool is_invoke = IsInstructionInvoke(mir->dalvikInsn.opcode);
      if (is_quick_invoke || is_invoke) {
        uint32_t vtable_index = 0;
        uint32_t target_method_idx = 0;
        uint32_t invoke_type_idx = 0;  // Default to virtual (in case of quickened).
        DCHECK_EQ(invoke_types[invoke_type_idx], kVirtual);
        if (is_quick_invoke) {
          // We need to store the vtable index since we can't necessarily recreate it at the
          // resolve phase if the dequickening resolved to an interface method.
          vtable_index = mir->dalvikInsn.vB;
          // Fake up the method index by storing the mir offset so that we can read the
          // dequicken info at resolve time.
          target_method_idx = mir->offset | kMethodIdxFlagQuickened;
        } else {
          DCHECK(is_invoke);
          // Decode target method index and invoke type.
          invoke_type_idx = InvokeInstructionType(mir->dalvikInsn.opcode);
          target_method_idx = mir->dalvikInsn.vB;
        }
        // Find devirtualization target.
        // TODO: The devirt map is ordered by the dex pc here. Is there a way to get INVOKEs
        // ordered by dex pc as well? That would allow us to keep an iterator to devirt targets
        // and increment it as needed instead of making O(log n) lookups.
        const MethodReference* devirt_target = verified_method->GetDevirtTarget(mir->offset);
        // Try to insert a new entry. If the insertion fails, we will have found an old one.
        MapEntry entry = {
            devirt_target,
            target_method_idx,
            vtable_index,
            invoke_types[invoke_type_idx],
            static_cast<uint32_t>(invoke_map.size())
        };
        auto it = invoke_map.insert(entry).first;  // Iterator to either the old or the new entry.
        mir->meta.method_lowering_info = it->lowering_info_index;
        // If we didn't actually insert, this will just overwrite an existing value with the
        // same value.
        sequential_entries[it->lowering_info_index] = &*it;
      }
    }
  }
  if (invoke_map.empty()) {
    return;
  }
  // Prepare unique method infos, set method info indexes for their MIRs.
  const size_t count = invoke_map.size();
  method_lowering_infos_.reserve(count);
  for (size_t pos = 0u; pos != count; ++pos) {
    const MapEntry* entry = sequential_entries[pos];
    const bool is_quick = (entry->target_method_idx & kMethodIdxFlagQuickened) != 0;
    const uint32_t masked_method_idx = entry->target_method_idx & ~kMethodIdxFlagQuickened;
    MirMethodLoweringInfo method_info(masked_method_idx,
                                      static_cast<InvokeType>(entry->invoke_type), is_quick);
    if (entry->devirt_target != nullptr) {
      method_info.SetDevirtualizationTarget(*entry->devirt_target);
    }
    if (is_quick) {
      method_info.SetVTableIndex(entry->vtable_idx);
    }
    method_lowering_infos_.push_back(method_info);
  }
  MirMethodLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
                                 method_lowering_infos_.data(), count);
}

bool MIRGraph::SkipCompilationByName(const std::string& methodname) {
  return cu_->compiler_driver->SkipCompilation(methodname);
}

}  // namespace art