/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_NODES_H_
#define ART_COMPILER_OPTIMIZING_NODES_H_

#include <algorithm>
#include <array>
#include <type_traits>

#include "art_method.h"
#include "base/arena_allocator.h"
#include "base/arena_bit_vector.h"
#include "base/arena_containers.h"
#include "base/arena_object.h"
#include "base/array_ref.h"
#include "base/intrusive_forward_list.h"
#include "base/iteration_range.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "base/quasi_atomic.h"
#include "base/stl_util.h"
#include "base/transform_array_ref.h"
#include "block_namer.h"
#include "class_root.h"
#include "compilation_kind.h"
#include "data_type.h"
#include "deoptimization_kind.h"
#include "dex/dex_file.h"
#include "dex/dex_file_types.h"
#include "dex/invoke_type.h"
#include "dex/method_reference.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "handle.h"
#include "handle_cache.h"
#include "intrinsics_enum.h"
#include "locations.h"
#include "mirror/class.h"
#include "mirror/method_type.h"
#include "offsets.h"
#include "reference_type_info.h"

namespace art HIDDEN {

class ArenaStack;
class CodeGenerator;
class GraphChecker;
class HBasicBlock;
class HCondition;
class HConstructorFence;
class HCurrentMethod;
class HDoubleConstant;
class HEnvironment;
class HFloatConstant;
class HGraphBuilder;
class HGraphVisitor;
class HInstruction;
class HIntConstant;
class HInvoke;
class HLongConstant;
class HNullConstant;
class HParameterValue;
class HPhi;
class HSuspendCheck;
class HTryBoundary;
class HVecCondition;
class FieldInfo;
class LiveInterval;
class LocationSummary;
class ProfilingInfo;
class SlowPathCode;
class SsaBuilder;

namespace mirror {
class DexCache;
}  // namespace mirror

static const int kDefaultNumberOfBlocks = 8;
static const int kDefaultNumberOfSuccessors = 2;
static const int kDefaultNumberOfPredecessors = 2;
static const int kDefaultNumberOfExceptionalPredecessors = 0;
static const int kDefaultNumberOfDominatedBlocks = 1;
static const int kDefaultNumberOfBackEdges = 1;

// The maximum (meaningful) distance (31) that can be used in an integer shift/rotate operation.
static constexpr int32_t kMaxIntShiftDistance = 0x1f;
// The maximum (meaningful) distance (63) that can be used in a long shift/rotate operation.
static constexpr int32_t kMaxLongShiftDistance = 0x3f;
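
// To illustrate how these limits are typically applied, a minimal hypothetical
// sketch (the helpers `UShrInt`/`UShrLong` are not part of this file): shift
// and rotate distances are masked down to the meaningful bits, matching Java
// shift semantics.
//
//   uint32_t UShrInt(int32_t value, int32_t distance) {
//     return static_cast<uint32_t>(value) >> (distance & kMaxIntShiftDistance);   // low 5 bits
//   }
//   uint64_t UShrLong(int64_t value, int32_t distance) {
//     return static_cast<uint64_t>(value) >> (distance & kMaxLongShiftDistance);  // low 6 bits
//   }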

static constexpr uint32_t kUnknownFieldIndex = static_cast<uint32_t>(-1);
static constexpr uint16_t kUnknownClassDefIndex = static_cast<uint16_t>(-1);

static constexpr InvokeType kInvalidInvokeType = static_cast<InvokeType>(-1);

static constexpr uint32_t kNoDexPc = -1;

inline bool IsSameDexFile(const DexFile& lhs, const DexFile& rhs) {
  // For the purposes of the compiler, the dex files must actually be the same object
  // if we want to safely treat them as the same. This is especially important for JIT
  // as custom class loaders can open the same underlying file (or memory) multiple
  // times and provide different class resolution but no two class loaders should ever
  // use the same DexFile object - doing so is an unsupported hack that can lead to
  // all sorts of weird failures.
  return &lhs == &rhs;
}
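
// A minimal usage sketch (hypothetical call site, not taken from this file):
// callers are expected to compare dex files by identity before treating
// indices from two methods as interchangeable.
//
//   if (!IsSameDexFile(caller_graph->GetDexFile(), callee_graph->GetDexFile())) {
//     return false;  // Type/method indices are not comparable across dex files.
//   }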

enum IfCondition {
  // All types.
  kCondEQ,  // ==
  kCondNE,  // !=
  // Signed integers and floating-point numbers.
  kCondLT,  // <
  kCondLE,  // <=
  kCondGT,  // >
  kCondGE,  // >=
  // Unsigned integers.
  kCondB,   // <
  kCondBE,  // <=
  kCondA,   // >
  kCondAE,  // >=
  // First and last aliases.
  kCondFirst = kCondEQ,
  kCondLast = kCondAE,
};
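
// A rough sketch of how the signed and unsigned variants differ for 32-bit
// operands (illustration only; this is not the evaluator the compiler uses):
//
//   bool EvaluateExample(IfCondition cond, int32_t x, int32_t y) {
//     switch (cond) {
//       case kCondLT: return x < y;                               // signed
//       case kCondB:  return MakeUnsigned(x) < MakeUnsigned(y);   // unsigned
//       case kCondGE: return x >= y;                              // signed
//       case kCondAE: return MakeUnsigned(x) >= MakeUnsigned(y);  // unsigned
//       default:      return false;  // remaining cases elided in this sketch
//     }
//   }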

enum GraphAnalysisResult {
  kAnalysisSkipped,
  kAnalysisInvalidBytecode,
  kAnalysisFailThrowCatchLoop,
  kAnalysisFailAmbiguousArrayOp,
  kAnalysisFailIrreducibleLoopAndStringInit,
  kAnalysisFailPhiEquivalentInOsr,
  kAnalysisSuccess,
};

std::ostream& operator<<(std::ostream& os, GraphAnalysisResult ga);

template <typename T>
static inline typename std::make_unsigned<T>::type MakeUnsigned(T x) {
  return static_cast<typename std::make_unsigned<T>::type>(x);
}

class HInstructionList : public ValueObject {
 public:
  HInstructionList() : first_instruction_(nullptr), last_instruction_(nullptr) {}

  void AddInstruction(HInstruction* instruction);
  void RemoveInstruction(HInstruction* instruction);

  // Insert `instruction` before/after an existing instruction `cursor`.
  void InsertInstructionBefore(HInstruction* instruction, HInstruction* cursor);
  void InsertInstructionAfter(HInstruction* instruction, HInstruction* cursor);

  // Return true if this list contains `instruction`.
  bool Contains(HInstruction* instruction) const;

  // Return true if `instruction1` is found before `instruction2` in
  // this instruction list and false otherwise. Abort if none
  // of these instructions is found.
  bool FoundBefore(const HInstruction* instruction1,
                   const HInstruction* instruction2) const;

  bool IsEmpty() const { return first_instruction_ == nullptr; }
  void Clear() { first_instruction_ = last_instruction_ = nullptr; }

  // Update the block of all instructions to be `block`.
  void SetBlockOfInstructions(HBasicBlock* block) const;

  void AddAfter(HInstruction* cursor, const HInstructionList& instruction_list);
  void AddBefore(HInstruction* cursor, const HInstructionList& instruction_list);
  void Add(const HInstructionList& instruction_list);

  // Return the number of instructions in the list. This is an expensive operation.
  size_t CountSize() const;

 private:
  HInstruction* first_instruction_;
  HInstruction* last_instruction_;

  friend class HBasicBlock;
  friend class HGraph;
  friend class HInstruction;
  friend class HInstructionIterator;
  friend class HInstructionIteratorHandleChanges;
  friend class HBackwardInstructionIterator;

  DISALLOW_COPY_AND_ASSIGN(HInstructionList);
};

// Control-flow graph of a method. Contains a list of basic blocks.
class HGraph : public ArenaObject<kArenaAllocGraph> {
 public:
  HGraph(ArenaAllocator* allocator,
         ArenaStack* arena_stack,
         VariableSizedHandleScope* handles,
         const DexFile& dex_file,
         uint32_t method_idx,
         InstructionSet instruction_set,
         InvokeType invoke_type = kInvalidInvokeType,
         bool dead_reference_safe = false,
         bool debuggable = false,
         CompilationKind compilation_kind = CompilationKind::kOptimized,
         int start_instruction_id = 0)
      : allocator_(allocator),
        arena_stack_(arena_stack),
        handle_cache_(handles),
        blocks_(allocator->Adapter(kArenaAllocBlockList)),
        reverse_post_order_(allocator->Adapter(kArenaAllocReversePostOrder)),
        linear_order_(allocator->Adapter(kArenaAllocLinearOrder)),
        entry_block_(nullptr),
        exit_block_(nullptr),
        number_of_vregs_(0),
        number_of_in_vregs_(0),
        temporaries_vreg_slots_(0),
        has_bounds_checks_(false),
        has_try_catch_(false),
        has_monitor_operations_(false),
        has_traditional_simd_(false),
        has_predicated_simd_(false),
        has_loops_(false),
        has_irreducible_loops_(false),
        has_direct_critical_native_call_(false),
        has_always_throwing_invokes_(false),
        dead_reference_safe_(dead_reference_safe),
        debuggable_(debuggable),
        current_instruction_id_(start_instruction_id),
        dex_file_(dex_file),
        method_idx_(method_idx),
        invoke_type_(invoke_type),
        in_ssa_form_(false),
        number_of_cha_guards_(0),
        instruction_set_(instruction_set),
        cached_null_constant_(nullptr),
        cached_int_constants_(std::less<int32_t>(), allocator->Adapter(kArenaAllocConstantsMap)),
        cached_float_constants_(std::less<int32_t>(), allocator->Adapter(kArenaAllocConstantsMap)),
        cached_long_constants_(std::less<int64_t>(), allocator->Adapter(kArenaAllocConstantsMap)),
        cached_double_constants_(std::less<int64_t>(), allocator->Adapter(kArenaAllocConstantsMap)),
        cached_current_method_(nullptr),
        art_method_(nullptr),
        compilation_kind_(compilation_kind),
        useful_optimizing_(false),
        cha_single_implementation_list_(allocator->Adapter(kArenaAllocCHA)) {
    blocks_.reserve(kDefaultNumberOfBlocks);
  }

  std::ostream& Dump(std::ostream& os,
                     CodeGenerator* codegen,
                     std::optional<std::reference_wrapper<const BlockNamer>> namer = std::nullopt);

  ArenaAllocator* GetAllocator() const { return allocator_; }
  ArenaStack* GetArenaStack() const { return arena_stack_; }

  HandleCache* GetHandleCache() { return &handle_cache_; }

  const ArenaVector<HBasicBlock*>& GetBlocks() const { return blocks_; }

  // An iterator to only blocks that are still actually in the graph (when
  // blocks are removed they are replaced with 'nullptr' in GetBlocks to
  // simplify block-id assignment and avoid memmoves in the block-list).
  IterationRange<FilterNull<ArenaVector<HBasicBlock*>::const_iterator>> GetActiveBlocks() const {
    return FilterOutNull(MakeIterationRange(GetBlocks()));
  }
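
  // Illustrative iteration sketch (hypothetical pass code; `ProcessBlock` is
  // not defined in this file): removed blocks are nullptr entries in
  // GetBlocks(), so iterating GetActiveBlocks() needs no null check.
  //
  //   for (HBasicBlock* block : graph->GetActiveBlocks()) {
  //     ProcessBlock(block);  // never nullptr here
  //   }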

  bool IsInSsaForm() const { return in_ssa_form_; }
  void SetInSsaForm() { in_ssa_form_ = true; }

  HBasicBlock* GetEntryBlock() const { return entry_block_; }
  HBasicBlock* GetExitBlock() const { return exit_block_; }
  bool HasExitBlock() const { return exit_block_ != nullptr; }

  void SetEntryBlock(HBasicBlock* block) { entry_block_ = block; }
  void SetExitBlock(HBasicBlock* block) { exit_block_ = block; }

  void AddBlock(HBasicBlock* block);

  void ComputeDominanceInformation();
  void ClearDominanceInformation();
  void ClearLoopInformation();
  void FindBackEdges(/*out*/ BitVectorView<size_t> visited);
  GraphAnalysisResult BuildDominatorTree();
  GraphAnalysisResult RecomputeDominatorTree();
  void SimplifyCFG();
  void SimplifyCatchBlocks();

  // Analyze all natural loops in this graph. Returns a code specifying that it
  // was successful or the reason for failure. The method will fail if a loop
  // is a throw-catch loop, i.e. the header is a catch block.
  GraphAnalysisResult AnalyzeLoops() const;

  // Iterate over blocks to compute try block membership. Needs reverse post
  // order and loop information.
  void ComputeTryBlockInformation();

  // Inline this graph in `outer_graph`, replacing the given `invoke` instruction.
  // Returns the instruction to replace the invoke expression or null if the
  // invoke is for a void method. Note that the caller is responsible for replacing
  // and removing the invoke instruction.
  HInstruction* InlineInto(HGraph* outer_graph, HInvoke* invoke);

  // Update the loop and try membership of `block`, which was spawned from `reference`.
  // In case `reference` is a back edge, `replace_if_back_edge` notifies whether `block`
  // should be the new back edge.
  // `has_more_specific_try_catch_info` will be set to true when inlining a try catch.
  void UpdateLoopAndTryInformationOfNewBlock(HBasicBlock* block,
                                             HBasicBlock* reference,
                                             bool replace_if_back_edge,
                                             bool has_more_specific_try_catch_info = false);

  // Adds a couple of blocks needed to test whether the loop body is entered
  // and to hold deoptimization instructions, etc.
  void TransformLoopHeaderForBCE(HBasicBlock* header);

  // Adds a new loop directly after the loop with the given header and exit.
  // Returns the new preheader.
  HBasicBlock* TransformLoopForVectorization(HBasicBlock* header,
                                             HBasicBlock* body,
                                             HBasicBlock* exit);

  // Removes `block` from the graph. Assumes `block` has been disconnected from
  // other blocks and has no instructions or phis.
  void DeleteDeadEmptyBlock(HBasicBlock* block);

  // Splits the edge between `block` and `successor` while preserving the
  // indices in the predecessor/successor lists. If there are multiple edges
  // between the blocks, the lowest indices are used.
  // Returns the new block which is empty and has the same dex pc as `successor`.
  HBasicBlock* SplitEdge(HBasicBlock* block, HBasicBlock* successor);

  void SplitCriticalEdge(HBasicBlock* block, HBasicBlock* successor);

  // Splits the edge between `block` and `successor` and then updates the graph's RPO to keep
  // consistency without recomputing the whole graph.
  HBasicBlock* SplitEdgeAndUpdateRPO(HBasicBlock* block, HBasicBlock* successor);

  void OrderLoopHeaderPredecessors(HBasicBlock* header);

  // Transform a loop into a format with a single preheader.
  //
  // Each phi in the header should be split: original one in the header should only hold
  // inputs reachable from the back edges and a single input from the preheader. The newly created
  // phi in the preheader should collate the inputs from the original multiple incoming blocks.
  //
  // Loops in the graph typically have a single preheader, so this method is used to "repair" loops
  // that no longer have this property.
  void TransformLoopToSinglePreheaderFormat(HBasicBlock* header);
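
  // A small worked example of the phi split described above (block and value
  // names are illustrative only). Before, with incoming blocks P1 and P2 and a
  // back edge BE:
  //
  //   header:     p = Phi(v1 [P1], v2 [P2], v3 [BE])
  //
  // After the transformation, P1 and P2 feed a single new preheader PH:
  //
  //   preheader:  q = Phi(v1 [P1], v2 [P2])
  //   header:     p = Phi(q [PH], v3 [BE])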

  void SimplifyLoop(HBasicBlock* header);

  ALWAYS_INLINE int32_t AllocateInstructionId();

  int32_t GetCurrentInstructionId() const {
    return current_instruction_id_;
  }

  void SetCurrentInstructionId(int32_t id) {
    CHECK_GE(id, current_instruction_id_);
    current_instruction_id_ = id;
  }

  void UpdateTemporariesVRegSlots(size_t slots) {
    temporaries_vreg_slots_ = std::max(slots, temporaries_vreg_slots_);
  }

  size_t GetTemporariesVRegSlots() const {
    DCHECK(!in_ssa_form_);
    return temporaries_vreg_slots_;
  }

  void SetNumberOfVRegs(uint16_t number_of_vregs) {
    number_of_vregs_ = number_of_vregs;
  }

  uint16_t GetNumberOfVRegs() const {
    return number_of_vregs_;
  }

  void SetNumberOfInVRegs(uint16_t value) {
    number_of_in_vregs_ = value;
  }

  uint16_t GetNumberOfInVRegs() const {
    return number_of_in_vregs_;
  }

  uint16_t GetNumberOfLocalVRegs() const {
    DCHECK(!in_ssa_form_);
    return number_of_vregs_ - number_of_in_vregs_;
  }

  const ArenaVector<HBasicBlock*>& GetReversePostOrder() const {
    return reverse_post_order_;
  }

  ArrayRef<HBasicBlock* const> GetReversePostOrderSkipEntryBlock() const {
    DCHECK(GetReversePostOrder()[0] == entry_block_);
    return ArrayRef<HBasicBlock* const>(GetReversePostOrder()).SubArray(1);
  }

  IterationRange<ArenaVector<HBasicBlock*>::const_reverse_iterator> GetPostOrder() const {
    return ReverseRange(GetReversePostOrder());
  }

  const ArenaVector<HBasicBlock*>& GetLinearOrder() const {
    return linear_order_;
  }

  IterationRange<ArenaVector<HBasicBlock*>::const_reverse_iterator> GetLinearPostOrder() const {
    return ReverseRange(GetLinearOrder());
  }
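
  // Typical traversal sketch (hypothetical pass code; `VisitBlock` is not
  // defined in this file): reverse post order visits each block after all of
  // its non-back-edge predecessors, the order most forward dataflow passes use.
  //
  //   for (HBasicBlock* block : graph->GetReversePostOrder()) {
  //     VisitBlock(block);
  //   }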

  bool HasBoundsChecks() const {
    return has_bounds_checks_;
  }

  void SetHasBoundsChecks(bool value) {
    has_bounds_checks_ = value;
  }

  // Is the code known to be robust against eliminating dead references
  // and the effects of early finalization?
  bool IsDeadReferenceSafe() const { return dead_reference_safe_; }

  void MarkDeadReferenceUnsafe() { dead_reference_safe_ = false; }

  bool IsDebuggable() const { return debuggable_; }

  // Returns a constant of the given type and value. If it does not exist
  // already, it is created and inserted into the graph. This method is only for
  // integral types.
  HConstant* GetConstant(DataType::Type type, int64_t value);

  // TODO: This is problematic for the consistency of reference type propagation
  // because it can be created anytime after the pass and thus it will be left
  // with an invalid type.
  HNullConstant* GetNullConstant();

  HIntConstant* GetIntConstant(int32_t value);
  HLongConstant* GetLongConstant(int64_t value);
  HFloatConstant* GetFloatConstant(float value);
  HDoubleConstant* GetDoubleConstant(double value);

  HCurrentMethod* GetCurrentMethod();

  const DexFile& GetDexFile() const {
    return dex_file_;
  }

  uint32_t GetMethodIdx() const {
    return method_idx_;
  }

  // Get the method name (without the signature), e.g. "<init>"
  const char* GetMethodName() const;

  // Get the pretty method name (class + name + optionally signature).
  std::string PrettyMethod(bool with_signature = true) const;

  InvokeType GetInvokeType() const {
    return invoke_type_;
  }

  InstructionSet GetInstructionSet() const {
    return instruction_set_;
  }

  bool IsCompilingOsr() const { return compilation_kind_ == CompilationKind::kOsr; }

  bool IsCompilingBaseline() const { return compilation_kind_ == CompilationKind::kBaseline; }

  CompilationKind GetCompilationKind() const { return compilation_kind_; }

  ArenaSet<ArtMethod*>& GetCHASingleImplementationList() {
    return cha_single_implementation_list_;
  }

  // In case of OSR we intend to use SuspendChecks as an entry point to the
  // function; for debuggable graphs we might deoptimize to interpreter from
  // SuspendChecks. In these cases we should always generate code for them.
  bool SuspendChecksAreAllowedToNoOp() const {
    return !IsDebuggable() && !IsCompilingOsr();
  }

  void AddCHASingleImplementationDependency(ArtMethod* method) {
    cha_single_implementation_list_.insert(method);
  }

  bool HasShouldDeoptimizeFlag() const {
    return number_of_cha_guards_ != 0 || debuggable_;
  }

  bool HasTryCatch() const { return has_try_catch_; }
  void SetHasTryCatch(bool value) { has_try_catch_ = value; }

  bool HasMonitorOperations() const { return has_monitor_operations_; }
  void SetHasMonitorOperations(bool value) { has_monitor_operations_ = value; }

  bool HasTraditionalSIMD() { return has_traditional_simd_; }
  void SetHasTraditionalSIMD(bool value) { has_traditional_simd_ = value; }

  bool HasPredicatedSIMD() { return has_predicated_simd_; }
  void SetHasPredicatedSIMD(bool value) { has_predicated_simd_ = value; }

  bool HasSIMD() const { return has_traditional_simd_ || has_predicated_simd_; }

  bool HasLoops() const { return has_loops_; }
  void SetHasLoops(bool value) { has_loops_ = value; }

  bool HasIrreducibleLoops() const { return has_irreducible_loops_; }
  void SetHasIrreducibleLoops(bool value) { has_irreducible_loops_ = value; }

  bool HasDirectCriticalNativeCall() const { return has_direct_critical_native_call_; }
  void SetHasDirectCriticalNativeCall(bool value) { has_direct_critical_native_call_ = value; }

  bool HasAlwaysThrowingInvokes() const { return has_always_throwing_invokes_; }
  void SetHasAlwaysThrowingInvokes(bool value) { has_always_throwing_invokes_ = value; }

  ArtMethod* GetArtMethod() const { return art_method_; }
  void SetArtMethod(ArtMethod* method) { art_method_ = method; }

  void SetProfilingInfo(ProfilingInfo* info) { profiling_info_ = info; }
  ProfilingInfo* GetProfilingInfo() const { return profiling_info_; }

  ReferenceTypeInfo GetInexactObjectRti() {
    return ReferenceTypeInfo::Create(handle_cache_.GetObjectClassHandle(), /* is_exact= */ false);
  }

  uint32_t GetNumberOfCHAGuards() const { return number_of_cha_guards_; }
  void SetNumberOfCHAGuards(uint32_t num) { number_of_cha_guards_ = num; }
  void IncrementNumberOfCHAGuards() { number_of_cha_guards_++; }

  void SetUsefulOptimizing() { useful_optimizing_ = true; }
  bool IsUsefulOptimizing() const { return useful_optimizing_; }

 private:
  void RemoveDeadBlocksInstructionsAsUsersAndDisconnect(BitVectorView<const size_t> visited) const;
  void RemoveDeadBlocks(BitVectorView<const size_t> visited);

  template <class InstructionType, typename ValueType>
  InstructionType* CreateConstant(ValueType value,
                                  ArenaSafeMap<ValueType, InstructionType*>* cache);

  void InsertConstant(HConstant* instruction);

  // Cache a float constant into the graph. This method should only be
  // called by the SsaBuilder when creating "equivalent" instructions.
  void CacheFloatConstant(HFloatConstant* constant);

  // See CacheFloatConstant comment.
  void CacheDoubleConstant(HDoubleConstant* constant);

  ArenaAllocator* const allocator_;
  ArenaStack* const arena_stack_;

  HandleCache handle_cache_;

  // List of blocks in insertion order.
  ArenaVector<HBasicBlock*> blocks_;

  // List of blocks to perform a reverse post order tree traversal.
  ArenaVector<HBasicBlock*> reverse_post_order_;

  // List of blocks to perform a linear order tree traversal. Unlike the reverse
  // post order, this order is not incrementally kept up-to-date.
  ArenaVector<HBasicBlock*> linear_order_;

  HBasicBlock* entry_block_;
  HBasicBlock* exit_block_;

  // The number of virtual registers in this method. Contains the parameters.
  uint16_t number_of_vregs_;

  // The number of virtual registers used by parameters of this method.
  uint16_t number_of_in_vregs_;

  // Number of vreg size slots that the temporaries use (used in baseline compiler).
  size_t temporaries_vreg_slots_;

  // Flag whether there are bounds checks in the graph. We can skip
  // BCE if it's false.
  bool has_bounds_checks_;

  // Flag whether there are try/catch blocks in the graph. We will skip
  // try/catch-related passes if it's false.
  bool has_try_catch_;

  // Flag whether there are any HMonitorOperation in the graph. If yes this will mandate
  // DexRegisterMap to be present to allow deadlock analysis for non-debuggable code.
  bool has_monitor_operations_;

  // Flags whether SIMD (traditional or predicated) instructions appear in the graph.
  // If either is true, the code generators may have to be more careful spilling the wider
  // contents of SIMD registers.
  bool has_traditional_simd_;
  bool has_predicated_simd_;

  // Flag whether there are any loops in the graph. We can skip loop
  // optimization if it's false.
  bool has_loops_;

  // Flag whether there are any irreducible loops in the graph.
  bool has_irreducible_loops_;

  // Flag whether there are any direct calls to native code registered
  // for @CriticalNative methods.
  bool has_direct_critical_native_call_;

  // Flag whether the graph contains invokes that always throw.
  bool has_always_throwing_invokes_;

  // Is the code known to be robust against eliminating dead references
  // and the effects of early finalization? If false, dead reference variables
  // are kept if they might be visible to the garbage collector.
  // Currently this means that the class was declared to be dead-reference-safe,
  // the method accesses no reachability-sensitive fields or data, and the same
  // is true for any methods that were inlined into the current one.
  bool dead_reference_safe_;

  // Indicates whether the graph should be compiled in a way that
  // ensures full debuggability. If false, we can apply more
  // aggressive optimizations that may limit the level of debugging.
  const bool debuggable_;

  // The current id to assign to a newly added instruction. See HInstruction.id_.
  int32_t current_instruction_id_;

  // The dex file this method is from.
  const DexFile& dex_file_;

  // The method index in the dex file.
  const uint32_t method_idx_;

  // If inlined, this encodes how the callee is being invoked.
  const InvokeType invoke_type_;

  // Whether the graph has been transformed to SSA form. Only used
  // in debug mode to ensure we are not using properties only valid
  // for non-SSA form (like the number of temporaries).
  bool in_ssa_form_;

  // Number of CHA guards in the graph. Used to short-circuit the
  // CHA guard optimization pass when there is no CHA guard left.
  uint32_t number_of_cha_guards_;

  const InstructionSet instruction_set_;

  // Cached constants.
  HNullConstant* cached_null_constant_;
  ArenaSafeMap<int32_t, HIntConstant*> cached_int_constants_;
  ArenaSafeMap<int32_t, HFloatConstant*> cached_float_constants_;
  ArenaSafeMap<int64_t, HLongConstant*> cached_long_constants_;
  ArenaSafeMap<int64_t, HDoubleConstant*> cached_double_constants_;

  HCurrentMethod* cached_current_method_;

  // The ArtMethod this graph is for. Note that for AOT, it may be null,
  // for example for methods whose declaring class could not be resolved
  // (such as when the superclass could not be found).
  ArtMethod* art_method_;

  // The `ProfilingInfo` associated with the method being compiled.
  ProfilingInfo* profiling_info_;

  // How we are compiling the graph: either optimized, osr, or baseline.
  // For osr, we will make all loops seen as irreducible and emit special
  // stack maps to mark compiled code entries which the interpreter can
  // directly jump to.
  const CompilationKind compilation_kind_;

  // Whether it is still useful to re-optimize this method after compiling it
  // baseline.
  bool useful_optimizing_;

  // List of methods that are assumed to have single implementation.
  ArenaSet<ArtMethod*> cha_single_implementation_list_;

  friend class SsaBuilder;           // For caching constants.
  friend class SsaLivenessAnalysis;  // For the linear order.
  friend class HInliner;             // For the reverse post order.
  ART_FRIEND_TEST(GraphTest, IfSuccessorSimpleJoinBlock1);
  DISALLOW_COPY_AND_ASSIGN(HGraph);
};

class HLoopInformation : public ArenaObject<kArenaAllocLoopInfo> {
 public:
  HLoopInformation(HBasicBlock* header, HGraph* graph)
      : header_(header),
        suspend_check_(nullptr),
        irreducible_(false),
        contains_irreducible_loop_(false),
        back_edges_(graph->GetAllocator()->Adapter(kArenaAllocLoopInfoBackEdges)),
        // Make bit vector growable, as the number of blocks may change.
        blocks_(graph->GetAllocator(),
                graph->GetBlocks().size(),
                true,
                kArenaAllocLoopInfoBackEdges) {
    back_edges_.reserve(kDefaultNumberOfBackEdges);
  }

  bool IsIrreducible() const { return irreducible_; }
  bool ContainsIrreducibleLoop() const { return contains_irreducible_loop_; }

  void Dump(std::ostream& os);

  HBasicBlock* GetHeader() const {
    return header_;
  }

  void SetHeader(HBasicBlock* block) {
    header_ = block;
  }

  HSuspendCheck* GetSuspendCheck() const { return suspend_check_; }
  void SetSuspendCheck(HSuspendCheck* check) { suspend_check_ = check; }
  bool HasSuspendCheck() const { return suspend_check_ != nullptr; }

  void AddBackEdge(HBasicBlock* back_edge) {
    back_edges_.push_back(back_edge);
  }

  void RemoveBackEdge(HBasicBlock* back_edge) {
    RemoveElement(back_edges_, back_edge);
  }

  bool IsBackEdge(const HBasicBlock& block) const {
    return ContainsElement(back_edges_, &block);
  }

  size_t NumberOfBackEdges() const {
    return back_edges_.size();
  }

  HBasicBlock* GetPreHeader() const;

  const ArenaVector<HBasicBlock*>& GetBackEdges() const {
    return back_edges_;
  }

  // Returns the lifetime position of the back edge that has the
  // greatest lifetime position.
  size_t GetLifetimeEnd() const;

  void ReplaceBackEdge(HBasicBlock* existing, HBasicBlock* new_back_edge) {
    ReplaceElement(back_edges_, existing, new_back_edge);
  }

  // Finds blocks that are part of this loop.
  void Populate();

  // Updates the block population of this loop and, recursively, of all its
  // outer loops after the population of the inner loop has been updated.
  void PopulateInnerLoopUpwards(HLoopInformation* inner_loop);

  // Returns whether this loop information contains `block`.
  // Note that this loop information *must* be populated before entering this function.
  bool Contains(const HBasicBlock& block) const;

  // Returns whether this loop information is an inner loop of `other`.
  // Note that `other` *must* be populated before entering this function.
  bool IsIn(const HLoopInformation& other) const;

  // Returns true if instruction is not defined within this loop.
  bool IsDefinedOutOfTheLoop(HInstruction* instruction) const;

  const ArenaBitVector& GetBlocks() const { return blocks_; }

  void Add(HBasicBlock* block);
  void Remove(HBasicBlock* block);

  void ClearAllBlocks() {
    blocks_.ClearAllBits();
  }

  bool HasBackEdgeNotDominatedByHeader() const;

  bool IsPopulated() const {
    return blocks_.GetHighestBitSet() != -1;
  }

  bool DominatesAllBackEdges(HBasicBlock* block);

  bool HasExitEdge() const;

  // Resets back edge and blocks-in-loop data.
  void ResetBasicBlockData() {
    back_edges_.clear();
    ClearAllBlocks();
  }

 private:
  // Internal recursive implementation of `Populate`.
  void PopulateRecursive(HBasicBlock* block);
  void PopulateIrreducibleRecursive(HBasicBlock* block, ArenaBitVector* finalized);

  HBasicBlock* header_;
  HSuspendCheck* suspend_check_;
  bool irreducible_;
  bool contains_irreducible_loop_;
  ArenaVector<HBasicBlock*> back_edges_;
  ArenaBitVector blocks_;

  DISALLOW_COPY_AND_ASSIGN(HLoopInformation);
};

// Stores try/catch information for basic blocks.
// Note that HGraph is constructed so that catch blocks cannot simultaneously
// be try blocks.
class TryCatchInformation : public ArenaObject<kArenaAllocTryCatchInfo> {
 public:
  // Try block information constructor.
  explicit TryCatchInformation(const HTryBoundary& try_entry)
      : try_entry_(&try_entry),
        catch_dex_file_(nullptr),
        catch_type_index_(dex::TypeIndex::Invalid()) {
    DCHECK(try_entry_ != nullptr);
  }

  // Catch block information constructor.
  TryCatchInformation(dex::TypeIndex catch_type_index, const DexFile& dex_file)
      : try_entry_(nullptr),
        catch_dex_file_(&dex_file),
        catch_type_index_(catch_type_index) {}

  bool IsTryBlock() const { return try_entry_ != nullptr; }

  const HTryBoundary& GetTryEntry() const {
    DCHECK(IsTryBlock());
    return *try_entry_;
  }

  bool IsCatchBlock() const { return catch_dex_file_ != nullptr; }

  bool IsValidTypeIndex() const {
    DCHECK(IsCatchBlock());
    return catch_type_index_.IsValid();
  }

  dex::TypeIndex GetCatchTypeIndex() const {
    DCHECK(IsCatchBlock());
    return catch_type_index_;
  }

  const DexFile& GetCatchDexFile() const {
    DCHECK(IsCatchBlock());
    return *catch_dex_file_;
  }

  void SetInvalidTypeIndex() {
    catch_type_index_ = dex::TypeIndex::Invalid();
  }

 private:
  // One of possibly several TryBoundary instructions entering the block's try.
  // Only set for try blocks.
  const HTryBoundary* try_entry_;

  // Exception type information. Only set for catch blocks.
  const DexFile* catch_dex_file_;
  dex::TypeIndex catch_type_index_;
};

static constexpr size_t kNoLifetime = -1;
static constexpr uint32_t kInvalidBlockId = static_cast<uint32_t>(-1);

// A block in a method. Contains the list of instructions represented
// as a double linked list. Each block knows its predecessors and
// successors.

class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
 public:
  explicit HBasicBlock(HGraph* graph, uint32_t dex_pc = kNoDexPc)
      : graph_(graph),
        predecessors_(graph->GetAllocator()->Adapter(kArenaAllocPredecessors)),
        successors_(graph->GetAllocator()->Adapter(kArenaAllocSuccessors)),
        loop_information_(nullptr),
        dominator_(nullptr),
        dominated_blocks_(graph->GetAllocator()->Adapter(kArenaAllocDominated)),
        block_id_(kInvalidBlockId),
        dex_pc_(dex_pc),
        lifetime_start_(kNoLifetime),
        lifetime_end_(kNoLifetime),
        try_catch_information_(nullptr) {
    predecessors_.reserve(kDefaultNumberOfPredecessors);
    successors_.reserve(kDefaultNumberOfSuccessors);
    dominated_blocks_.reserve(kDefaultNumberOfDominatedBlocks);
  }

  const ArenaVector<HBasicBlock*>& GetPredecessors() const {
    return predecessors_;
  }

  size_t GetNumberOfPredecessors() const {
    return GetPredecessors().size();
  }

  const ArenaVector<HBasicBlock*>& GetSuccessors() const {
    return successors_;
  }

  ArrayRef<HBasicBlock* const> GetNormalSuccessors() const;
  ArrayRef<HBasicBlock* const> GetExceptionalSuccessors() const;

  bool HasSuccessor(const HBasicBlock* block, size_t start_from = 0u) {
    return ContainsElement(successors_, block, start_from);
  }

  const ArenaVector<HBasicBlock*>& GetDominatedBlocks() const {
    return dominated_blocks_;
  }

  bool IsEntryBlock() const {
    return graph_->GetEntryBlock() == this;
  }

  bool IsExitBlock() const {
    return graph_->GetExitBlock() == this;
  }

  bool IsSingleGoto() const;
  bool IsSingleReturn() const;
  bool IsSingleReturnOrReturnVoidAllowingPhis() const;
  bool IsSingleTryBoundary() const;

  // Returns true if this block emits nothing but a jump.
  bool IsSingleJump() const {
    HLoopInformation* loop_info = GetLoopInformation();
    return (IsSingleGoto() || IsSingleTryBoundary())
           // Back edges generate a suspend check.
           && (loop_info == nullptr || !loop_info->IsBackEdge(*this));
  }

  void AddBackEdge(HBasicBlock* back_edge) {
    if (loop_information_ == nullptr) {
      loop_information_ = new (graph_->GetAllocator()) HLoopInformation(this, graph_);
    }
    DCHECK_EQ(loop_information_->GetHeader(), this);
    loop_information_->AddBackEdge(back_edge);
  }

  // Registers a back edge; if the block was not a loop header before the call,
  // associates a newly created loop info with it.
  //
  // Used in SuperblockCloner to preserve the LoopInformation object instead of
  // resetting loop info for all blocks during back edge recalculation.
  void AddBackEdgeWhileUpdating(HBasicBlock* back_edge) {
    if (loop_information_ == nullptr || loop_information_->GetHeader() != this) {
      loop_information_ = new (graph_->GetAllocator()) HLoopInformation(this, graph_);
    }
    loop_information_->AddBackEdge(back_edge);
  }

  HGraph* GetGraph() const { return graph_; }
  void SetGraph(HGraph* graph) { graph_ = graph; }

  uint32_t GetBlockId() const { return block_id_; }
  void SetBlockId(int id) { block_id_ = id; }
  uint32_t GetDexPc() const { return dex_pc_; }

  HBasicBlock* GetDominator() const { return dominator_; }
  void SetDominator(HBasicBlock* dominator) { dominator_ = dominator; }
  void AddDominatedBlock(HBasicBlock* block) { dominated_blocks_.push_back(block); }

  void RemoveDominatedBlock(HBasicBlock* block) {
    RemoveElement(dominated_blocks_, block);
  }

  void ReplaceDominatedBlock(HBasicBlock* existing, HBasicBlock* new_block) {
    ReplaceElement(dominated_blocks_, existing, new_block);
  }

  void ClearDominanceInformation();

  int NumberOfBackEdges() const {
    return IsLoopHeader() ? loop_information_->NumberOfBackEdges() : 0;
  }

  HInstruction* GetFirstInstruction() const { return instructions_.first_instruction_; }
  HInstruction* GetLastInstruction() const { return instructions_.last_instruction_; }
  const HInstructionList& GetInstructions() const { return instructions_; }
  HInstruction* GetFirstPhi() const { return phis_.first_instruction_; }
  HInstruction* GetLastPhi() const { return phis_.last_instruction_; }
  const HInstructionList& GetPhis() const { return phis_; }

  HInstruction* GetFirstInstructionDisregardMoves() const;

  void AddSuccessor(HBasicBlock* block) {
    successors_.push_back(block);
    block->predecessors_.push_back(this);
  }

  void ReplaceSuccessor(HBasicBlock* existing, HBasicBlock* new_block) {
    size_t successor_index = GetSuccessorIndexOf(existing);
    existing->RemovePredecessor(this);
    new_block->predecessors_.push_back(this);
    successors_[successor_index] = new_block;
  }

  void ReplacePredecessor(HBasicBlock* existing, HBasicBlock* new_block) {
    size_t predecessor_index = GetPredecessorIndexOf(existing);
    existing->RemoveSuccessor(this);
    new_block->successors_.push_back(this);
    predecessors_[predecessor_index] = new_block;
  }

  // Insert `this` between `predecessor` and `successor`. This method
  // preserves the indices, and will update the first edge found between
  // `predecessor` and `successor`.
  void InsertBetween(HBasicBlock* predecessor, HBasicBlock* successor) {
    size_t predecessor_index = successor->GetPredecessorIndexOf(predecessor);
    size_t successor_index = predecessor->GetSuccessorIndexOf(successor);
    successor->predecessors_[predecessor_index] = this;
    predecessor->successors_[successor_index] = this;
    successors_.push_back(successor);
    predecessors_.push_back(predecessor);
  }

  void RemovePredecessor(HBasicBlock* block) {
    predecessors_.erase(predecessors_.begin() + GetPredecessorIndexOf(block));
  }

  void RemoveSuccessor(HBasicBlock* block) {
    successors_.erase(successors_.begin() + GetSuccessorIndexOf(block));
  }

  void ClearAllPredecessors() {
    predecessors_.clear();
  }

  void AddPredecessor(HBasicBlock* block) {
    predecessors_.push_back(block);
    block->successors_.push_back(this);
  }

  void SwapPredecessors() {
    DCHECK_EQ(predecessors_.size(), 2u);
    std::swap(predecessors_[0], predecessors_[1]);
  }

  void SwapSuccessors() {
    DCHECK_EQ(successors_.size(), 2u);
    std::swap(successors_[0], successors_[1]);
  }

  size_t GetPredecessorIndexOf(HBasicBlock* predecessor) const {
    return IndexOfElement(predecessors_, predecessor);
  }

  size_t GetSuccessorIndexOf(HBasicBlock* successor) const {
    return IndexOfElement(successors_, successor);
  }

  HBasicBlock* GetSinglePredecessor() const {
    DCHECK_EQ(GetPredecessors().size(), 1u);
    return GetPredecessors()[0];
  }

  HBasicBlock* GetSingleSuccessor() const {
    DCHECK_EQ(GetSuccessors().size(), 1u);
    return GetSuccessors()[0];
  }

  // Returns whether the first occurrence of `predecessor` in the list of
  // predecessors is at index `idx`.
  bool IsFirstIndexOfPredecessor(HBasicBlock* predecessor, size_t idx) const {
    DCHECK_EQ(GetPredecessors()[idx], predecessor);
    return GetPredecessorIndexOf(predecessor) == idx;
  }

  // Create a new block between this block and its predecessors. The new block
  // is added to the graph, all predecessor edges are relinked to it and an edge
  // is created to `this`. Returns the new empty block. Reverse post order or
  // loop and try/catch information are not updated.
  HBasicBlock* CreateImmediateDominator();

  // Split the block into two blocks just before `cursor`. Returns the newly
  // created, latter block. Note that this method will add the block to the
  // graph, create a Goto at the end of the former block and will create an edge
  // between the blocks. It will not, however, update the reverse post order or
  // loop and try/catch information.
  HBasicBlock* SplitBefore(HInstruction* cursor, bool require_graph_not_in_ssa_form = true);

  // Split the block into two blocks just before `cursor`. Returns the newly
  // created block. Note that this method just updates raw block information,
  // like predecessors, successors, dominators, and instruction list. It does not
  // update the graph, reverse post order, loop information, nor make sure the
  // blocks are consistent (for example ending with a control flow instruction).
  HBasicBlock* SplitBeforeForInlining(HInstruction* cursor);

  // Similar to `SplitBeforeForInlining` but does it after `cursor`.
  HBasicBlock* SplitAfterForInlining(HInstruction* cursor);

  // Merge `other` at the end of `this`. Successors and dominated blocks of
  // `other` are changed to be successors and dominated blocks of `this`. Note
  // that this method does not update the graph, reverse post order, loop
  // information, nor make sure the blocks are consistent (for example ending
  // with a control flow instruction).
  void MergeWithInlined(HBasicBlock* other);

  // Replace `this` with `other`. Predecessors, successors, and dominated blocks
  // of `this` are moved to `other`.
  // Note that this method does not update the graph, reverse post order, loop
  // information, nor make sure the blocks are consistent (for example ending
  // with a control flow instruction).
  void ReplaceWith(HBasicBlock* other);

  // Merges the instructions of `other` at the end of `this`.
  void MergeInstructionsWith(HBasicBlock* other);

  // Merge `other` at the end of `this`. This method updates loops, reverse post
  // order, links to predecessors, successors, dominators and deletes the block
  // from the graph. The two blocks must be successive, i.e. `this` the only
  // predecessor of `other` and vice versa.
  void MergeWith(HBasicBlock* other);

  // Disconnects `this` from all its predecessors, successors and dominator,
  // removes it from all loops it is included in and eventually from the graph.
  // The block must not dominate any other block. Predecessors and successors
  // are safely updated.
  void DisconnectAndDelete();

  // Disconnects `this` from all its successors and updates their phis, if the successors have them.
  // If `visited` is provided, it will use the information to know if a successor is reachable and
  // skip updating those phis.
  void DisconnectFromSuccessors(BitVectorView<const size_t> visited = {});

  // Removes the catch phi uses of the instructions in `this`, and then remove the instruction
  // itself. If `building_dominator_tree` is true, it will not remove the instruction as user, since
  // we do it in a previous step. This is a special case for building up the dominator tree: we want
  // to eliminate uses before inputs but we don't have domination information, so we remove all
  // connections from input/uses first before removing any instruction.
  // This method assumes the instructions have been removed from all users with the exception of
  // catch phis because of missing exceptional edges in the graph.
  void RemoveCatchPhiUsesAndInstruction(bool building_dominator_tree);

  void AddInstruction(HInstruction* instruction);
  // Insert `instruction` before/after an existing instruction `cursor`.
  void InsertInstructionBefore(HInstruction* instruction, HInstruction* cursor);
  void InsertInstructionAfter(HInstruction* instruction, HInstruction* cursor);
  // Replace phi `initial` with `replacement` within this block.
  void ReplaceAndRemovePhiWith(HPhi* initial, HPhi* replacement);
  // Replace instruction `initial` with `replacement` within this block.
  void ReplaceAndRemoveInstructionWith(HInstruction* initial,
                                       HInstruction* replacement);
  void AddPhi(HPhi* phi);
  void InsertPhiAfter(HPhi* instruction, HPhi* cursor);
  // RemoveInstruction and RemovePhi delete a given instruction from the respective
  // instruction list. With 'ensure_safety' set to true, it verifies that the
  // instruction is not in use and removes it from the use lists of its inputs.
  void RemoveInstruction(HInstruction* instruction, bool ensure_safety = true);
  void RemovePhi(HPhi* phi, bool ensure_safety = true);
  void RemoveInstructionOrPhi(HInstruction* instruction, bool ensure_safety = true);

  bool IsLoopHeader() const {
    return IsInLoop() && (loop_information_->GetHeader() == this);
  }

  bool IsLoopPreHeaderFirstPredecessor() const {
    DCHECK(IsLoopHeader());
    return GetPredecessors()[0] == GetLoopInformation()->GetPreHeader();
  }

  bool IsFirstPredecessorBackEdge() const {
    DCHECK(IsLoopHeader());
    return GetLoopInformation()->IsBackEdge(*GetPredecessors()[0]);
  }

  HLoopInformation* GetLoopInformation() const {
    return loop_information_;
  }

  // Set the loop_information_ on this block. Overrides the current
  // loop_information if it is an outer loop of the passed loop information.
  // Note that this method is called while creating the loop information.
  void SetInLoop(HLoopInformation* info) {
    if (IsLoopHeader()) {
      // Nothing to do. This just means `info` is an outer loop.
    } else if (!IsInLoop()) {
      loop_information_ = info;
    } else if (loop_information_->Contains(*info->GetHeader())) {
      // Block is currently part of an outer loop. Make it part of this inner loop.
      // Note that a non-loop header having a loop information means this loop
      // information has already been populated.
      loop_information_ = info;
    } else {
      // Block is part of an inner loop. Do not update the loop information.
      // Note that we cannot do the check `info->Contains(*loop_information_->GetHeader())`
      // at this point, because this method is being called while populating `info`.
    }
  }
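
  // A small scenario for the cases above (illustrative only). With an outer
  // loop O and an inner loop I nested in it, a block B in the inner body may
  // be visited while either loop is being populated:
  //  - B is in no loop yet: SetInLoop(I) records I.
  //  - B is tagged with O and O contains I's header: SetInLoop(I) moves B to
  //    the inner loop I.
  //  - B is already tagged with I: SetInLoop(O) leaves it untouched; the outer
  //    membership is recovered through the loop hierarchy instead.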

  // Raw update of the loop information.
  void SetLoopInformation(HLoopInformation* info) {
    loop_information_ = info;
  }

  bool IsInLoop() const { return loop_information_ != nullptr; }

  TryCatchInformation* GetTryCatchInformation() const { return try_catch_information_; }

  void SetTryCatchInformation(TryCatchInformation* try_catch_information) {
    try_catch_information_ = try_catch_information;
  }

  bool IsTryBlock() const {
    return try_catch_information_ != nullptr && try_catch_information_->IsTryBlock();
  }

  bool IsCatchBlock() const {
    return try_catch_information_ != nullptr && try_catch_information_->IsCatchBlock();
  }

  // Returns the try entry that this block's successors should have. They will
  // be in the same try, unless the block ends in a try boundary. In that case,
  // the appropriate try entry will be returned.
  const HTryBoundary* ComputeTryEntryOfSuccessors() const;

  bool HasThrowingInstructions() const;

  // Returns whether this block dominates the block passed as parameter.
  bool Dominates(const HBasicBlock* block) const;

  size_t GetLifetimeStart() const { return lifetime_start_; }
  size_t GetLifetimeEnd() const { return lifetime_end_; }

  void SetLifetimeStart(size_t start) { lifetime_start_ = start; }
  void SetLifetimeEnd(size_t end) { lifetime_end_ = end; }

  bool EndsWithControlFlowInstruction() const;
  bool EndsWithReturn() const;
  bool EndsWithIf() const;
  bool EndsWithTryBoundary() const;
  bool HasSinglePhi() const;

 private:
  HGraph* graph_;
  ArenaVector<HBasicBlock*> predecessors_;
  ArenaVector<HBasicBlock*> successors_;
  HInstructionList instructions_;
  HInstructionList phis_;
  HLoopInformation* loop_information_;
  HBasicBlock* dominator_;
  ArenaVector<HBasicBlock*> dominated_blocks_;
  uint32_t block_id_;
  // The dex program counter of the first instruction of this block.
  const uint32_t dex_pc_;
  size_t lifetime_start_;
  size_t lifetime_end_;
  TryCatchInformation* try_catch_information_;

  friend class HGraph;
  friend class HInstruction;
  // Allow manual control of the ordering of predecessors/successors
  friend class OptimizingUnitTestHelper;

  DISALLOW_COPY_AND_ASSIGN(HBasicBlock);
};

// Iterates over the LoopInformation of all loops which contain 'block'
// from the innermost to the outermost.
class HLoopInformationOutwardIterator : public ValueObject {
 public:
  explicit HLoopInformationOutwardIterator(const HBasicBlock& block)
      : current_(block.GetLoopInformation()) {}

  bool Done() const { return current_ == nullptr; }

  void Advance() {
    DCHECK(!Done());
    current_ = current_->GetPreHeader()->GetLoopInformation();
  }

  HLoopInformation* Current() const {
    DCHECK(!Done());
    return current_;
  }

 private:
  HLoopInformation* current_;

  DISALLOW_COPY_AND_ASSIGN(HLoopInformationOutwardIterator);
};
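
// Usage sketch (hypothetical caller code): walks the loop information from the
// innermost loop containing `block` out to the outermost one.
//
//   for (HLoopInformationOutwardIterator it(*block); !it.Done(); it.Advance()) {
//     HLoopInformation* loop = it.Current();
//     // ... inspect `loop` ...
//   }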

#define FOR_EACH_CONCRETE_INSTRUCTION_SCALAR_COMMON(M) \
  M(Above, Condition) \
  M(AboveOrEqual, Condition) \
  M(Abs, UnaryOperation) \
  M(Add, BinaryOperation) \
  M(And, BinaryOperation) \
  M(ArrayGet, Instruction) \
  M(ArrayLength, Instruction) \
  M(ArraySet, Instruction) \
  M(Below, Condition) \
  M(BelowOrEqual, Condition) \
  M(BitwiseNegatedRight, BinaryOperation) \
  M(BooleanNot, UnaryOperation) \
  M(BoundsCheck, Instruction) \
  M(BoundType, Instruction) \
  M(CheckCast, Instruction) \
  M(ClassTableGet, Instruction) \
  M(ClearException, Instruction) \
  M(ClinitCheck, Instruction) \
  M(Compare, BinaryOperation) \
  M(ConstructorFence, Instruction) \
  M(CurrentMethod, Instruction) \
  M(ShouldDeoptimizeFlag, Instruction) \
  M(Deoptimize, Instruction) \
  M(Div, BinaryOperation) \
  M(DivZeroCheck, Instruction) \
  M(DoubleConstant, Constant) \
  M(Equal, Condition) \
  M(Exit, Instruction) \
  M(FloatConstant, Constant) \
  M(Goto, Instruction) \
  M(GreaterThan, Condition) \
  M(GreaterThanOrEqual, Condition) \
  M(If, Instruction) \
  M(InstanceFieldGet, FieldAccess) \
  M(InstanceFieldSet, FieldAccess) \
  M(InstanceOf, Instruction) \
  M(IntConstant, Constant) \
  M(IntermediateAddress, Instruction) \
  M(InvokeUnresolved, Invoke) \
  M(InvokeInterface, Invoke) \
  M(InvokeStaticOrDirect, Invoke) \
  M(InvokeVirtual, Invoke) \
  M(InvokePolymorphic, Invoke) \
  M(InvokeCustom, Invoke) \
  M(LessThan, Condition) \
  M(LessThanOrEqual, Condition) \
  M(LoadClass, Instruction) \
  M(LoadException, Instruction) \
  M(LoadMethodHandle, Instruction) \
  M(LoadMethodType, Instruction) \
  M(LoadString, Instruction) \
  M(LongConstant, Constant) \
  M(Max, Instruction) \
  M(MemoryBarrier, Instruction) \
  M(MethodEntryHook, Instruction) \
  M(MethodExitHook, Instruction) \
  M(Min, BinaryOperation) \
  M(MonitorOperation, Instruction) \
  M(Mul, BinaryOperation) \
  M(Neg, UnaryOperation) \
  M(NewArray, Instruction) \
  M(NewInstance, Instruction) \
  M(Nop, Instruction) \
  M(Not, UnaryOperation) \
  M(NotEqual, Condition) \
  M(NullConstant, Instruction) \
  M(NullCheck, Instruction) \
  M(Or, BinaryOperation) \
  M(PackedSwitch, Instruction) \
  M(ParallelMove, Instruction) \
  M(ParameterValue, Instruction) \
  M(Phi, Instruction) \
  M(Rem, BinaryOperation) \
  M(Return, Instruction) \
  M(ReturnVoid, Instruction) \
  M(Rol, BinaryOperation) \
  M(Ror, BinaryOperation) \
  M(Shl, BinaryOperation) \
  M(Shr, BinaryOperation) \
  M(StaticFieldGet, FieldAccess) \
  M(StaticFieldSet, FieldAccess) \
  M(StringBuilderAppend, Instruction) \
  M(UnresolvedInstanceFieldGet, Instruction) \
  M(UnresolvedInstanceFieldSet, Instruction) \
  M(UnresolvedStaticFieldGet, Instruction) \
  M(UnresolvedStaticFieldSet, Instruction) \
  M(Select, Instruction) \
  M(Sub, BinaryOperation) \
  M(SuspendCheck, Instruction) \
  M(Throw, Instruction) \
  M(TryBoundary, Instruction) \
  M(TypeConversion, Instruction) \
  M(UShr, BinaryOperation) \
  M(Xor, BinaryOperation)

#define FOR_EACH_CONCRETE_INSTRUCTION_VECTOR_COMMON(M) \
  M(VecReplicateScalar, VecUnaryOperation) \
  M(VecExtractScalar, VecUnaryOperation) \
  M(VecReduce, VecUnaryOperation) \
  M(VecCnv, VecUnaryOperation) \
  M(VecNeg, VecUnaryOperation) \
  M(VecAbs, VecUnaryOperation) \
  M(VecNot, VecUnaryOperation) \
  M(VecAdd, VecBinaryOperation) \
  M(VecHalvingAdd, VecBinaryOperation) \
  M(VecSub, VecBinaryOperation) \
  M(VecMul, VecBinaryOperation) \
  M(VecDiv, VecBinaryOperation) \
  M(VecMin, VecBinaryOperation) \
  M(VecMax, VecBinaryOperation) \
  M(VecAnd, VecBinaryOperation) \
  M(VecAndNot, VecBinaryOperation) \
  M(VecOr, VecBinaryOperation) \
  M(VecXor, VecBinaryOperation) \
  M(VecSaturationAdd, VecBinaryOperation) \
  M(VecSaturationSub, VecBinaryOperation) \
  M(VecShl, VecBinaryOperation) \
  M(VecShr, VecBinaryOperation) \
  M(VecUShr, VecBinaryOperation) \
  M(VecSetScalars, VecOperation) \
  M(VecMultiplyAccumulate, VecOperation) \
  M(VecSADAccumulate, VecOperation) \
  M(VecDotProd, VecOperation) \
  M(VecLoad, VecMemoryOperation) \
  M(VecStore, VecMemoryOperation) \
  M(VecPredSetAll, VecPredSetOperation) \
  M(VecPredWhile, VecPredSetOperation) \
  M(VecPredToBoolean, VecOperation) \
  M(VecEqual, VecCondition) \
  M(VecNotEqual, VecCondition) \
  M(VecLessThan, VecCondition) \
  M(VecLessThanOrEqual, VecCondition) \
  M(VecGreaterThan, VecCondition) \
  M(VecGreaterThanOrEqual, VecCondition) \
  M(VecBelow, VecCondition) \
  M(VecBelowOrEqual, VecCondition) \
  M(VecAbove, VecCondition) \
  M(VecAboveOrEqual, VecCondition) \
  M(VecPredNot, VecPredSetOperation)

#define FOR_EACH_CONCRETE_INSTRUCTION_COMMON(M) \
  FOR_EACH_CONCRETE_INSTRUCTION_SCALAR_COMMON(M) \
  FOR_EACH_CONCRETE_INSTRUCTION_VECTOR_COMMON(M)

/*
 * Instructions, shared across several (not all) architectures.
 */
#if !defined(ART_ENABLE_CODEGEN_arm) && !defined(ART_ENABLE_CODEGEN_arm64)
#define FOR_EACH_CONCRETE_INSTRUCTION_SHARED(M)
#else
#define FOR_EACH_CONCRETE_INSTRUCTION_SHARED(M) \
  M(DataProcWithShifterOp, Instruction) \
  M(MultiplyAccumulate, Instruction) \
  M(IntermediateAddressIndex, Instruction)
#endif

#define FOR_EACH_CONCRETE_INSTRUCTION_ARM(M)

#define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M)

#if defined(ART_ENABLE_CODEGEN_riscv64)
#define FOR_EACH_CONCRETE_INSTRUCTION_RISCV64(M) M(Riscv64ShiftAdd, Instruction)
#else
#define FOR_EACH_CONCRETE_INSTRUCTION_RISCV64(M)
#endif

#ifndef ART_ENABLE_CODEGEN_x86
#define FOR_EACH_CONCRETE_INSTRUCTION_X86(M)
#else
#define FOR_EACH_CONCRETE_INSTRUCTION_X86(M) \
  M(X86ComputeBaseMethodAddress, Instruction) \
  M(X86LoadFromConstantTable, Instruction) \
  M(X86FPNeg, Instruction) \
  M(X86PackedSwitch, Instruction)
#endif

#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
#define FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(M) \
1474 M(X86AndNot, Instruction) \
1475 M(X86MaskOrResetLeastSetBit, Instruction)
1476 #else
1477 #define FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(M)
1478 #endif
1479
1480 #define FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M)
1481
1482 #define FOR_EACH_CONCRETE_INSTRUCTION(M) \
1483 FOR_EACH_CONCRETE_INSTRUCTION_COMMON(M) \
1484 FOR_EACH_CONCRETE_INSTRUCTION_SHARED(M) \
1485 FOR_EACH_CONCRETE_INSTRUCTION_ARM(M) \
1486 FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M) \
1487 FOR_EACH_CONCRETE_INSTRUCTION_RISCV64(M) \
1488 FOR_EACH_CONCRETE_INSTRUCTION_X86(M) \
1489 FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M) \
1490 FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(M)
1491
1492 #define FOR_EACH_ABSTRACT_INSTRUCTION(M) \
1493 M(Condition, BinaryOperation) \
1494 M(Constant, Instruction) \
1495 M(UnaryOperation, Instruction) \
1496 M(BinaryOperation, Instruction) \
1497 M(FieldAccess, Instruction) \
1498 M(Invoke, Instruction) \
1499 M(VecOperation, Instruction) \
1500 M(VecUnaryOperation, VecOperation) \
1501 M(VecBinaryOperation, VecOperation) \
1502 M(VecMemoryOperation, VecOperation) \
1503 M(VecPredSetOperation, VecOperation) \
1504 M(VecCondition, VecPredSetOperation)
1505
1506 #define FOR_EACH_INSTRUCTION(M) \
1507 FOR_EACH_CONCRETE_INSTRUCTION(M) \
1508 FOR_EACH_ABSTRACT_INSTRUCTION(M)
1509
1510 #define FORWARD_DECLARATION(type, super) class H##type;
1511 FOR_EACH_INSTRUCTION(FORWARD_DECLARATION)
1512 #undef FORWARD_DECLARATION
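// Note (added commentary): the FOR_EACH_* lists above are X-macros. A macro M(type, super) is
// expanded once per instruction kind, so the FORWARD_DECLARATION expansion above amounts to one
// forward declaration per instruction, e.g. `class HAdd;` for M(Add, BinaryOperation). The
// `super` argument is ignored here; other expansions of the same lists are expected to use it.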
1513
1514 #define DECLARE_INSTRUCTION(type) \
1515 private: \
1516 H##type& operator=(const H##type&) = delete; \
1517 public: \
1518 const char* DebugName() const override { return #type; } \
1519 HInstruction* Clone(ArenaAllocator* arena) const override { \
1520 DCHECK(IsClonable()); \
1521 return new (arena) H##type(*this); \
1522 } \
1523 void Accept(HGraphVisitor* visitor) override
1524
1525 #define DECLARE_ABSTRACT_INSTRUCTION(type) \
1526 private: \
1527 H##type& operator=(const H##type&) = delete; \
1528 public:
1529
1530 #define DEFAULT_COPY_CONSTRUCTOR(type) H##type(const H##type& other) = default;
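// Illustrative sketch (assumption, not a definition from this header): a concrete instruction
// class typically combines the macros above roughly as follows, which supplies DebugName(),
// Clone() and Accept() plus a defaulted copy constructor used by Clone():
//   class HAdd final : public HBinaryOperation {
//    public:
//     ...
//     DECLARE_INSTRUCTION(Add);
//    protected:
//     DEFAULT_COPY_CONSTRUCTOR(Add);
//   };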
1531
1532 template <typename T>
1533 class HUseListNode : public ArenaObject<kArenaAllocUseListNode>,
1534 public IntrusiveForwardListNode<HUseListNode<T>> {
1535 public:
1536 // Get the instruction which has this use as one of the inputs.
1537 T GetUser() const { return user_; }
1538 // Get the position of the input record that this use corresponds to.
1539 size_t GetIndex() const { return index_; }
1540 // Set the position of the input record that this use corresponds to.
1541 void SetIndex(size_t index) { index_ = index; }
1542
1543 private:
1544 HUseListNode(T user, size_t index)
1545 : user_(user), index_(index) {}
1546
1547 T const user_;
1548 size_t index_;
1549
1550 friend class HInstruction;
1551
1552 DISALLOW_COPY_AND_ASSIGN(HUseListNode);
1553 };
1554
1555 template <typename T>
1556 using HUseList = IntrusiveForwardList<HUseListNode<T>>;
1557
1558 // This class is used by HEnvironment and HInstruction classes to record the
1559 // instructions they use and pointers to the corresponding HUseListNodes kept
1560 // by the used instructions.
1561 template <typename T>
1562 class HUserRecord : public ValueObject {
1563 public:
1564 HUserRecord() : instruction_(nullptr), before_use_node_() {}
1565 explicit HUserRecord(HInstruction* instruction) : instruction_(instruction), before_use_node_() {}
1566
1567 HUserRecord(const HUserRecord<T>& old_record, typename HUseList<T>::iterator before_use_node)
1568 : HUserRecord(old_record.instruction_, before_use_node) {}
1569 HUserRecord(HInstruction* instruction, typename HUseList<T>::iterator before_use_node)
1570 : instruction_(instruction), before_use_node_(before_use_node) {
1571 DCHECK(instruction_ != nullptr);
1572 }
1573
1574 HInstruction* GetInstruction() const { return instruction_; }
1575 typename HUseList<T>::iterator GetBeforeUseNode() const { return before_use_node_; }
1576 typename HUseList<T>::iterator GetUseNode() const { return ++GetBeforeUseNode(); }
1577
1578 private:
1579 // Instruction used by the user.
1580 HInstruction* instruction_;
1581
1582 // Iterator before the corresponding entry in the use list kept by 'instruction_'.
1583 typename HUseList<T>::iterator before_use_node_;
1584 };
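// Note (added commentary): the record stores an iterator *before* the use node rather than the
// node itself because the use lists are singly-linked (IntrusiveForwardList); keeping the
// predecessor allows O(1) unlinking via erase_after(), and GetUseNode() is simply
// ++GetBeforeUseNode().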
1585
1586 // Helper class that extracts the input instruction from HUserRecord<HInstruction*>.
1587 // This is used for HInstruction::GetInputs() to return a container wrapper providing
1588 // HInstruction* values even though the underlying container has HUserRecord<>s.
1589 struct HInputExtractor {
1590 HInstruction* operator()(HUserRecord<HInstruction*>& record) const {
1591 return record.GetInstruction();
1592 }
1593 const HInstruction* operator()(const HUserRecord<HInstruction*>& record) const {
1594 return record.GetInstruction();
1595 }
1596 };
1597
1598 using HInputsRef = TransformArrayRef<HUserRecord<HInstruction*>, HInputExtractor>;
1599 using HConstInputsRef = TransformArrayRef<const HUserRecord<HInstruction*>, HInputExtractor>;
1600
1601 /**
1602 * Side-effects representation.
1603 *
1604 * For write/read dependences on fields/arrays, the dependence analysis uses
1605 * type disambiguation (e.g. a float field write cannot modify the value of an
1606 * integer field read) and the access type (e.g. a reference array write cannot
1607 * modify the value of a reference field read [although it may modify the
1608 * reference fetch prior to reading the field, which is represented by its own
1609 * write/read dependence]). The analysis makes conservative points-to
1610 * assumptions on reference types (e.g. two same typed arrays are assumed to be
1611 * the same, and any reference read depends on any reference read without
1612 * further regard of its type).
1613 *
1614 * kDependsOnGCBit is defined in the following way: instructions with kDependsOnGCBit must not be
1615 * alive across the point where garbage collection might happen.
1616 *
1617 * Note: Instructions with kCanTriggerGCBit do not depend on each other.
1618 *
1619 * kCanTriggerGCBit must be used for instructions for which GC might happen on the path across
1620 * those instructions from the compiler perspective (between this instruction and the next one
1621 * in the IR).
1622 *
1623 * Note: Instructions which can cause GC only on a fatal slow path do not need
1624 * kCanTriggerGCBit as the execution never returns to the instruction next to the exceptional
1625 * one. However the execution may return to compiled code if there is a catch block in the
1626 * current method; for this purpose the TryBoundary exit instruction has kCanTriggerGCBit
1627 * set.
1628 *
1629 * The internal representation uses 38 bits and is described in the table below.
1630 * The first line indicates the side effect, and for field/array accesses the
1631 * second line indicates the type of the access (in the order of the
1632 * DataType::Type enum).
1633 * The two numbered lines below indicate the bit position in the bitfield (read
1634 * vertically).
1635 *
1636 * |Depends on GC|ARRAY-R |FIELD-R |Can trigger GC|ARRAY-W |FIELD-W |
1637 * +-------------+---------+---------+--------------+---------+---------+
1638 * | |DFJISCBZL|DFJISCBZL| |DFJISCBZL|DFJISCBZL|
1639 * | 3 |333333322|222222221| 1 |111111110|000000000|
1640 * | 7 |654321098|765432109| 8 |765432109|876543210|
1641 *
1642 * Note that, to ease the implementation, 'changes' bits are least significant
1643 * bits, while 'dependency' bits are most significant bits.
1644 */
1645 class SideEffects : public ValueObject {
1646 public:
1647 SideEffects() : flags_(0) {}
1648
1649 static SideEffects None() {
1650 return SideEffects(0);
1651 }
1652
1653 static SideEffects All() {
1654 return SideEffects(kAllChangeBits | kAllDependOnBits);
1655 }
1656
1657 static SideEffects AllChanges() {
1658 return SideEffects(kAllChangeBits);
1659 }
1660
1661 static SideEffects AllDependencies() {
1662 return SideEffects(kAllDependOnBits);
1663 }
1664
1665 static SideEffects AllExceptGCDependency() {
1666 return AllWritesAndReads().Union(SideEffects::CanTriggerGC());
1667 }
1668
1669 static SideEffects AllWritesAndReads() {
1670 return SideEffects(kAllWrites | kAllReads);
1671 }
1672
1673 static SideEffects AllWrites() {
1674 return SideEffects(kAllWrites);
1675 }
1676
1677 static SideEffects AllReads() {
1678 return SideEffects(kAllReads);
1679 }
1680
1681 static SideEffects FieldWriteOfType(DataType::Type type, bool is_volatile) {
1682 return is_volatile
1683 ? AllWritesAndReads()
1684 : SideEffects(TypeFlag(type, kFieldWriteOffset));
1685 }
1686
1687 static SideEffects ArrayWriteOfType(DataType::Type type) {
1688 return SideEffects(TypeFlag(type, kArrayWriteOffset));
1689 }
1690
1691 static SideEffects FieldReadOfType(DataType::Type type, bool is_volatile) {
1692 return is_volatile
1693 ? AllWritesAndReads()
1694 : SideEffects(TypeFlag(type, kFieldReadOffset));
1695 }
1696
1697 static SideEffects ArrayReadOfType(DataType::Type type) {
1698 return SideEffects(TypeFlag(type, kArrayReadOffset));
1699 }
1700
1701 // Returns whether GC might happen across this instruction from the compiler perspective so
1702 // the next instruction in the IR would see that.
1703 //
1704 // See the SideEffect class comments.
1705 static SideEffects CanTriggerGC() {
1706 return SideEffects(1ULL << kCanTriggerGCBit);
1707 }
1708
1709 // Returns whether the instruction must not be alive across a GC point.
1710 //
1711 // See the SideEffect class comments.
1712 static SideEffects DependsOnGC() {
1713 return SideEffects(1ULL << kDependsOnGCBit);
1714 }
1715
1716 // Combines the side-effects of this and the other.
1717 SideEffects Union(SideEffects other) const {
1718 return SideEffects(flags_ | other.flags_);
1719 }
1720
1721 SideEffects Exclusion(SideEffects other) const {
1722 return SideEffects(flags_ & ~other.flags_);
1723 }
1724
1725 void Add(SideEffects other) {
1726 flags_ |= other.flags_;
1727 }
1728
1729 bool Includes(SideEffects other) const {
1730 return (other.flags_ & flags_) == other.flags_;
1731 }
1732
1733 bool HasSideEffects() const {
1734 return (flags_ & kAllChangeBits) != 0u;
1735 }
1736
1737 bool HasDependencies() const {
1738 return (flags_ & kAllDependOnBits) != 0u;
1739 }
1740
1741 // Returns true if there are no side effects or dependencies.
1742 bool DoesNothing() const {
1743 return flags_ == 0u;
1744 }
1745
1746 // Returns true if something is written.
1747 bool DoesAnyWrite() const {
1748 return (flags_ & kAllWrites) != 0u;
1749 }
1750
1751 // Returns true if something is read.
1752 bool DoesAnyRead() const {
1753 return (flags_ & kAllReads) != 0u;
1754 }
1755
1756 // Returns true if potentially everything is written and read
1757 // (every type and every kind of access).
1758 bool DoesAllReadWrite() const {
1759 return (flags_ & (kAllWrites | kAllReads)) == (kAllWrites | kAllReads);
1760 }
1761
1762 bool DoesAll() const {
1763 return flags_ == (kAllChangeBits | kAllDependOnBits);
1764 }
1765
1766 // Returns true if `this` may read something written by `other`.
1767 bool MayDependOn(SideEffects other) const {
1768 const uint64_t depends_on_flags = (flags_ & kAllDependOnBits) >> kChangeBits;
1769 return (other.flags_ & depends_on_flags) != 0u;
1770 }
1771
1772 // Returns string representation of flags (for debugging only).
1773 // Format: |x|DFJISCBZL|DFJISCBZL|y|DFJISCBZL|DFJISCBZL|
1774 std::string ToString() const {
1775 std::string flags = "|";
1776 for (int s = kLastBit; s >= 0; s--) {
1777 bool current_bit_is_set = ((flags_ >> s) & 1) != 0;
1778 if ((s == kDependsOnGCBit) || (s == kCanTriggerGCBit)) {
1779 // This is a bit for the GC side effect.
1780 if (current_bit_is_set) {
1781 flags += "GC";
1782 }
1783 flags += "|";
1784 } else {
1785 // This is a bit for the array/field analysis.
1786 // The underscore character stands for the 'can trigger GC' bit.
1787 static const char *kDebug = "LZBCSIJFDLZBCSIJFD_LZBCSIJFDLZBCSIJFD";
1788 if (current_bit_is_set) {
1789 flags += kDebug[s];
1790 }
1791 if ((s == kFieldWriteOffset) || (s == kArrayWriteOffset) ||
1792 (s == kFieldReadOffset) || (s == kArrayReadOffset)) {
1793 flags += "|";
1794 }
1795 }
1796 }
1797 return flags;
1798 }
1799
1800 bool Equals(const SideEffects& other) const { return flags_ == other.flags_; }
1801
1802 private:
1803 static constexpr int kFieldArrayAnalysisBits = 9;
1804
1805 static constexpr int kFieldWriteOffset = 0;
1806 static constexpr int kArrayWriteOffset = kFieldWriteOffset + kFieldArrayAnalysisBits;
1807 static constexpr int kLastBitForWrites = kArrayWriteOffset + kFieldArrayAnalysisBits - 1;
1808 static constexpr int kCanTriggerGCBit = kLastBitForWrites + 1;
1809
1810 static constexpr int kChangeBits = kCanTriggerGCBit + 1;
1811
1812 static constexpr int kFieldReadOffset = kCanTriggerGCBit + 1;
1813 static constexpr int kArrayReadOffset = kFieldReadOffset + kFieldArrayAnalysisBits;
1814 static constexpr int kLastBitForReads = kArrayReadOffset + kFieldArrayAnalysisBits - 1;
1815 static constexpr int kDependsOnGCBit = kLastBitForReads + 1;
1816
1817 static constexpr int kLastBit = kDependsOnGCBit;
1818 static constexpr int kDependOnBits = kLastBit + 1 - kChangeBits;
1819
1820 // Aliases.
1821
1822 static_assert(kChangeBits == kDependOnBits,
1823 "the 'change' bits should match the 'depend on' bits.");
1824
1825 static constexpr uint64_t kAllChangeBits = ((1ULL << kChangeBits) - 1);
1826 static constexpr uint64_t kAllDependOnBits = ((1ULL << kDependOnBits) - 1) << kChangeBits;
1827 static constexpr uint64_t kAllWrites =
1828 ((1ULL << (kLastBitForWrites + 1 - kFieldWriteOffset)) - 1) << kFieldWriteOffset;
1829 static constexpr uint64_t kAllReads =
1830 ((1ULL << (kLastBitForReads + 1 - kFieldReadOffset)) - 1) << kFieldReadOffset;
1831
1832 // Translates type to bit flag. The type must correspond to a Java type.
1833 static uint64_t TypeFlag(DataType::Type type, int offset) {
1834 int shift;
1835 switch (type) {
1836 case DataType::Type::kReference: shift = 0; break;
1837 case DataType::Type::kBool: shift = 1; break;
1838 case DataType::Type::kInt8: shift = 2; break;
1839 case DataType::Type::kUint16: shift = 3; break;
1840 case DataType::Type::kInt16: shift = 4; break;
1841 case DataType::Type::kInt32: shift = 5; break;
1842 case DataType::Type::kInt64: shift = 6; break;
1843 case DataType::Type::kFloat32: shift = 7; break;
1844 case DataType::Type::kFloat64: shift = 8; break;
1845 default:
1846 LOG(FATAL) << "Unexpected data type " << type;
1847 UNREACHABLE();
1848 }
1849 DCHECK_LE(kFieldWriteOffset, shift);
1850 DCHECK_LT(shift, kArrayWriteOffset);
1851 return UINT64_C(1) << (shift + offset);
1852 }
1853
1854 // Private constructor on direct flags value.
1855 explicit SideEffects(uint64_t flags) : flags_(flags) {}
1856
1857 uint64_t flags_;
1858 };
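// Illustrative usage sketch (added, not from the original source), showing how the factory
// methods and queries above compose; the variable names are made up for exposition:
//   SideEffects w = SideEffects::FieldWriteOfType(DataType::Type::kInt32, /*is_volatile=*/ false);
//   SideEffects r = SideEffects::FieldReadOfType(DataType::Type::kInt32, /*is_volatile=*/ false);
//   r.MayDependOn(w);  // true: an int field read may observe an int field write.
//   w.MayDependOn(r);  // false: a write carries no 'depends on' bits of its own.
//   SideEffects::FieldWriteOfType(DataType::Type::kInt32, /*is_volatile=*/ true)
//       .DoesAllReadWrite();  // true: volatile accesses are treated as all writes and reads.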
1859
1860 // A HEnvironment object contains the values of virtual registers at a given location.
1861 class HEnvironment : public ArenaObject<kArenaAllocEnvironment> {
1862 public:
1863 static HEnvironment* Create(ArenaAllocator* allocator,
1864 size_t number_of_vregs,
1865 ArtMethod* method,
1866 uint32_t dex_pc,
1867 HInstruction* holder) {
1868 // The storage for vreg records is allocated right after the `HEnvironment` itself.
1869 static_assert(IsAligned<alignof(HUserRecord<HEnvironment*>)>(sizeof(HEnvironment)));
1870 static_assert(IsAligned<alignof(HUserRecord<HEnvironment*>)>(ArenaAllocator::kAlignment));
1871 size_t alloc_size = sizeof(HEnvironment) + number_of_vregs * sizeof(HUserRecord<HEnvironment*>);
1872 void* storage = allocator->Alloc(alloc_size, kArenaAllocEnvironment);
1873 return new (storage) HEnvironment(number_of_vregs, method, dex_pc, holder);
1874 }
1875
1876 static HEnvironment* Create(ArenaAllocator* allocator,
1877 const HEnvironment& to_copy,
1878 HInstruction* holder) {
1879 return Create(allocator, to_copy.Size(), to_copy.GetMethod(), to_copy.GetDexPc(), holder);
1880 }
1881
1882 void AllocateLocations(ArenaAllocator* allocator) {
1883 DCHECK(locations_ == nullptr);
1884 if (Size() != 0u) {
1885 locations_ = allocator->AllocArray<Location>(Size(), kArenaAllocEnvironmentLocations);
1886 }
1887 }
1888
1889 void SetAndCopyParentChain(ArenaAllocator* allocator, HEnvironment* parent) {
1890 if (parent_ != nullptr) {
1891 parent_->SetAndCopyParentChain(allocator, parent);
1892 } else {
1893 parent_ = Create(allocator, *parent, holder_);
1894 parent_->CopyFrom(allocator, parent);
1895 if (parent->GetParent() != nullptr) {
1896 parent_->SetAndCopyParentChain(allocator, parent->GetParent());
1897 }
1898 }
1899 }
1900
1901 void CopyFrom(ArenaAllocator* allocator, ArrayRef<HInstruction* const> locals);
1902 void CopyFrom(ArenaAllocator* allocator, const HEnvironment* environment);
1903
1904 // Copy from `env`. If it's a loop phi for `loop_header`, copy the first
1905 // input to the loop phi instead. This is for inserting instructions that
1906 // require an environment (like HDeoptimization) in the loop pre-header.
1907 void CopyFromWithLoopPhiAdjustment(ArenaAllocator* allocator,
1908 HEnvironment* env,
1909 HBasicBlock* loop_header);
1910
1911 void SetRawEnvAt(size_t index, HInstruction* instruction) {
1912 GetVRegs()[index] = HUserRecord<HEnvironment*>(instruction);
1913 }
1914
1915 HInstruction* GetInstructionAt(size_t index) const {
1916 return GetVRegs()[index].GetInstruction();
1917 }
1918
1919 void RemoveAsUserOfInput(size_t index) const;
1920
1921 // Replaces the input at the position 'index' with the replacement; the replacement and old
1922 // input instructions' env_uses_ lists are adjusted. The function works similarly to
1923 // HInstruction::ReplaceInput.
1924 void ReplaceInput(HInstruction* replacement, size_t index);
1925
1926 size_t Size() const { return number_of_vregs_; }
1927
1928 HEnvironment* GetParent() const { return parent_; }
1929
1930 void SetLocationAt(size_t index, Location location) {
1931 DCHECK_LT(index, number_of_vregs_);
1932 DCHECK(locations_ != nullptr);
1933 locations_[index] = location;
1934 }
1935
1936 Location GetLocationAt(size_t index) const {
1937 DCHECK_LT(index, number_of_vregs_);
1938 DCHECK(locations_ != nullptr);
1939 return locations_[index];
1940 }
1941
1942 uint32_t GetDexPc() const {
1943 return dex_pc_;
1944 }
1945
1946 ArtMethod* GetMethod() const {
1947 return method_;
1948 }
1949
1950 HInstruction* GetHolder() const {
1951 return holder_;
1952 }
1953
1954
1955 bool IsFromInlinedInvoke() const {
1956 return GetParent() != nullptr;
1957 }
1958
1959 class EnvInputSelector {
1960 public:
1961 explicit EnvInputSelector(const HEnvironment* e) : env_(e) {}
1962 HInstruction* operator()(size_t s) const {
1963 return env_->GetInstructionAt(s);
1964 }
1965 private:
1966 const HEnvironment* env_;
1967 };
1968
1969 using HConstEnvInputRef = TransformIterator<CountIter, EnvInputSelector>;
1970 IterationRange<HConstEnvInputRef> GetEnvInputs() const {
1971 IterationRange<CountIter> range(Range(Size()));
1972 return MakeIterationRange(MakeTransformIterator(range.begin(), EnvInputSelector(this)),
1973 MakeTransformIterator(range.end(), EnvInputSelector(this)));
1974 }
1975
1976 private:
1977 ALWAYS_INLINE HEnvironment(size_t number_of_vregs,
1978 ArtMethod* method,
1979 uint32_t dex_pc,
1980 HInstruction* holder)
1981 : number_of_vregs_(dchecked_integral_cast<uint32_t>(number_of_vregs)),
1982 dex_pc_(dex_pc),
1983 holder_(holder),
1984 parent_(nullptr),
1985 method_(method),
1986 locations_(nullptr) {
1987 }
1988
1989 ArrayRef<HUserRecord<HEnvironment*>> GetVRegs() {
1990 auto* vregs = reinterpret_cast<HUserRecord<HEnvironment*>*>(this + 1);
1991 return ArrayRef<HUserRecord<HEnvironment*>>(vregs, number_of_vregs_);
1992 }
1993
1994 ArrayRef<const HUserRecord<HEnvironment*>> GetVRegs() const {
1995 auto* vregs = reinterpret_cast<const HUserRecord<HEnvironment*>*>(this + 1);
1996 return ArrayRef<const HUserRecord<HEnvironment*>>(vregs, number_of_vregs_);
1997 }
1998
1999 const uint32_t number_of_vregs_;
2000 const uint32_t dex_pc_;
2001
2002 // The instruction that holds this environment.
2003 HInstruction* const holder_;
2004
2005 // The parent environment for inlined code.
2006 HEnvironment* parent_;
2007
2008 // The environment's method, if resolved.
2009 ArtMethod* method_;
2010
2011 // Locations assigned by the register allocator.
2012 Location* locations_;
2013
2014 friend class HInstruction;
2015
2016 DISALLOW_COPY_AND_ASSIGN(HEnvironment);
2017 };
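// Illustrative sketch (assumption, for exposition only): an environment is typically built by
// creating it for its holder, filling the vreg slots, and registering the environment uses.
// `vreg_values` below is a hypothetical array of the current SSA values of the dex registers:
//   HEnvironment* env = HEnvironment::Create(allocator, num_vregs, method, dex_pc, instruction);
//   for (size_t i = 0; i < num_vregs; ++i) {
//     env->SetRawEnvAt(i, vreg_values[i]);             // raw: does not touch use lists
//     vreg_values[i]->AddEnvUseAt(allocator, env, i);  // records the environment use
//   }
//   instruction->SetRawEnvironment(env);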
2018
2019 std::ostream& operator<<(std::ostream& os, const HInstruction& rhs);
2020
2021 // Iterates over the Environments
2022 class HEnvironmentIterator : public ValueObject {
2023 public:
2024 using iterator_category = std::forward_iterator_tag;
2025 using value_type = HEnvironment*;
2026 using difference_type = ptrdiff_t;
2027 using pointer = void;
2028 using reference = void;
2029
2030 explicit HEnvironmentIterator(HEnvironment* cur) : cur_(cur) {}
2031
2032 HEnvironment* operator*() const {
2033 return cur_;
2034 }
2035
2036 HEnvironmentIterator& operator++() {
2037 DCHECK(cur_ != nullptr);
2038 cur_ = cur_->GetParent();
2039 return *this;
2040 }
2041
2042 HEnvironmentIterator operator++(int) {
2043 HEnvironmentIterator prev(*this);
2044 ++(*this);
2045 return prev;
2046 }
2047
2048 bool operator==(const HEnvironmentIterator& other) const {
2049 return other.cur_ == cur_;
2050 }
2051
2052 bool operator!=(const HEnvironmentIterator& other) const {
2053 return !(*this == other);
2054 }
2055
2056 private:
2057 HEnvironment* cur_;
2058 };
2059
2060 class HInstruction : public ArenaObject<kArenaAllocInstruction> {
2061 public:
2062 #define DECLARE_KIND(type, super) k##type,
2063 enum InstructionKind { // private marker to avoid generate-operator-out.py from processing.
2064 FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_KIND)
2065 kLastInstructionKind
2066 };
2067 #undef DECLARE_KIND
2068
2069 HInstruction(InstructionKind kind, SideEffects side_effects, uint32_t dex_pc)
2070 : HInstruction(kind, DataType::Type::kVoid, side_effects, dex_pc) {}
2071
2072 HInstruction(InstructionKind kind, DataType::Type type, SideEffects side_effects, uint32_t dex_pc)
2073 : previous_(nullptr),
2074 next_(nullptr),
2075 block_(nullptr),
2076 dex_pc_(dex_pc),
2077 id_(-1),
2078 ssa_index_(-1),
2079 packed_fields_(0u),
2080 environment_(nullptr),
2081 locations_(nullptr),
2082 live_interval_(nullptr),
2083 lifetime_position_(kNoLifetime),
2084 side_effects_(side_effects),
2085 reference_type_handle_(ReferenceTypeInfo::CreateInvalid().GetTypeHandle()) {
2086 SetPackedField<InstructionKindField>(kind);
2087 SetPackedField<TypeField>(type);
2088 SetPackedFlag<kFlagReferenceTypeIsExact>(ReferenceTypeInfo::CreateInvalid().IsExact());
2089 }
2090
2091 virtual ~HInstruction() {}
2092
2093 std::ostream& Dump(std::ostream& os, bool dump_args = false);
2094
2095 // Helper for dumping without argument information using operator<<
2096 struct NoArgsDump {
2097 const HInstruction* ins;
2098 };
2099 NoArgsDump DumpWithoutArgs() const {
2100 return NoArgsDump{this};
2101 }
2102 // Helper for dumping with argument information using operator<<
2103 struct ArgsDump {
2104 const HInstruction* ins;
2105 };
2106 ArgsDump DumpWithArgs() const {
2107 return ArgsDump{this};
2108 }
2109
2110 HInstruction* GetNext() const { return next_; }
2111 HInstruction* GetPrevious() const { return previous_; }
2112
2113 HInstruction* GetNextDisregardingMoves() const;
2114 HInstruction* GetPreviousDisregardingMoves() const;
2115
2116 HBasicBlock* GetBlock() const { return block_; }
2117 void SetBlock(HBasicBlock* block) { block_ = block; }
2118 bool IsInBlock() const { return block_ != nullptr; }
2119 bool IsInLoop() const { return block_->IsInLoop(); }
2120 bool IsLoopHeaderPhi() const { return IsPhi() && block_->IsLoopHeader(); }
2121 bool IsIrreducibleLoopHeaderPhi() const {
2122 return IsLoopHeaderPhi() && GetBlock()->GetLoopInformation()->IsIrreducible();
2123 }
2124
2125 virtual ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() = 0;
2126
2127 ArrayRef<const HUserRecord<HInstruction*>> GetInputRecords() const {
2128 // One virtual method is enough, just const_cast<> and then re-add the const.
2129 return ArrayRef<const HUserRecord<HInstruction*>>(
2130 const_cast<HInstruction*>(this)->GetInputRecords());
2131 }
2132
2133 HInputsRef GetInputs() {
2134 return MakeTransformArrayRef(GetInputRecords(), HInputExtractor());
2135 }
2136
2137 HConstInputsRef GetInputs() const {
2138 return MakeTransformArrayRef(GetInputRecords(), HInputExtractor());
2139 }
2140
2141 size_t InputCount() const { return GetInputRecords().size(); }
2142 HInstruction* InputAt(size_t i) const { return InputRecordAt(i).GetInstruction(); }
2143
2144 bool HasInput(HInstruction* input) const {
2145 for (const HInstruction* i : GetInputs()) {
2146 if (i == input) {
2147 return true;
2148 }
2149 }
2150 return false;
2151 }
2152
2153 void SetRawInputAt(size_t index, HInstruction* input) {
2154 SetRawInputRecordAt(index, HUserRecord<HInstruction*>(input));
2155 }
2156
2157 virtual void Accept(HGraphVisitor* visitor) = 0;
2158 virtual const char* DebugName() const = 0;
2159
2160 DataType::Type GetType() const {
2161 return TypeField::Decode(GetPackedFields());
2162 }
2163
2164 virtual bool NeedsEnvironment() const { return false; }
2165 virtual bool NeedsBss() const {
2166 return false;
2167 }
2168
2169 uint32_t GetDexPc() const { return dex_pc_; }
2170
2171 virtual bool IsControlFlow() const { return false; }
2172
2173 // Can the instruction throw?
2174 // TODO: We should rename to CanVisiblyThrow, as some instructions (like HNewInstance),
2175 // could throw OOME, but it is still OK to remove them if they are unused.
2176 virtual bool CanThrow() const { return false; }
2177
2178 // Does the instruction always throw an exception unconditionally?
2179 virtual bool AlwaysThrows() const { return false; }
2180 // Will this instruction only cause async exceptions if it causes any at all?
2181 virtual bool OnlyThrowsAsyncExceptions() const {
2182 return false;
2183 }
2184
2185 bool CanThrowIntoCatchBlock() const { return CanThrow() && block_->IsTryBlock(); }
2186
2187 bool HasSideEffects() const { return side_effects_.HasSideEffects(); }
2188 bool DoesAnyWrite() const { return side_effects_.DoesAnyWrite(); }
2189
2190 // Does not apply for all instructions, but having this at top level greatly
2191 // simplifies the null check elimination.
2192 // TODO: Consider merging can_be_null into ReferenceTypeInfo.
2193 virtual bool CanBeNull() const {
2194 DCHECK_EQ(GetType(), DataType::Type::kReference) << "CanBeNull only applies to reference types";
2195 return true;
2196 }
2197
2198 virtual bool CanDoImplicitNullCheckOn([[maybe_unused]] HInstruction* obj) const { return false; }
2199
2200 // If this instruction will do an implicit null check, return the `HNullCheck` associated
2201 // with it. Otherwise return null.
2202 HNullCheck* GetImplicitNullCheck() const {
2203 // Go over previous non-move instructions that are emitted at use site.
2204 HInstruction* prev_not_move = GetPreviousDisregardingMoves();
2205 while (prev_not_move != nullptr && prev_not_move->IsEmittedAtUseSite()) {
2206 if (prev_not_move->IsNullCheck()) {
2207 return prev_not_move->AsNullCheck();
2208 }
2209 prev_not_move = prev_not_move->GetPreviousDisregardingMoves();
2210 }
2211 return nullptr;
2212 }
2213
2214 virtual bool IsActualObject() const {
2215 return GetType() == DataType::Type::kReference;
2216 }
2217
2218 // Sets the ReferenceTypeInfo. The RTI must be valid.
2219 void SetReferenceTypeInfo(ReferenceTypeInfo rti);
2220 // Same as above, but we only set it if it's valid. Otherwise, we don't change the current RTI.
2221 void SetReferenceTypeInfoIfValid(ReferenceTypeInfo rti);
2222
2223 ReferenceTypeInfo GetReferenceTypeInfo() const {
2224 DCHECK_EQ(GetType(), DataType::Type::kReference);
2225 return ReferenceTypeInfo::CreateUnchecked(reference_type_handle_,
2226 GetPackedFlag<kFlagReferenceTypeIsExact>());
2227 }
2228
2229 void AddUseAt(ArenaAllocator* allocator, HInstruction* user, size_t index) {
2230 DCHECK(user != nullptr);
2231 HUseListNode<HInstruction*>* new_node =
2232 new (allocator) HUseListNode<HInstruction*>(user, index);
2233 // Note: `old_begin` remains valid across `push_front()`.
2234 auto old_begin = uses_.begin();
2235 uses_.push_front(*new_node);
2236 // To speed up this code, we inline the
2237 // FixUpUserRecordsAfterUseInsertion(
2238 // old_begin != uses_.end() ? ++old_begin : old_begin);
2239 // to reduce branching as we know that we're going to fix up either one or two entries.
2240 auto new_begin = uses_.begin();
2241 user->SetRawInputRecordAt(index, HUserRecord<HInstruction*>(this, uses_.before_begin()));
2242 if (old_begin != uses_.end()) {
2243 HInstruction* old_begin_user = old_begin->GetUser();
2244 size_t old_begin_index = old_begin->GetIndex();
2245 old_begin_user->SetRawInputRecordAt(
2246 old_begin_index, HUserRecord<HInstruction*>(this, new_begin));
2247 }
2248 }
2249
2250 void AddEnvUseAt(ArenaAllocator* allocator, HEnvironment* user, size_t index) {
2251 DCHECK(user != nullptr);
2252 HUseListNode<HEnvironment*>* new_node =
2253 new (allocator) HUseListNode<HEnvironment*>(user, index);
2254 // Note: `old_env_begin` remains valid across `push_front()`.
2255 auto old_env_begin = env_uses_.begin();
2256 env_uses_.push_front(*new_node);
2257 // To speed up this code, we inline the
2258 // FixUpUserRecordsAfterEnvUseInsertion(
2259 // old_env_begin != env_uses_.end() ? ++old_env_begin : old_env_begin);
2260 // to reduce branching as we know that we're going to fix up either one or two entries.
2261 auto new_env_begin = env_uses_.begin();
2262 user->GetVRegs()[index] = HUserRecord<HEnvironment*>(this, env_uses_.before_begin());
2263 if (old_env_begin != env_uses_.end()) {
2264 HEnvironment* old_env_begin_user = old_env_begin->GetUser();
2265 size_t old_env_begin_index = old_env_begin->GetIndex();
2266 old_env_begin_user->GetVRegs()[old_env_begin_index] =
2267 HUserRecord<HEnvironment*>(this, new_env_begin);
2268 }
2269 }
2270
2271 void RemoveAsUserOfInput(size_t input) {
2272 HUserRecord<HInstruction*> input_use = InputRecordAt(input);
2273 HUseList<HInstruction*>::iterator before_use_node = input_use.GetBeforeUseNode();
2274 input_use.GetInstruction()->uses_.erase_after(before_use_node);
2275 input_use.GetInstruction()->FixUpUserRecordsAfterUseRemoval(before_use_node);
2276 }
2277
2278 void RemoveAsUserOfAllInputs() {
2279 for (const HUserRecord<HInstruction*>& input_use : GetInputRecords()) {
2280 HUseList<HInstruction*>::iterator before_use_node = input_use.GetBeforeUseNode();
2281 input_use.GetInstruction()->uses_.erase_after(before_use_node);
2282 input_use.GetInstruction()->FixUpUserRecordsAfterUseRemoval(before_use_node);
2283 }
2284 }
2285
2286 const HUseList<HInstruction*>& GetUses() const { return uses_; }
2287 const HUseList<HEnvironment*>& GetEnvUses() const { return env_uses_; }
2288
2289 bool HasUses() const { return !uses_.empty() || !env_uses_.empty(); }
2290 bool HasEnvironmentUses() const { return !env_uses_.empty(); }
2291 bool HasNonEnvironmentUses() const { return !uses_.empty(); }
2292 bool HasOnlyOneNonEnvironmentUse() const {
2293 return !HasEnvironmentUses() && GetUses().HasExactlyOneElement();
2294 }
2295
2296 bool IsRemovable() const {
2297 return
2298 !DoesAnyWrite() &&
2299 // TODO(solanes): Merge calls from IsSuspendCheck to IsControlFlow into one that doesn't
2300 // do virtual dispatching.
2301 !IsSuspendCheck() &&
2302 !IsNop() &&
2303 !IsParameterValue() &&
2304 // If we added an explicit barrier then we should keep it.
2305 !IsMemoryBarrier() &&
2306 !IsConstructorFence() &&
2307 !IsControlFlow() &&
2308 !CanThrow();
2309 }
2310
2311 bool IsDeadAndRemovable() const {
2312 return !HasUses() && IsRemovable();
2313 }
2314
2315 bool IsPhiDeadAndRemovable() const {
2316 DCHECK(IsPhi());
2317 DCHECK(IsRemovable()) << " phis are always removable";
2318 return !HasUses();
2319 }
2320
2321 // Does this instruction dominate `other_instruction`?
2322 // Aborts if this instruction and `other_instruction` are different phis.
2323 bool Dominates(HInstruction* other_instruction) const;
2324
2325 // Same but with `strictly dominates` i.e. returns false if this instruction and
2326 // `other_instruction` are the same.
2327 bool StrictlyDominates(HInstruction* other_instruction) const;
2328
2329 int GetId() const { return id_; }
2330 void SetId(int id) { id_ = id; }
2331
2332 int GetSsaIndex() const { return ssa_index_; }
2333 void SetSsaIndex(int ssa_index) { ssa_index_ = ssa_index; }
2334 bool HasSsaIndex() const { return ssa_index_ != -1; }
2335
2336 bool HasEnvironment() const { return environment_ != nullptr; }
2337 HEnvironment* GetEnvironment() const { return environment_; }
2338 IterationRange<HEnvironmentIterator> GetAllEnvironments() const {
2339 return MakeIterationRange(HEnvironmentIterator(GetEnvironment()),
2340 HEnvironmentIterator(nullptr));
2341 }
2342 // Set the `environment_` field. Raw because this method does not
2343 // update the uses lists.
2344 void SetRawEnvironment(HEnvironment* environment) {
2345 DCHECK(environment_ == nullptr);
2346 DCHECK_EQ(environment->GetHolder(), this);
2347 environment_ = environment;
2348 }
2349
2350 void InsertRawEnvironment(HEnvironment* environment) {
2351 DCHECK(environment_ != nullptr);
2352 DCHECK_EQ(environment->GetHolder(), this);
2353 DCHECK(environment->GetParent() == nullptr);
2354 environment->parent_ = environment_;
2355 environment_ = environment;
2356 }
2357
2358 void RemoveEnvironment();
2359
2360 // Set the environment of this instruction, copying it from `environment`. While
2361 // copying, the uses lists are being updated.
2362 void CopyEnvironmentFrom(HEnvironment* environment) {
2363 DCHECK(environment_ == nullptr);
2364 ArenaAllocator* allocator = GetBlock()->GetGraph()->GetAllocator();
2365 environment_ = HEnvironment::Create(allocator, *environment, this);
2366 environment_->CopyFrom(allocator, environment);
2367 if (environment->GetParent() != nullptr) {
2368 environment_->SetAndCopyParentChain(allocator, environment->GetParent());
2369 }
2370 }
2371
2372 void CopyEnvironmentFromWithLoopPhiAdjustment(HEnvironment* environment,
2373 HBasicBlock* loop_header) {
2374 DCHECK(environment_ == nullptr);
2375 ArenaAllocator* allocator = loop_header->GetGraph()->GetAllocator();
2376 environment_ = HEnvironment::Create(allocator, *environment, this);
2377 environment_->CopyFromWithLoopPhiAdjustment(allocator, environment, loop_header);
2378 if (environment->GetParent() != nullptr) {
2379 environment_->SetAndCopyParentChain(allocator, environment->GetParent());
2380 }
2381 }
2382
2383 // Returns the number of entries in the environment. Typically, that is the
2384 // number of dex registers in a method. It could be more in case of inlining.
2385 size_t EnvironmentSize() const;
2386
2387 LocationSummary* GetLocations() const { return locations_; }
2388 void SetLocations(LocationSummary* locations) { locations_ = locations; }
2389
2390 void ReplaceWith(HInstruction* instruction);
2391 void ReplaceUsesDominatedBy(HInstruction* dominator,
2392 HInstruction* replacement,
2393 bool strictly_dominated = true);
2394 void ReplaceEnvUsesDominatedBy(HInstruction* dominator, HInstruction* replacement);
2395 void ReplaceInput(HInstruction* replacement, size_t index);
2396
2397 // This is almost the same as doing `ReplaceWith()`. But in this helper, the
2398 // uses of this instruction by `other` are *not* updated.
2399 void ReplaceWithExceptInReplacementAtIndex(HInstruction* other, size_t use_index) {
2400 ReplaceWith(other);
2401 other->ReplaceInput(this, use_index);
2402 }
2403
2404 // Move `this` instruction before `cursor`
2405 void MoveBefore(HInstruction* cursor, bool do_checks = true);
2406
2407 // Move `this` before its first user and out of any loops. If there is no
2408 // out-of-loop user that dominates all other users, move the instruction
2409 // to the end of the out-of-loop common dominator of the users' blocks.
2410 //
2411 // This can be used only on non-throwing instructions with no side effects that
2412 // have at least one use but no environment uses.
2413 void MoveBeforeFirstUserAndOutOfLoops();
2414
2415 #define INSTRUCTION_TYPE_CHECK(type, super) \
2416 bool Is##type() const;
2417
2418 FOR_EACH_INSTRUCTION(INSTRUCTION_TYPE_CHECK)
2419 #undef INSTRUCTION_TYPE_CHECK
2420
2421 #define INSTRUCTION_TYPE_CAST(type, super) \
2422 const H##type* As##type() const; \
2423 H##type* As##type(); \
2424 const H##type* As##type##OrNull() const; \
2425 H##type* As##type##OrNull();
2426
2427 FOR_EACH_INSTRUCTION(INSTRUCTION_TYPE_CAST)
2428 #undef INSTRUCTION_TYPE_CAST
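// Note (added commentary): the expansions above declare, for every instruction kind, a checked
// query plus casts, e.g. for M(Add, BinaryOperation): IsAdd(), AsAdd() (const and non-const) and
// AsAddOrNull(). Presumably As##type() expects the kind to match while As##type##OrNull()
// returns nullptr on a mismatch; the definitions live elsewhere in this file.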
2429
2430 // Return a clone of the instruction if it is clonable (shallow copy by default, custom copy
2431 // if a custom copy-constructor is provided for a particular type). If IsClonable() is false for
2432 // the instruction then the behaviour of this function is undefined.
2433 //
2434 // Note: It is semantically valid to create a clone of the instruction only until
2435 // prepare_for_register_allocator phase as lifetime, intervals and codegen info are not
2436 // copied.
2437 //
2438 // Note: HEnvironment and some other fields are not copied and are set to default values, see
2439 // 'explicit HInstruction(const HInstruction& other)' for details.
2440 virtual HInstruction* Clone([[maybe_unused]] ArenaAllocator* arena) const {
2441 LOG(FATAL) << "Cloning is not implemented for the instruction " <<
2442 DebugName() << " " << GetId();
2443 UNREACHABLE();
2444 }
2445
2446 // Return whether instruction can be cloned (copied).
2447 virtual bool IsClonable() const { return false; }
2448
2449 // Returns whether the instruction can be moved within the graph.
2450 // TODO: this method is used by LICM and GVN with possibly different
2451 // meanings? split and rename?
2452 virtual bool CanBeMoved() const { return false; }
2453
2454 // Returns whether any data encoded in the two instructions is equal.
2455 // This method does not look at the inputs. Both instructions must be
2456 // of the same type, otherwise the method has undefined behavior.
2457 virtual bool InstructionDataEquals([[maybe_unused]] const HInstruction* other) const {
2458 return false;
2459 }
2460
2461 // Returns whether two instructions are equal, that is:
2462 // 1) They have the same type and contain the same data (InstructionDataEquals).
2463 // 2) Their inputs are identical.
2464 bool Equals(const HInstruction* other) const;
2465
2466 InstructionKind GetKind() const { return GetPackedField<InstructionKindField>(); }
2467
2468 virtual size_t ComputeHashCode() const {
2469 size_t result = GetKind();
2470 for (const HInstruction* input : GetInputs()) {
2471 result = (result * 31) + input->GetId();
2472 }
2473 return result;
2474 }
2475
2476 SideEffects GetSideEffects() const { return side_effects_; }
2477 void SetSideEffects(SideEffects other) { side_effects_ = other; }
2478 void AddSideEffects(SideEffects other) { side_effects_.Add(other); }
2479
2480 size_t GetLifetimePosition() const { return lifetime_position_; }
2481 void SetLifetimePosition(size_t position) { lifetime_position_ = position; }
2482 LiveInterval* GetLiveInterval() const { return live_interval_; }
2483 void SetLiveInterval(LiveInterval* interval) { live_interval_ = interval; }
2484 bool HasLiveInterval() const { return live_interval_ != nullptr; }
2485
2486 bool IsSuspendCheckEntry() const { return IsSuspendCheck() && GetBlock()->IsEntryBlock(); }
2487
2488 // Returns whether the code generation of the instruction will require to have access
2489 // to the current method. Such instructions are:
2490 // (1): Instructions that require an environment, as calling the runtime requires
2491 // to walk the stack and have the current method stored at a specific stack address.
2492 // (2): HCurrentMethod, potentially used by HInvokeStaticOrDirect, HLoadString, or HLoadClass
2493 // to access the dex cache.
2494 bool NeedsCurrentMethod() const {
2495 return NeedsEnvironment() || IsCurrentMethod();
2496 }
2497
2498 // Does this instruction have any use in an environment before
2499 // control flow hits 'other'?
2500 bool HasAnyEnvironmentUseBefore(HInstruction* other);
2501
2502 // Remove all references to environment uses of this instruction.
2503 // The caller must ensure that this is safe to do.
2504 void RemoveEnvironmentUsers();
2505
2506 bool IsEmittedAtUseSite() const { return GetPackedFlag<kFlagEmittedAtUseSite>(); }
2507 void MarkEmittedAtUseSite() { SetPackedFlag<kFlagEmittedAtUseSite>(true); }
2508
2509 protected:
2510 // If set, the machine code for this instruction is assumed to be generated by
2511 // its users. Used by liveness analysis to compute use positions accordingly.
2512 static constexpr size_t kFlagEmittedAtUseSite = 0u;
2513 static constexpr size_t kFlagReferenceTypeIsExact = kFlagEmittedAtUseSite + 1;
2514 static constexpr size_t kFieldInstructionKind = kFlagReferenceTypeIsExact + 1;
2515 static constexpr size_t kFieldInstructionKindSize =
2516 MinimumBitsToStore(static_cast<size_t>(InstructionKind::kLastInstructionKind - 1));
2517 static constexpr size_t kFieldType =
2518 kFieldInstructionKind + kFieldInstructionKindSize;
2519 static constexpr size_t kFieldTypeSize =
2520 MinimumBitsToStore(static_cast<size_t>(DataType::Type::kLast));
2521 static constexpr size_t kNumberOfGenericPackedBits = kFieldType + kFieldTypeSize;
2522 static constexpr size_t kMaxNumberOfPackedBits = sizeof(uint32_t) * kBitsPerByte;
2523
2524 static_assert(kNumberOfGenericPackedBits <= kMaxNumberOfPackedBits,
2525 "Too many generic packed fields");
2526
2527 using TypeField = BitField<DataType::Type, kFieldType, kFieldTypeSize>;
2528
2529 const HUserRecord<HInstruction*> InputRecordAt(size_t i) const {
2530 return GetInputRecords()[i];
2531 }
2532
2533 void SetRawInputRecordAt(size_t index, const HUserRecord<HInstruction*>& input) {
2534 ArrayRef<HUserRecord<HInstruction*>> input_records = GetInputRecords();
2535 input_records[index] = input;
2536 }
2537
2538 uint32_t GetPackedFields() const {
2539 return packed_fields_;
2540 }
2541
2542 template <size_t flag>
2543 bool GetPackedFlag() const {
2544 return (packed_fields_ & (1u << flag)) != 0u;
2545 }
2546
2547 template <size_t flag>
2548 void SetPackedFlag(bool value = true) {
2549 packed_fields_ = (packed_fields_ & ~(1u << flag)) | ((value ? 1u : 0u) << flag);
2550 }
2551
2552 template <typename BitFieldType>
2553 typename BitFieldType::value_type GetPackedField() const {
2554 return BitFieldType::Decode(packed_fields_);
2555 }
2556
2557 template <typename BitFieldType>
2558 void SetPackedField(typename BitFieldType::value_type value) {
2559 DCHECK(IsUint<BitFieldType::size>(static_cast<uintptr_t>(value)));
2560 packed_fields_ = BitFieldType::Update(value, packed_fields_);
2561 }
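// Note (added commentary): packed_fields_ is a single 32-bit store shared with subclasses;
// kNumberOfGenericPackedBits marks the first bit available to a subclass for its own flags or
// bit fields, accessed through the Get/SetPackedFlag and Get/SetPackedField helpers above.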
2562
2563 // Copy construction for the instruction (used for Clone function).
2564 //
2565 // Fields (e.g. lifetime, intervals and codegen info) associated with phases starting from
2566 // prepare_for_register_allocator are not copied (set to default values).
2567 //
2568 // Copy constructors must be provided for every HInstruction type; default copy constructor is
2569 // fine for most of them. However for some of the instructions a custom copy constructor must be
2570 // specified (when instruction has non-trivially copyable fields and must have a special behaviour
2571 // for copying them).
2572 explicit HInstruction(const HInstruction& other)
2573 : previous_(nullptr),
2574 next_(nullptr),
2575 block_(nullptr),
2576 dex_pc_(other.dex_pc_),
2577 id_(-1),
2578 ssa_index_(-1),
2579 packed_fields_(other.packed_fields_),
2580 environment_(nullptr),
2581 locations_(nullptr),
2582 live_interval_(nullptr),
2583 lifetime_position_(kNoLifetime),
2584 side_effects_(other.side_effects_),
2585 reference_type_handle_(other.reference_type_handle_) {
2586 }
2587
2588 private:
2589 using InstructionKindField =
2590 BitField<InstructionKind, kFieldInstructionKind, kFieldInstructionKindSize>;
2591
2592 void FixUpUserRecordsAfterUseInsertion(HUseList<HInstruction*>::iterator fixup_end) {
2593 auto before_use_node = uses_.before_begin();
2594 for (auto use_node = uses_.begin(); use_node != fixup_end; ++use_node) {
2595 HInstruction* user = use_node->GetUser();
2596 size_t input_index = use_node->GetIndex();
2597 user->SetRawInputRecordAt(input_index, HUserRecord<HInstruction*>(this, before_use_node));
2598 before_use_node = use_node;
2599 }
2600 }
2601
2602 void FixUpUserRecordsAfterUseRemoval(HUseList<HInstruction*>::iterator before_use_node) {
2603 auto next = ++HUseList<HInstruction*>::iterator(before_use_node);
2604 if (next != uses_.end()) {
2605 HInstruction* next_user = next->GetUser();
2606 size_t next_index = next->GetIndex();
2607 DCHECK(next_user->InputRecordAt(next_index).GetInstruction() == this);
2608 next_user->SetRawInputRecordAt(next_index, HUserRecord<HInstruction*>(this, before_use_node));
2609 }
2610 }
2611
2612 void FixUpUserRecordsAfterEnvUseInsertion(HUseList<HEnvironment*>::iterator env_fixup_end) {
2613 auto before_env_use_node = env_uses_.before_begin();
2614 for (auto env_use_node = env_uses_.begin(); env_use_node != env_fixup_end; ++env_use_node) {
2615 HEnvironment* user = env_use_node->GetUser();
2616 size_t input_index = env_use_node->GetIndex();
2617 user->GetVRegs()[input_index] = HUserRecord<HEnvironment*>(this, before_env_use_node);
2618 before_env_use_node = env_use_node;
2619 }
2620 }
2621
2622 void FixUpUserRecordsAfterEnvUseRemoval(HUseList<HEnvironment*>::iterator before_env_use_node) {
2623 auto next = ++HUseList<HEnvironment*>::iterator(before_env_use_node);
2624 if (next != env_uses_.end()) {
2625 HEnvironment* next_user = next->GetUser();
2626 size_t next_index = next->GetIndex();
2627 DCHECK(next_user->GetVRegs()[next_index].GetInstruction() == this);
2628 next_user->GetVRegs()[next_index] = HUserRecord<HEnvironment*>(this, before_env_use_node);
2629 }
2630 }
2631
2632 HInstruction* previous_;
2633 HInstruction* next_;
2634 HBasicBlock* block_;
2635 const uint32_t dex_pc_;
2636
2637 // An instruction gets an id when it is added to the graph.
2638 // It reflects creation order. A negative id means the instruction
2639 // has not been added to the graph.
2640 int id_;
2641
2642 // When doing liveness analysis, instructions that have uses get an SSA index.
2643 int ssa_index_;
2644
2645 // Packed fields.
2646 uint32_t packed_fields_;
2647
2648 // List of instructions that have this instruction as input.
2649 HUseList<HInstruction*> uses_;
2650
2651 // List of environments that contain this instruction.
2652 HUseList<HEnvironment*> env_uses_;
2653
2654 // The environment associated with this instruction. Not null if the instruction
2655 // might jump out of the method.
2656 HEnvironment* environment_;
2657
2658 // Set by the code generator.
2659 LocationSummary* locations_;
2660
2661 // Set by the liveness analysis.
2662 LiveInterval* live_interval_;
2663
2664 // Set by the liveness analysis, this is the position in a linear
2665 // order of blocks where this instruction's live interval starts.
2666 size_t lifetime_position_;
2667
2668 SideEffects side_effects_;
2669
2670 // The reference handle part of the reference type info.
2671 // The IsExact() flag is stored in packed fields.
2672 // TODO: for primitive types this should be marked as invalid.
2673 ReferenceTypeInfo::TypeHandle reference_type_handle_;
2674
2675 friend class GraphChecker;
2676 friend class HBasicBlock;
2677 friend class HEnvironment;
2678 friend class HGraph;
2679 friend class HInstructionList;
2680 };
2681
2682 std::ostream& operator<<(std::ostream& os, HInstruction::InstructionKind rhs);
2683 std::ostream& operator<<(std::ostream& os, const HInstruction::NoArgsDump rhs);
2684 std::ostream& operator<<(std::ostream& os, const HInstruction::ArgsDump rhs);
2685 std::ostream& operator<<(std::ostream& os, const HUseList<HInstruction*>& lst);
2686 std::ostream& operator<<(std::ostream& os, const HUseList<HEnvironment*>& lst);
2687
2688 // Forward declarations for friends
2689 template <typename InnerIter> struct HSTLInstructionIterator;
2690
2691 // Iterates over the instructions, while preserving the next instruction
2692 // in case the current instruction gets removed from the list by the user
2693 // of this iterator.
2694 class HInstructionIterator : public ValueObject {
2695 public:
2696 explicit HInstructionIterator(const HInstructionList& instructions)
2697 : instruction_(instructions.first_instruction_) {
2698 next_ = Done() ? nullptr : instruction_->GetNext();
2699 }
2700
2701 bool Done() const { return instruction_ == nullptr; }
2702 HInstruction* Current() const { return instruction_; }
2703 void Advance() {
2704 instruction_ = next_;
2705 next_ = Done() ? nullptr : instruction_->GetNext();
2706 }
2707
2708 private:
2709 HInstructionIterator() : instruction_(nullptr), next_(nullptr) {}
2710
2711 HInstruction* instruction_;
2712 HInstruction* next_;
2713
2714 friend struct HSTLInstructionIterator<HInstructionIterator>;
2715 };
2716
2717 // Iterates over the instructions without saving the next instruction,
2718 // therefore handling changes in the graph potentially made by the user
2719 // of this iterator.
2720 class HInstructionIteratorHandleChanges : public ValueObject {
2721 public:
2722 explicit HInstructionIteratorHandleChanges(const HInstructionList& instructions)
2723 : instruction_(instructions.first_instruction_) {
2724 }
2725
2726 bool Done() const { return instruction_ == nullptr; }
2727 HInstruction* Current() const { return instruction_; }
2728 void Advance() {
2729 instruction_ = instruction_->GetNext();
2730 }
2731
2732 private:
2733 HInstructionIteratorHandleChanges() : instruction_(nullptr) {}
2734
2735 HInstruction* instruction_;
2736
2737 friend struct HSTLInstructionIterator<HInstructionIteratorHandleChanges>;
2738 };
2739
2740
2741 class HBackwardInstructionIterator : public ValueObject {
2742 public:
2743 explicit HBackwardInstructionIterator(const HInstructionList& instructions)
2744 : instruction_(instructions.last_instruction_) {
2745 next_ = Done() ? nullptr : instruction_->GetPrevious();
2746 }
2747
2748 explicit HBackwardInstructionIterator(HInstruction* instruction) : instruction_(instruction) {
2749 next_ = Done() ? nullptr : instruction_->GetPrevious();
2750 }
2751
2752 bool Done() const { return instruction_ == nullptr; }
2753 HInstruction* Current() const { return instruction_; }
2754 void Advance() {
2755 instruction_ = next_;
2756 next_ = Done() ? nullptr : instruction_->GetPrevious();
2757 }
2758
2759 private:
2760 HBackwardInstructionIterator() : instruction_(nullptr), next_(nullptr) {}
2761
2762 HInstruction* instruction_;
2763 HInstruction* next_;
2764
2765 friend struct HSTLInstructionIterator<HBackwardInstructionIterator>;
2766 };
2767
2768 template <typename InnerIter>
2769 struct HSTLInstructionIterator : public ValueObject {
2770 public:
2771 using iterator_category = std::forward_iterator_tag;
2772 using value_type = HInstruction*;
2773 using difference_type = ptrdiff_t;
2774 using pointer = void;
2775 using reference = void;
2776
2777 static_assert(std::is_same_v<InnerIter, HBackwardInstructionIterator> ||
2778 std::is_same_v<InnerIter, HInstructionIterator> ||
2779 std::is_same_v<InnerIter, HInstructionIteratorHandleChanges>,
2780 "Unknown wrapped iterator!");
2781
2782 explicit HSTLInstructionIterator(InnerIter inner) : inner_(inner) {}
2783 HInstruction* operator*() const {
2784 DCHECK(inner_.Current() != nullptr);
2785 return inner_.Current();
2786 }
2787
2788 HSTLInstructionIterator<InnerIter>& operator++() {
2789 DCHECK(*this != HSTLInstructionIterator<InnerIter>::EndIter());
2790 inner_.Advance();
2791 return *this;
2792 }
2793
2794 HSTLInstructionIterator<InnerIter> operator++(int) {
2795 HSTLInstructionIterator<InnerIter> prev(*this);
2796 ++(*this);
2797 return prev;
2798 }
2799
2800 bool operator==(const HSTLInstructionIterator<InnerIter>& other) const {
2801 return inner_.Current() == other.inner_.Current();
2802 }
2803
2804 bool operator!=(const HSTLInstructionIterator<InnerIter>& other) const {
2805 return !(*this == other);
2806 }
2807
2808 static HSTLInstructionIterator<InnerIter> EndIter() {
2809 return HSTLInstructionIterator<InnerIter>(InnerIter());
2810 }
2811
2812 private:
2813 InnerIter inner_;
2814 };
2815
2816 template <typename InnerIter>
2817 IterationRange<HSTLInstructionIterator<InnerIter>> MakeSTLInstructionIteratorRange(InnerIter iter) {
2818 return MakeIterationRange(HSTLInstructionIterator<InnerIter>(iter),
2819 HSTLInstructionIterator<InnerIter>::EndIter());
2820 }
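
// Illustrative sketch (not part of this header): a typical way to drive these
// iterators, assuming `block` is an HBasicBlock*. The classic form caches the
// next instruction, so the current one may be removed while iterating:
//
//   for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
//     HInstruction* instruction = it.Current();
//     // ... possibly remove `instruction` from its block ...
//   }
//
// The same traversal in STL style, using the wrapper defined above:
//
//   for (HInstruction* instruction :
//            MakeSTLInstructionIteratorRange(HInstructionIterator(block->GetInstructions()))) {
//     // ...
//   }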
2821
2822 class HVariableInputSizeInstruction : public HInstruction {
2823 public:
2824 using HInstruction::GetInputRecords; // Keep the const version visible.
2825 ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() override {
2826 return ArrayRef<HUserRecord<HInstruction*>>(inputs_);
2827 }
2828
2829 void AddInput(HInstruction* input);
2830 void InsertInputAt(size_t index, HInstruction* input);
2831 void RemoveInputAt(size_t index);
2832
2833 // Removes all the inputs.
// Also removes this instruction from each input's use list
2835 // (for non-environment uses only).
2836 void RemoveAllInputs();
2837
2838 protected:
2839 HVariableInputSizeInstruction(InstructionKind inst_kind,
2840 SideEffects side_effects,
2841 uint32_t dex_pc,
2842 ArenaAllocator* allocator,
2843 size_t number_of_inputs,
2844 ArenaAllocKind kind)
2845 : HInstruction(inst_kind, side_effects, dex_pc),
2846 inputs_(number_of_inputs, allocator->Adapter(kind)) {}
2847 HVariableInputSizeInstruction(InstructionKind inst_kind,
2848 DataType::Type type,
2849 SideEffects side_effects,
2850 uint32_t dex_pc,
2851 ArenaAllocator* allocator,
2852 size_t number_of_inputs,
2853 ArenaAllocKind kind)
2854 : HInstruction(inst_kind, type, side_effects, dex_pc),
2855 inputs_(number_of_inputs, allocator->Adapter(kind)) {}
2856
2857 DEFAULT_COPY_CONSTRUCTOR(VariableInputSizeInstruction);
2858
2859 ArenaVector<HUserRecord<HInstruction*>> inputs_;
2860 };
2861
2862 template<size_t N, typename Base = HInstruction>
2863 class HExpression : public Base {
2864 public:
2865 template <typename... Args>
2866 explicit HExpression(Args&&... args)
2867 : Base(std::forward<Args>(args)...), inputs_() {}
2868
2869 virtual ~HExpression() {}
2870
2871 using HInstruction::GetInputRecords; // Keep the const version visible.
2872 ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() final {
2873 return ArrayRef<HUserRecord<HInstruction*>>(inputs_);
2874 }
2875
2876 protected:
2877 DEFAULT_COPY_CONSTRUCTOR(Expression);
2878
2879 private:
2880 std::array<HUserRecord<HInstruction*>, N> inputs_;
2881
2882 friend class SsaBuilder;
2883 };
2884
2885 // HExpression specialization for N=0.
2886 template<typename Base>
2887 class HExpression<0, Base> : public Base {
2888 public:
2889 template <typename... Args>
2890 explicit HExpression(Args&&... args)
2891 : Base(std::forward<Args>(args)...) {}
2892
2893 virtual ~HExpression() {}
2894
2895 using HInstruction::GetInputRecords; // Keep the const version visible.
2896 ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() final {
2897 return ArrayRef<HUserRecord<HInstruction*>>();
2898 }
2899
2900 protected:
2901 DEFAULT_COPY_CONSTRUCTOR(Expression);
2902
2903 private:
2904 friend class SsaBuilder;
2905 };
2906
2907 class HMethodEntryHook : public HExpression<0> {
2908 public:
2909 explicit HMethodEntryHook(uint32_t dex_pc)
2910 : HExpression(kMethodEntryHook, SideEffects::All(), dex_pc) {}
2911
2912 bool NeedsEnvironment() const override {
2913 return true;
2914 }
2915
2916 bool CanThrow() const override { return true; }
2917
2918 DECLARE_INSTRUCTION(MethodEntryHook);
2919
2920 protected:
2921 DEFAULT_COPY_CONSTRUCTOR(MethodEntryHook);
2922 };
2923
2924 class HMethodExitHook : public HExpression<1> {
2925 public:
2926 HMethodExitHook(HInstruction* value, uint32_t dex_pc)
2927 : HExpression(kMethodExitHook, SideEffects::All(), dex_pc) {
2928 SetRawInputAt(0, value);
2929 }
2930
2931 bool NeedsEnvironment() const override {
2932 return true;
2933 }
2934
2935 bool CanThrow() const override { return true; }
2936
2937 DECLARE_INSTRUCTION(MethodExitHook);
2938
2939 protected:
2940 DEFAULT_COPY_CONSTRUCTOR(MethodExitHook);
2941 };
2942
2943 // Represents dex's RETURN_VOID opcode. A HReturnVoid is a control flow
2944 // instruction that branches to the exit block.
2945 class HReturnVoid final : public HExpression<0> {
2946 public:
2947 explicit HReturnVoid(uint32_t dex_pc = kNoDexPc)
2948 : HExpression(kReturnVoid, SideEffects::None(), dex_pc) {
2949 }
2950
2951 bool IsControlFlow() const override { return true; }
2952
2953 DECLARE_INSTRUCTION(ReturnVoid);
2954
2955 protected:
2956 DEFAULT_COPY_CONSTRUCTOR(ReturnVoid);
2957 };
2958
2959 // Represents dex's RETURN opcodes. A HReturn is a control flow
2960 // instruction that branches to the exit block.
2961 class HReturn final : public HExpression<1> {
2962 public:
2963 explicit HReturn(HInstruction* value, uint32_t dex_pc = kNoDexPc)
2964 : HExpression(kReturn, SideEffects::None(), dex_pc) {
2965 SetRawInputAt(0, value);
2966 }
2967
2968 bool IsControlFlow() const override { return true; }
2969
2970 DECLARE_INSTRUCTION(Return);
2971
2972 protected:
2973 DEFAULT_COPY_CONSTRUCTOR(Return);
2974 };
2975
2976 class HPhi final : public HVariableInputSizeInstruction {
2977 public:
2978 HPhi(ArenaAllocator* allocator,
2979 uint32_t reg_number,
2980 size_t number_of_inputs,
2981 DataType::Type type,
2982 uint32_t dex_pc = kNoDexPc)
2983 : HVariableInputSizeInstruction(
2984 kPhi,
2985 ToPhiType(type),
2986 SideEffects::None(),
2987 dex_pc,
2988 allocator,
2989 number_of_inputs,
2990 kArenaAllocPhiInputs),
2991 reg_number_(reg_number) {
2992 DCHECK_NE(GetType(), DataType::Type::kVoid);
2993 // Phis are constructed live and marked dead if conflicting or unused.
2994 // Individual steps of SsaBuilder should assume that if a phi has been
2995 // marked dead, it can be ignored and will be removed by SsaPhiElimination.
2996 SetPackedFlag<kFlagIsLive>(true);
2997 SetPackedFlag<kFlagCanBeNull>(true);
2998 }
2999
3000 bool IsClonable() const override { return true; }
3001
3002 // Returns a type equivalent to the given `type`, but that a `HPhi` can hold.
3003 static DataType::Type ToPhiType(DataType::Type type) {
3004 return DataType::Kind(type);
3005 }
3006
3007 bool IsCatchPhi() const { return GetBlock()->IsCatchBlock(); }
3008
3009 void SetType(DataType::Type new_type) {
3010 // Make sure that only valid type changes occur. The following are allowed:
3011 // (1) int -> float/ref (primitive type propagation),
3012 // (2) long -> double (primitive type propagation).
3013 DCHECK(GetType() == new_type ||
3014 (GetType() == DataType::Type::kInt32 && new_type == DataType::Type::kFloat32) ||
3015 (GetType() == DataType::Type::kInt32 && new_type == DataType::Type::kReference) ||
3016 (GetType() == DataType::Type::kInt64 && new_type == DataType::Type::kFloat64));
3017 SetPackedField<TypeField>(new_type);
3018 }
3019
3020 bool CanBeNull() const override { return GetPackedFlag<kFlagCanBeNull>(); }
3021 void SetCanBeNull(bool can_be_null) { SetPackedFlag<kFlagCanBeNull>(can_be_null); }
3022
3023 uint32_t GetRegNumber() const { return reg_number_; }
3024
3025 void SetDead() { SetPackedFlag<kFlagIsLive>(false); }
3026 void SetLive() { SetPackedFlag<kFlagIsLive>(true); }
3027 bool IsDead() const { return !IsLive(); }
3028 bool IsLive() const { return GetPackedFlag<kFlagIsLive>(); }
3029
3030 bool IsVRegEquivalentOf(const HInstruction* other) const {
3031 return other != nullptr
3032 && other->IsPhi()
3033 && other->GetBlock() == GetBlock()
3034 && other->AsPhi()->GetRegNumber() == GetRegNumber();
3035 }
3036
3037 bool HasEquivalentPhi() const {
3038 if (GetPrevious() != nullptr && GetPrevious()->AsPhi()->GetRegNumber() == GetRegNumber()) {
3039 return true;
3040 }
3041 if (GetNext() != nullptr && GetNext()->AsPhi()->GetRegNumber() == GetRegNumber()) {
3042 return true;
3043 }
3044 return false;
3045 }
3046
3047 // Returns the next equivalent phi (starting from the current one) or null if there is none.
3048 // An equivalent phi is a phi having the same dex register and type.
3049 // It assumes that phis with the same dex register are adjacent.
3050 HPhi* GetNextEquivalentPhiWithSameType() {
3051 HInstruction* next = GetNext();
3052 while (next != nullptr && next->AsPhi()->GetRegNumber() == reg_number_) {
3053 if (next->GetType() == GetType()) {
3054 return next->AsPhi();
3055 }
3056 next = next->GetNext();
3057 }
3058 return nullptr;
3059 }
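
// Illustrative sketch of the layout this relies on (hypothetical phis in one
// block; vreg-equivalents are kept adjacent, as noted above):
//
//   p1: Phi(kInt32)    for vreg 1
//   p2: Phi(kFloat32)  for vreg 1   <- vreg-equivalent of p1
//   p3: Phi(kInt32)    for vreg 2
//
// p1->GetNextEquivalentPhiWithSameType() inspects p2 (same vreg, different
// type) and stops at p3 (different vreg), so it returns nullptr here; it would
// return a following phi for vreg 1 whose type is also kInt32, if one existed.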
3060
3061 DECLARE_INSTRUCTION(Phi);
3062
3063 protected:
3064 DEFAULT_COPY_CONSTRUCTOR(Phi);
3065
3066 private:
3067 static constexpr size_t kFlagIsLive = HInstruction::kNumberOfGenericPackedBits;
3068 static constexpr size_t kFlagCanBeNull = kFlagIsLive + 1;
3069 static constexpr size_t kNumberOfPhiPackedBits = kFlagCanBeNull + 1;
3070 static_assert(kNumberOfPhiPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
3071
3072 const uint32_t reg_number_;
3073 };
3074
3075 // The exit instruction is the only instruction of the exit block.
3076 // Instructions aborting the method (HThrow and HReturn) must branch to the
3077 // exit block.
3078 class HExit final : public HExpression<0> {
3079 public:
3080 explicit HExit(uint32_t dex_pc = kNoDexPc)
3081 : HExpression(kExit, SideEffects::None(), dex_pc) {
3082 }
3083
3084 bool IsControlFlow() const override { return true; }
3085
3086 DECLARE_INSTRUCTION(Exit);
3087
3088 protected:
3089 DEFAULT_COPY_CONSTRUCTOR(Exit);
3090 };
3091
3092 // Jumps from one block to another.
3093 class HGoto final : public HExpression<0> {
3094 public:
3095 explicit HGoto(uint32_t dex_pc = kNoDexPc)
3096 : HExpression(kGoto, SideEffects::None(), dex_pc) {
3097 }
3098
3099 bool IsClonable() const override { return true; }
3100 bool IsControlFlow() const override { return true; }
3101
3102 HBasicBlock* GetSuccessor() const {
3103 return GetBlock()->GetSingleSuccessor();
3104 }
3105
3106 DECLARE_INSTRUCTION(Goto);
3107
3108 protected:
3109 DEFAULT_COPY_CONSTRUCTOR(Goto);
3110 };
3111
3112 class HConstant : public HExpression<0> {
3113 public:
3114 explicit HConstant(InstructionKind kind, DataType::Type type)
3115 : HExpression(kind, type, SideEffects::None(), kNoDexPc) {
3116 }
3117
3118 bool CanBeMoved() const override { return true; }
3119
3120 // Is this constant -1 in the arithmetic sense?
3121 virtual bool IsMinusOne() const { return false; }
3122 // Is this constant 0 in the arithmetic sense?
3123 virtual bool IsArithmeticZero() const { return false; }
3124 // Is this constant a 0-bit pattern?
3125 virtual bool IsZeroBitPattern() const { return false; }
3126 // Is this constant 1 in the arithmetic sense?
3127 virtual bool IsOne() const { return false; }
3128
3129 virtual uint64_t GetValueAsUint64() const = 0;
3130
3131 DECLARE_ABSTRACT_INSTRUCTION(Constant);
3132
3133 protected:
3134 DEFAULT_COPY_CONSTRUCTOR(Constant);
3135 };
3136
3137 class HNullConstant final : public HConstant {
3138 public:
3139 bool InstructionDataEquals([[maybe_unused]] const HInstruction* other) const override {
3140 return true;
3141 }
3142
3143 uint64_t GetValueAsUint64() const override { return 0; }
3144
3145 size_t ComputeHashCode() const override { return 0; }
3146
3147 // The null constant representation is a 0-bit pattern.
3148 bool IsZeroBitPattern() const override { return true; }
3149
3150 DECLARE_INSTRUCTION(NullConstant);
3151
3152 protected:
3153 DEFAULT_COPY_CONSTRUCTOR(NullConstant);
3154
3155 private:
3156 explicit HNullConstant()
3157 : HConstant(kNullConstant, DataType::Type::kReference) {
3158 }
3159
3160 friend class HGraph;
3161 };
3162
3163 // Constants of the type int. Those can be from Dex instructions, or
3164 // synthesized (for example with the if-eqz instruction).
3165 class HIntConstant final : public HConstant {
3166 public:
3167 int32_t GetValue() const { return value_; }
3168
3169 uint64_t GetValueAsUint64() const override {
3170 return static_cast<uint64_t>(static_cast<uint32_t>(value_));
3171 }
3172
3173 bool InstructionDataEquals(const HInstruction* other) const override {
3174 DCHECK(other->IsIntConstant()) << other->DebugName();
3175 return other->AsIntConstant()->value_ == value_;
3176 }
3177
3178 size_t ComputeHashCode() const override { return GetValue(); }
3179
3180 bool IsMinusOne() const override { return GetValue() == -1; }
3181 bool IsArithmeticZero() const override { return GetValue() == 0; }
3182 bool IsZeroBitPattern() const override { return GetValue() == 0; }
3183 bool IsOne() const override { return GetValue() == 1; }
3184
3185 // Integer constants are used to encode Boolean values as well,
3186 // where 1 means true and 0 means false.
3187 bool IsTrue() const { return GetValue() == 1; }
3188 bool IsFalse() const { return GetValue() == 0; }
3189
3190 explicit HIntConstant(int32_t value)
3191 : HConstant(kIntConstant, DataType::Type::kInt32), value_(value) {
3192 }
3193 explicit HIntConstant(bool value)
3194 : HConstant(kIntConstant, DataType::Type::kInt32),
3195 value_(value ? 1 : 0) {
3196 }
3197
3198 DECLARE_INSTRUCTION(IntConstant);
3199
3200 protected:
3201 DEFAULT_COPY_CONSTRUCTOR(IntConstant);
3202
3203 private:
3204 const int32_t value_;
3205
3206 friend class HGraph;
3207 ART_FRIEND_TEST(GraphTest, InsertInstructionBefore);
3208 ART_FRIEND_TYPED_TEST(ParallelMoveTest, ConstantLast);
3209 };
3210
3211 class HLongConstant final : public HConstant {
3212 public:
3213 int64_t GetValue() const { return value_; }
3214
3215 uint64_t GetValueAsUint64() const override { return value_; }
3216
3217 bool InstructionDataEquals(const HInstruction* other) const override {
3218 DCHECK(other->IsLongConstant()) << other->DebugName();
3219 return other->AsLongConstant()->value_ == value_;
3220 }
3221
3222 size_t ComputeHashCode() const override { return static_cast<size_t>(GetValue()); }
3223
3224 bool IsMinusOne() const override { return GetValue() == -1; }
3225 bool IsArithmeticZero() const override { return GetValue() == 0; }
3226 bool IsZeroBitPattern() const override { return GetValue() == 0; }
3227 bool IsOne() const override { return GetValue() == 1; }
3228
3229 DECLARE_INSTRUCTION(LongConstant);
3230
3231 protected:
3232 DEFAULT_COPY_CONSTRUCTOR(LongConstant);
3233
3234 private:
3235 explicit HLongConstant(int64_t value)
3236 : HConstant(kLongConstant, DataType::Type::kInt64),
3237 value_(value) {
3238 }
3239
3240 const int64_t value_;
3241
3242 friend class HGraph;
3243 };
3244
3245 class HFloatConstant final : public HConstant {
3246 public:
3247 float GetValue() const { return value_; }
3248
3249 uint64_t GetValueAsUint64() const override {
3250 return static_cast<uint64_t>(bit_cast<uint32_t, float>(value_));
3251 }
3252
3253 bool InstructionDataEquals(const HInstruction* other) const override {
3254 DCHECK(other->IsFloatConstant()) << other->DebugName();
3255 return other->AsFloatConstant()->GetValueAsUint64() == GetValueAsUint64();
3256 }
3257
3258 size_t ComputeHashCode() const override { return static_cast<size_t>(GetValue()); }
3259
3260 bool IsMinusOne() const override {
3261 return bit_cast<uint32_t, float>(value_) == bit_cast<uint32_t, float>((-1.0f));
3262 }
3263 bool IsArithmeticZero() const override {
3264 return std::fpclassify(value_) == FP_ZERO;
3265 }
3266 bool IsArithmeticPositiveZero() const {
3267 return IsArithmeticZero() && !std::signbit(value_);
3268 }
3269 bool IsArithmeticNegativeZero() const {
3270 return IsArithmeticZero() && std::signbit(value_);
3271 }
3272 bool IsZeroBitPattern() const override {
3273 return bit_cast<uint32_t, float>(value_) == bit_cast<uint32_t, float>(0.0f);
3274 }
3275 bool IsOne() const override {
3276 return bit_cast<uint32_t, float>(value_) == bit_cast<uint32_t, float>(1.0f);
3277 }
3278 bool IsNaN() const {
3279 return std::isnan(value_);
3280 }
3281
3282 DECLARE_INSTRUCTION(FloatConstant);
3283
3284 protected:
3285 DEFAULT_COPY_CONSTRUCTOR(FloatConstant);
3286
3287 private:
3288 explicit HFloatConstant(float value)
3289 : HConstant(kFloatConstant, DataType::Type::kFloat32),
3290 value_(value) {
3291 }
3292 explicit HFloatConstant(int32_t value)
3293 : HConstant(kFloatConstant, DataType::Type::kFloat32),
3294 value_(bit_cast<float, int32_t>(value)) {
3295 }
3296
3297 const float value_;
3298
3299 // Only the SsaBuilder and HGraph can create floating-point constants.
3300 friend class SsaBuilder;
3301 friend class HGraph;
3302 };
3303
3304 class HDoubleConstant final : public HConstant {
3305 public:
3306 double GetValue() const { return value_; }
3307
3308 uint64_t GetValueAsUint64() const override { return bit_cast<uint64_t, double>(value_); }
3309
3310 bool InstructionDataEquals(const HInstruction* other) const override {
3311 DCHECK(other->IsDoubleConstant()) << other->DebugName();
3312 return other->AsDoubleConstant()->GetValueAsUint64() == GetValueAsUint64();
3313 }
3314
3315 size_t ComputeHashCode() const override { return static_cast<size_t>(GetValue()); }
3316
3317 bool IsMinusOne() const override {
3318 return bit_cast<uint64_t, double>(value_) == bit_cast<uint64_t, double>((-1.0));
3319 }
3320 bool IsArithmeticZero() const override {
3321 return std::fpclassify(value_) == FP_ZERO;
3322 }
3323 bool IsArithmeticPositiveZero() const {
3324 return IsArithmeticZero() && !std::signbit(value_);
3325 }
3326 bool IsArithmeticNegativeZero() const {
3327 return IsArithmeticZero() && std::signbit(value_);
3328 }
3329 bool IsZeroBitPattern() const override {
3330 return bit_cast<uint64_t, double>(value_) == bit_cast<uint64_t, double>((0.0));
3331 }
3332 bool IsOne() const override {
3333 return bit_cast<uint64_t, double>(value_) == bit_cast<uint64_t, double>(1.0);
3334 }
3335 bool IsNaN() const {
3336 return std::isnan(value_);
3337 }
3338
3339 DECLARE_INSTRUCTION(DoubleConstant);
3340
3341 protected:
3342 DEFAULT_COPY_CONSTRUCTOR(DoubleConstant);
3343
3344 private:
3345 explicit HDoubleConstant(double value)
3346 : HConstant(kDoubleConstant, DataType::Type::kFloat64),
3347 value_(value) {
3348 }
3349 explicit HDoubleConstant(int64_t value)
3350 : HConstant(kDoubleConstant, DataType::Type::kFloat64),
3351 value_(bit_cast<double, int64_t>(value)) {
3352 }
3353
3354 const double value_;
3355
3356 // Only the SsaBuilder and HGraph can create floating-point constants.
3357 friend class SsaBuilder;
3358 friend class HGraph;
3359 };
3360
3361 // Conditional branch. A block ending with an HIf instruction must have
3362 // two successors.
3363 class HIf final : public HExpression<1> {
3364 public:
3365 explicit HIf(HInstruction* input, uint32_t dex_pc = kNoDexPc)
3366 : HExpression(kIf, SideEffects::None(), dex_pc),
3367 true_count_(std::numeric_limits<uint16_t>::max()),
3368 false_count_(std::numeric_limits<uint16_t>::max()) {
3369 SetRawInputAt(0, input);
3370 }
3371
3372 bool IsClonable() const override { return true; }
3373 bool IsControlFlow() const override { return true; }
3374
3375 HBasicBlock* IfTrueSuccessor() const {
3376 return GetBlock()->GetSuccessors()[0];
3377 }
3378
3379 HBasicBlock* IfFalseSuccessor() const {
3380 return GetBlock()->GetSuccessors()[1];
3381 }
3382
3383 void SetTrueCount(uint16_t count) { true_count_ = count; }
3384 uint16_t GetTrueCount() const { return true_count_; }
3385
3386 void SetFalseCount(uint16_t count) { false_count_ = count; }
3387 uint16_t GetFalseCount() const { return false_count_; }
3388
3389 DECLARE_INSTRUCTION(If);
3390
3391 protected:
3392 DEFAULT_COPY_CONSTRUCTOR(If);
3393
3394 private:
3395 uint16_t true_count_;
3396 uint16_t false_count_;
3397 };
3398
3399
3400 // Abstract instruction which marks the beginning and/or end of a try block and
3401 // links it to the respective exception handlers. Behaves the same as a Goto in
3402 // non-exceptional control flow.
3403 // Normal-flow successor is stored at index zero, exception handlers under
3404 // higher indices in no particular order.
3405 class HTryBoundary final : public HExpression<0> {
3406 public:
3407 enum class BoundaryKind {
3408 kEntry,
3409 kExit,
3410 kLast = kExit
3411 };
3412
// SideEffects::CanTriggerGC prevents instructions with SideEffects::DependOnGC from being
// live across the edges entering a catch block, as GC might happen while an exception is
// being thrown. A TryBoundary with BoundaryKind::kExit is conservatively used for that,
// as there is no dedicated HInstruction that a catch block must start with.
3417 explicit HTryBoundary(BoundaryKind kind, uint32_t dex_pc = kNoDexPc)
3418 : HExpression(kTryBoundary,
3419 (kind == BoundaryKind::kExit) ? SideEffects::CanTriggerGC()
3420 : SideEffects::None(),
3421 dex_pc) {
3422 SetPackedField<BoundaryKindField>(kind);
3423 }
3424
3425 bool IsControlFlow() const override { return true; }
3426
3427 // Returns the block's non-exceptional successor (index zero).
3428 HBasicBlock* GetNormalFlowSuccessor() const { return GetBlock()->GetSuccessors()[0]; }
3429
3430 ArrayRef<HBasicBlock* const> GetExceptionHandlers() const {
3431 return ArrayRef<HBasicBlock* const>(GetBlock()->GetSuccessors()).SubArray(1u);
3432 }
3433
3434 // Returns whether `handler` is among its exception handlers (non-zero index
3435 // successors).
3436 bool HasExceptionHandler(const HBasicBlock& handler) const {
3437 DCHECK(handler.IsCatchBlock());
3438 return GetBlock()->HasSuccessor(&handler, 1u /* Skip first successor. */);
3439 }
3440
3441 // If not present already, adds `handler` to its block's list of exception
3442 // handlers.
3443 void AddExceptionHandler(HBasicBlock* handler) {
3444 if (!HasExceptionHandler(*handler)) {
3445 GetBlock()->AddSuccessor(handler);
3446 }
3447 }
3448
3449 BoundaryKind GetBoundaryKind() const { return GetPackedField<BoundaryKindField>(); }
3450 bool IsEntry() const { return GetBoundaryKind() == BoundaryKind::kEntry; }
3451
3452 bool HasSameExceptionHandlersAs(const HTryBoundary& other) const;
3453
3454 DECLARE_INSTRUCTION(TryBoundary);
3455
3456 protected:
3457 DEFAULT_COPY_CONSTRUCTOR(TryBoundary);
3458
3459 private:
3460 static constexpr size_t kFieldBoundaryKind = kNumberOfGenericPackedBits;
3461 static constexpr size_t kFieldBoundaryKindSize =
3462 MinimumBitsToStore(static_cast<size_t>(BoundaryKind::kLast));
3463 static constexpr size_t kNumberOfTryBoundaryPackedBits =
3464 kFieldBoundaryKind + kFieldBoundaryKindSize;
3465 static_assert(kNumberOfTryBoundaryPackedBits <= kMaxNumberOfPackedBits,
3466 "Too many packed fields.");
3467 using BoundaryKindField = BitField<BoundaryKind, kFieldBoundaryKind, kFieldBoundaryKindSize>;
3468 };
3469
3470 // Deoptimize to interpreter, upon checking a condition.
3471 class HDeoptimize final : public HVariableInputSizeInstruction {
3472 public:
3473 // Use this constructor when the `HDeoptimize` acts as a barrier, where no code can move
3474 // across.
3475 HDeoptimize(ArenaAllocator* allocator,
3476 HInstruction* cond,
3477 DeoptimizationKind kind,
3478 uint32_t dex_pc)
3479 : HVariableInputSizeInstruction(
3480 kDeoptimize,
3481 SideEffects::All(),
3482 dex_pc,
3483 allocator,
3484 /* number_of_inputs= */ 1,
3485 kArenaAllocMisc) {
3486 SetPackedFlag<kFieldCanBeMoved>(false);
3487 SetPackedField<DeoptimizeKindField>(kind);
3488 SetRawInputAt(0, cond);
3489 }
3490
3491 bool IsClonable() const override { return true; }
3492
// Use this constructor when the `HDeoptimize` guards an instruction; any user that
// relies on the deoptimization passing should take the `HDeoptimize` as its input
// instead of `guard`.
3496 // We set CanTriggerGC to prevent any intermediate address to be live
3497 // at the point of the `HDeoptimize`.
3498 HDeoptimize(ArenaAllocator* allocator,
3499 HInstruction* cond,
3500 HInstruction* guard,
3501 DeoptimizationKind kind,
3502 uint32_t dex_pc)
3503 : HVariableInputSizeInstruction(
3504 kDeoptimize,
3505 guard->GetType(),
3506 SideEffects::CanTriggerGC(),
3507 dex_pc,
3508 allocator,
3509 /* number_of_inputs= */ 2,
3510 kArenaAllocMisc) {
3511 SetPackedFlag<kFieldCanBeMoved>(true);
3512 SetPackedField<DeoptimizeKindField>(kind);
3513 SetRawInputAt(0, cond);
3514 SetRawInputAt(1, guard);
3515 }
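
// Illustrative sketch (hypothetical names): when this form guards a value, users of
// the guarded value are rewired to the HDeoptimize so they cannot be scheduled before
// the guard, e.g. conceptually:
//
//   before:  value = <some instruction>;                                   use(value)
//   after:   deopt = HDeoptimize(allocator, cond, value, kind, dex_pc);    use(deopt)
//
// `deopt` has the same type as `value` (see `guard->GetType()` above).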
3516
3517 bool CanBeMoved() const override { return GetPackedFlag<kFieldCanBeMoved>(); }
3518
3519 bool InstructionDataEquals(const HInstruction* other) const override {
3520 return (other->CanBeMoved() == CanBeMoved()) &&
3521 (other->AsDeoptimize()->GetDeoptimizationKind() == GetDeoptimizationKind());
3522 }
3523
3524 bool NeedsEnvironment() const override { return true; }
3525
3526 bool CanThrow() const override { return true; }
3527
3528 DeoptimizationKind GetDeoptimizationKind() const { return GetPackedField<DeoptimizeKindField>(); }
3529
3530 bool GuardsAnInput() const {
3531 return InputCount() == 2;
3532 }
3533
3534 HInstruction* GuardedInput() const {
3535 DCHECK(GuardsAnInput());
3536 return InputAt(1);
3537 }
3538
3539 void RemoveGuard() {
3540 RemoveInputAt(1);
3541 }
3542
3543 DECLARE_INSTRUCTION(Deoptimize);
3544
3545 protected:
3546 DEFAULT_COPY_CONSTRUCTOR(Deoptimize);
3547
3548 private:
3549 static constexpr size_t kFieldCanBeMoved = kNumberOfGenericPackedBits;
3550 static constexpr size_t kFieldDeoptimizeKind = kNumberOfGenericPackedBits + 1;
3551 static constexpr size_t kFieldDeoptimizeKindSize =
3552 MinimumBitsToStore(static_cast<size_t>(DeoptimizationKind::kLast));
3553 static constexpr size_t kNumberOfDeoptimizePackedBits =
3554 kFieldDeoptimizeKind + kFieldDeoptimizeKindSize;
3555 static_assert(kNumberOfDeoptimizePackedBits <= kMaxNumberOfPackedBits,
3556 "Too many packed fields.");
3557 using DeoptimizeKindField =
3558 BitField<DeoptimizationKind, kFieldDeoptimizeKind, kFieldDeoptimizeKindSize>;
3559 };
3560
// Represents a should_deoptimize flag. Currently used for CHA-based devirtualization.
// The compiled code checks this flag value in a guard before a devirtualized call and,
// if the flag is set, deoptimizes.
3564 // It has a 4-byte slot on stack.
3565 // TODO: allocate a register for this flag.
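// A conceptual sketch of the guard shape built around this flag (hypothetical; the
// actual graph is produced by the CHA guard optimization pass):
//
//   flag  = HShouldDeoptimizeFlag(allocator, dex_pc)
//   cond  = HNotEqual(flag, graph->GetIntConstant(0))
//   deopt = HDeoptimize(allocator, cond, DeoptimizationKind::kCHA, dex_pc)
//   // ... devirtualized call that the guard protects ...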
3566 class HShouldDeoptimizeFlag final : public HVariableInputSizeInstruction {
3567 public:
// CHA guards are optimized only in a separate pass; this instruction has no side
// effects with regard to other passes.
3570 HShouldDeoptimizeFlag(ArenaAllocator* allocator, uint32_t dex_pc)
3571 : HVariableInputSizeInstruction(kShouldDeoptimizeFlag,
3572 DataType::Type::kInt32,
3573 SideEffects::None(),
3574 dex_pc,
3575 allocator,
3576 0,
3577 kArenaAllocCHA) {
3578 }
3579
3580 // We do all CHA guard elimination/motion in a single pass, after which there is no
3581 // further guard elimination/motion since a guard might have been used for justification
3582 // of the elimination of another guard. Therefore, we pretend this guard cannot be moved
3583 // to avoid other optimizations trying to move it.
3584 bool CanBeMoved() const override { return false; }
3585
3586 DECLARE_INSTRUCTION(ShouldDeoptimizeFlag);
3587
3588 protected:
3589 DEFAULT_COPY_CONSTRUCTOR(ShouldDeoptimizeFlag);
3590 };
3591
3592 // Represents the ArtMethod that was passed as a first argument to
3593 // the method. It is used by instructions that depend on it, like
3594 // instructions that work with the dex cache.
3595 class HCurrentMethod final : public HExpression<0> {
3596 public:
3597 explicit HCurrentMethod(DataType::Type type, uint32_t dex_pc = kNoDexPc)
3598 : HExpression(kCurrentMethod, type, SideEffects::None(), dex_pc) {
3599 }
3600
3601 DECLARE_INSTRUCTION(CurrentMethod);
3602
3603 protected:
3604 DEFAULT_COPY_CONSTRUCTOR(CurrentMethod);
3605 };
3606
3607 // Fetches an ArtMethod from the virtual table or the interface method table
3608 // of a class.
3609 class HClassTableGet final : public HExpression<1> {
3610 public:
3611 enum class TableKind {
3612 kVTable,
3613 kIMTable,
3614 kLast = kIMTable
3615 };
3616 HClassTableGet(HInstruction* cls,
3617 DataType::Type type,
3618 TableKind kind,
3619 size_t index,
3620 uint32_t dex_pc)
3621 : HExpression(kClassTableGet, type, SideEffects::None(), dex_pc),
3622 index_(index) {
3623 SetPackedField<TableKindField>(kind);
3624 SetRawInputAt(0, cls);
3625 }
3626
3627 bool IsClonable() const override { return true; }
3628 bool CanBeMoved() const override { return true; }
3629 bool InstructionDataEquals(const HInstruction* other) const override {
3630 return other->AsClassTableGet()->GetIndex() == index_ &&
3631 other->AsClassTableGet()->GetPackedFields() == GetPackedFields();
3632 }
3633
3634 TableKind GetTableKind() const { return GetPackedField<TableKindField>(); }
3635 size_t GetIndex() const { return index_; }
3636
3637 DECLARE_INSTRUCTION(ClassTableGet);
3638
3639 protected:
3640 DEFAULT_COPY_CONSTRUCTOR(ClassTableGet);
3641
3642 private:
3643 static constexpr size_t kFieldTableKind = kNumberOfGenericPackedBits;
3644 static constexpr size_t kFieldTableKindSize =
3645 MinimumBitsToStore(static_cast<size_t>(TableKind::kLast));
3646 static constexpr size_t kNumberOfClassTableGetPackedBits = kFieldTableKind + kFieldTableKindSize;
3647 static_assert(kNumberOfClassTableGetPackedBits <= kMaxNumberOfPackedBits,
3648 "Too many packed fields.");
3649 using TableKindField = BitField<TableKind, kFieldTableKind, kFieldTableKindSize>;
3650
3651 // The index of the ArtMethod in the table.
3652 const size_t index_;
3653 };
3654
3655 // PackedSwitch (jump table). A block ending with a PackedSwitch instruction will
3656 // have one successor for each entry in the switch table, and the final successor
3657 // will be the block containing the next Dex opcode.
3658 class HPackedSwitch final : public HExpression<1> {
3659 public:
3660 HPackedSwitch(int32_t start_value,
3661 uint32_t num_entries,
3662 HInstruction* input,
3663 uint32_t dex_pc = kNoDexPc)
3664 : HExpression(kPackedSwitch, SideEffects::None(), dex_pc),
3665 start_value_(start_value),
3666 num_entries_(num_entries) {
3667 SetRawInputAt(0, input);
3668 }
3669
3670 bool IsClonable() const override { return true; }
3671
3672 bool IsControlFlow() const override { return true; }
3673
3674 int32_t GetStartValue() const { return start_value_; }
3675
3676 uint32_t GetNumEntries() const { return num_entries_; }
3677
3678 HBasicBlock* GetDefaultBlock() const {
3679 // Last entry is the default block.
3680 return GetBlock()->GetSuccessors()[num_entries_];
3681 }
3682 DECLARE_INSTRUCTION(PackedSwitch);
3683
3684 protected:
3685 DEFAULT_COPY_CONSTRUCTOR(PackedSwitch);
3686
3687 private:
3688 const int32_t start_value_;
3689 const uint32_t num_entries_;
3690 };
3691
3692 class HUnaryOperation : public HExpression<1> {
3693 public:
3694 HUnaryOperation(InstructionKind kind,
3695 DataType::Type result_type,
3696 HInstruction* input,
3697 uint32_t dex_pc = kNoDexPc)
3698 : HExpression(kind, result_type, SideEffects::None(), dex_pc) {
3699 SetRawInputAt(0, input);
3700 }
3701
3702 // All of the UnaryOperation instructions are clonable.
3703 bool IsClonable() const override { return true; }
3704
3705 HInstruction* GetInput() const { return InputAt(0); }
3706 DataType::Type GetResultType() const { return GetType(); }
3707
3708 bool CanBeMoved() const final { return true; }
3709 bool InstructionDataEquals([[maybe_unused]] const HInstruction* other) const override {
3710 return true;
3711 }
3712
3713 // Try to statically evaluate `this` and return a HConstant
3714 // containing the result of this evaluation. If `this` cannot
3715 // be evaluated as a constant, return null.
3716 HConstant* TryStaticEvaluation() const;
3717
3718 // Same but for `input` instead of GetInput().
3719 HConstant* TryStaticEvaluation(HInstruction* input) const;
3720
3721 // Apply this operation to `x`.
3722 virtual HConstant* Evaluate([[maybe_unused]] HIntConstant* x) const {
3723 LOG(FATAL) << DebugName() << " is not defined for int values";
3724 UNREACHABLE();
3725 }
3726 virtual HConstant* Evaluate([[maybe_unused]] HLongConstant* x) const {
3727 LOG(FATAL) << DebugName() << " is not defined for long values";
3728 UNREACHABLE();
3729 }
3730 virtual HConstant* Evaluate([[maybe_unused]] HFloatConstant* x) const {
3731 LOG(FATAL) << DebugName() << " is not defined for float values";
3732 UNREACHABLE();
3733 }
3734 virtual HConstant* Evaluate([[maybe_unused]] HDoubleConstant* x) const {
3735 LOG(FATAL) << DebugName() << " is not defined for double values";
3736 UNREACHABLE();
3737 }
3738
3739 DECLARE_ABSTRACT_INSTRUCTION(UnaryOperation);
3740
3741 protected:
3742 DEFAULT_COPY_CONSTRUCTOR(UnaryOperation);
3743 };
3744
3745 class HBinaryOperation : public HExpression<2> {
3746 public:
3747 HBinaryOperation(InstructionKind kind,
3748 DataType::Type result_type,
3749 HInstruction* left,
3750 HInstruction* right,
3751 SideEffects side_effects = SideEffects::None(),
3752 uint32_t dex_pc = kNoDexPc)
3753 : HExpression(kind, result_type, side_effects, dex_pc) {
3754 SetRawInputAt(0, left);
3755 SetRawInputAt(1, right);
3756 }
3757
3758 // All of the BinaryOperation instructions are clonable.
3759 bool IsClonable() const override { return true; }
3760
3761 HInstruction* GetLeft() const { return InputAt(0); }
3762 HInstruction* GetRight() const { return InputAt(1); }
3763 DataType::Type GetResultType() const { return GetType(); }
3764
3765 virtual bool IsCommutative() const { return false; }
3766
3767 // Put constant on the right.
3768 // Returns whether order is changed.
3769 bool OrderInputsWithConstantOnTheRight() {
3770 HInstruction* left = InputAt(0);
3771 HInstruction* right = InputAt(1);
3772 if (left->IsConstant() && !right->IsConstant()) {
3773 ReplaceInput(right, 0);
3774 ReplaceInput(left, 1);
3775 return true;
3776 }
3777 return false;
3778 }
3779
3780 // Order inputs by instruction id, but favor constant on the right side.
3781 // This helps GVN for commutative ops.
3782 void OrderInputs() {
3783 DCHECK(IsCommutative());
3784 HInstruction* left = InputAt(0);
3785 HInstruction* right = InputAt(1);
3786 if (left == right || (!left->IsConstant() && right->IsConstant())) {
3787 return;
3788 }
3789 if (OrderInputsWithConstantOnTheRight()) {
3790 return;
3791 }
3792 // Order according to instruction id.
3793 if (left->GetId() > right->GetId()) {
3794 ReplaceInput(right, 0);
3795 ReplaceInput(left, 1);
3796 }
3797 }
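
// Illustrative sketch (hypothetical HAdd nodes, both commutative): OrderInputs()
// canonicalizes operand order so GVN can recognize `a + b` and `b + a` as the
// same value:
//
//   add1 = HAdd(type, a, b)   // assume a->GetId() < b->GetId()
//   add2 = HAdd(type, b, a)
//   add1->OrderInputs();      // inputs stay (a, b)
//   add2->OrderInputs();      // inputs become (a, b) as well
//
// A constant input always ends up on the right, e.g. (c0, a) becomes (a, c0).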
3798
3799 bool CanBeMoved() const final { return true; }
3800 bool InstructionDataEquals([[maybe_unused]] const HInstruction* other) const override {
3801 return true;
3802 }
3803
3804 // Try to statically evaluate `this` and return a HConstant
3805 // containing the result of this evaluation. If `this` cannot
3806 // be evaluated as a constant, return null.
3807 HConstant* TryStaticEvaluation() const;
3808
3809 // Same but for `left` and `right` instead of GetLeft() and GetRight().
3810 HConstant* TryStaticEvaluation(HInstruction* left, HInstruction* right) const;
3811
3812 // Apply this operation to `x` and `y`.
3813 virtual HConstant* Evaluate([[maybe_unused]] HNullConstant* x,
3814 [[maybe_unused]] HNullConstant* y) const {
3815 LOG(FATAL) << DebugName() << " is not defined for the (null, null) case.";
3816 UNREACHABLE();
3817 }
3818 virtual HConstant* Evaluate([[maybe_unused]] HIntConstant* x,
3819 [[maybe_unused]] HIntConstant* y) const {
3820 LOG(FATAL) << DebugName() << " is not defined for the (int, int) case.";
3821 UNREACHABLE();
3822 }
3823 virtual HConstant* Evaluate([[maybe_unused]] HLongConstant* x,
3824 [[maybe_unused]] HLongConstant* y) const {
3825 LOG(FATAL) << DebugName() << " is not defined for the (long, long) case.";
3826 UNREACHABLE();
3827 }
3828 virtual HConstant* Evaluate([[maybe_unused]] HLongConstant* x,
3829 [[maybe_unused]] HIntConstant* y) const {
3830 LOG(FATAL) << DebugName() << " is not defined for the (long, int) case.";
3831 UNREACHABLE();
3832 }
3833 virtual HConstant* Evaluate([[maybe_unused]] HFloatConstant* x,
3834 [[maybe_unused]] HFloatConstant* y) const {
3835 LOG(FATAL) << DebugName() << " is not defined for float values";
3836 UNREACHABLE();
3837 }
3838 virtual HConstant* Evaluate([[maybe_unused]] HDoubleConstant* x,
3839 [[maybe_unused]] HDoubleConstant* y) const {
3840 LOG(FATAL) << DebugName() << " is not defined for double values";
3841 UNREACHABLE();
3842 }
3843
3844 // Returns an input that can legally be used as the right input and is
3845 // constant, or null.
3846 HConstant* GetConstantRight() const;
3847
// If `GetConstantRight()` returns one of the inputs, this returns the other
// one. Otherwise it returns null.
3850 HInstruction* GetLeastConstantLeft() const;
3851
3852 DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation);
3853
3854 protected:
3855 DEFAULT_COPY_CONSTRUCTOR(BinaryOperation);
3856 };
3857
3858 // The comparison bias applies for floating point operations and indicates how NaN
3859 // comparisons are treated:
3860 enum class ComparisonBias { // private marker to avoid generate-operator-out.py from processing.
kNoBias,  // bias is not applicable (i.e. for long operations)
3862 kGtBias, // return 1 for NaN comparisons
3863 kLtBias, // return -1 for NaN comparisons
3864 kLast = kLtBias
3865 };
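
// Illustrative sketch of how the bias plays out once an HCompare has been merged
// into a condition (see the Evaluate() overloads below). With x = NaN:
//
//   kGtBias: CompareFP(x, y) == 1,  so `Compare(x, y) < 0` evaluates to false
//            (matching dex cmpg-float/cmpg-double followed by if-ltz).
//   kLtBias: CompareFP(x, y) == -1, so `Compare(x, y) > 0` evaluates to false
//            (matching dex cmpl-float/cmpl-double followed by if-gtz).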
3866
3867 std::ostream& operator<<(std::ostream& os, ComparisonBias rhs);
3868
3869 class HCondition : public HBinaryOperation {
3870 public:
3871 HCondition(InstructionKind kind,
3872 HInstruction* first,
3873 HInstruction* second,
3874 uint32_t dex_pc = kNoDexPc)
3875 : HBinaryOperation(kind,
3876 DataType::Type::kBool,
3877 first,
3878 second,
3879 SideEffects::None(),
3880 dex_pc) {
3881 SetPackedField<ComparisonBiasField>(ComparisonBias::kNoBias);
3882 }
3883
3884 static HCondition* Create(HGraph* graph,
3885 IfCondition cond,
3886 HInstruction* lhs,
3887 HInstruction* rhs,
3888 uint32_t dex_pc = kNoDexPc);
3889
// For code generation purposes, returns whether this instruction is just before
// `instruction`, disregarding moves in between.
3892 bool IsBeforeWhenDisregardMoves(HInstruction* instruction) const;
3893
3894 DECLARE_ABSTRACT_INSTRUCTION(Condition);
3895
3896 virtual IfCondition GetCondition() const = 0;
3897
3898 virtual IfCondition GetOppositeCondition() const = 0;
3899
3900 bool IsGtBias() const { return GetBias() == ComparisonBias::kGtBias; }
3901 bool IsLtBias() const { return GetBias() == ComparisonBias::kLtBias; }
3902
3903 ComparisonBias GetBias() const { return GetPackedField<ComparisonBiasField>(); }
3904 void SetBias(ComparisonBias bias) { SetPackedField<ComparisonBiasField>(bias); }
3905
3906 bool InstructionDataEquals(const HInstruction* other) const override {
3907 return GetPackedFields() == other->AsCondition()->GetPackedFields();
3908 }
3909
3910 bool IsFPConditionTrueIfNaN() const {
3911 DCHECK(DataType::IsFloatingPointType(InputAt(0)->GetType())) << InputAt(0)->GetType();
3912 IfCondition if_cond = GetCondition();
3913 if (if_cond == kCondNE) {
3914 return true;
3915 } else if (if_cond == kCondEQ) {
3916 return false;
3917 }
3918 return ((if_cond == kCondGT) || (if_cond == kCondGE)) && IsGtBias();
3919 }
3920
3921 bool IsFPConditionFalseIfNaN() const {
3922 DCHECK(DataType::IsFloatingPointType(InputAt(0)->GetType())) << InputAt(0)->GetType();
3923 IfCondition if_cond = GetCondition();
3924 if (if_cond == kCondEQ) {
3925 return true;
3926 } else if (if_cond == kCondNE) {
3927 return false;
3928 }
3929 return ((if_cond == kCondLT) || (if_cond == kCondLE)) && IsGtBias();
3930 }
3931
3932 protected:
3933 // Needed if we merge a HCompare into a HCondition.
3934 static constexpr size_t kFieldComparisonBias = kNumberOfGenericPackedBits;
3935 static constexpr size_t kFieldComparisonBiasSize =
3936 MinimumBitsToStore(static_cast<size_t>(ComparisonBias::kLast));
3937 static constexpr size_t kNumberOfConditionPackedBits =
3938 kFieldComparisonBias + kFieldComparisonBiasSize;
3939 static_assert(kNumberOfConditionPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
3940 using ComparisonBiasField =
3941 BitField<ComparisonBias, kFieldComparisonBias, kFieldComparisonBiasSize>;
3942
3943 template <typename T>
3944 int32_t Compare(T x, T y) const { return x > y ? 1 : (x < y ? -1 : 0); }
3945
3946 template <typename T>
3947 int32_t CompareFP(T x, T y) const {
3948 DCHECK(DataType::IsFloatingPointType(InputAt(0)->GetType())) << InputAt(0)->GetType();
3949 DCHECK_NE(GetBias(), ComparisonBias::kNoBias);
3950 // Handle the bias.
3951 return std::isunordered(x, y) ? (IsGtBias() ? 1 : -1) : Compare(x, y);
3952 }
3953
3954 // Return an integer constant containing the result of a condition evaluated at compile time.
3955 HIntConstant* MakeConstantCondition(bool value) const {
3956 return GetBlock()->GetGraph()->GetIntConstant(value);
3957 }
3958
3959 DEFAULT_COPY_CONSTRUCTOR(Condition);
3960 };
3961
3962 // Instruction to check if two inputs are equal to each other.
3963 class HEqual final : public HCondition {
3964 public:
3965 HEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
3966 : HCondition(kEqual, first, second, dex_pc) {
3967 }
3968
3969 bool IsCommutative() const override { return true; }
3970
3971 HConstant* Evaluate([[maybe_unused]] HNullConstant* x,
3972 [[maybe_unused]] HNullConstant* y) const override {
3973 return MakeConstantCondition(true);
3974 }
3975 HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
3976 return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()));
3977 }
3978 // In the following Evaluate methods, a HCompare instruction has
3979 // been merged into this HEqual instruction; evaluate it as
3980 // `Compare(x, y) == 0`.
3981 HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
3982 return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0));
3983 }
3984 HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
3985 return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0));
3986 }
3987 HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
3988 return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0));
3989 }
3990
3991 DECLARE_INSTRUCTION(Equal);
3992
3993 IfCondition GetCondition() const override {
3994 return kCondEQ;
3995 }
3996
3997 IfCondition GetOppositeCondition() const override {
3998 return kCondNE;
3999 }
4000
4001 protected:
4002 DEFAULT_COPY_CONSTRUCTOR(Equal);
4003
4004 private:
4005 template <typename T> static bool Compute(T x, T y) { return x == y; }
4006 };
4007
4008 class HNotEqual final : public HCondition {
4009 public:
4010 HNotEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
4011 : HCondition(kNotEqual, first, second, dex_pc) {
4012 }
4013
4014 bool IsCommutative() const override { return true; }
4015
4016 HConstant* Evaluate([[maybe_unused]] HNullConstant* x,
4017 [[maybe_unused]] HNullConstant* y) const override {
4018 return MakeConstantCondition(false);
4019 }
4020 HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
4021 return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()));
4022 }
4023 // In the following Evaluate methods, a HCompare instruction has
4024 // been merged into this HNotEqual instruction; evaluate it as
4025 // `Compare(x, y) != 0`.
4026 HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
4027 return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0));
4028 }
4029 HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
4030 return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0));
4031 }
4032 HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
4033 return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0));
4034 }
4035
4036 DECLARE_INSTRUCTION(NotEqual);
4037
4038 IfCondition GetCondition() const override {
4039 return kCondNE;
4040 }
4041
4042 IfCondition GetOppositeCondition() const override {
4043 return kCondEQ;
4044 }
4045
4046 protected:
4047 DEFAULT_COPY_CONSTRUCTOR(NotEqual);
4048
4049 private:
4050 template <typename T> static bool Compute(T x, T y) { return x != y; }
4051 };
4052
4053 class HLessThan final : public HCondition {
4054 public:
4055 HLessThan(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
4056 : HCondition(kLessThan, first, second, dex_pc) {
4057 }
4058
4059 HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
4060 return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()));
4061 }
4062 // In the following Evaluate methods, a HCompare instruction has
4063 // been merged into this HLessThan instruction; evaluate it as
4064 // `Compare(x, y) < 0`.
4065 HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
4066 return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0));
4067 }
4068 HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
4069 return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0));
4070 }
4071 HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
4072 return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0));
4073 }
4074
4075 DECLARE_INSTRUCTION(LessThan);
4076
4077 IfCondition GetCondition() const override {
4078 return kCondLT;
4079 }
4080
4081 IfCondition GetOppositeCondition() const override {
4082 return kCondGE;
4083 }
4084
4085 protected:
4086 DEFAULT_COPY_CONSTRUCTOR(LessThan);
4087
4088 private:
4089 template <typename T> static bool Compute(T x, T y) { return x < y; }
4090 };
4091
4092 class HLessThanOrEqual final : public HCondition {
4093 public:
4094 HLessThanOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
4095 : HCondition(kLessThanOrEqual, first, second, dex_pc) {
4096 }
4097
4098 HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
4099 return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()));
4100 }
4101 // In the following Evaluate methods, a HCompare instruction has
4102 // been merged into this HLessThanOrEqual instruction; evaluate it as
4103 // `Compare(x, y) <= 0`.
4104 HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
4105 return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0));
4106 }
4107 HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
4108 return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0));
4109 }
4110 HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
4111 return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0));
4112 }
4113
4114 DECLARE_INSTRUCTION(LessThanOrEqual);
4115
4116 IfCondition GetCondition() const override {
4117 return kCondLE;
4118 }
4119
4120 IfCondition GetOppositeCondition() const override {
4121 return kCondGT;
4122 }
4123
4124 protected:
4125 DEFAULT_COPY_CONSTRUCTOR(LessThanOrEqual);
4126
4127 private:
4128 template <typename T> static bool Compute(T x, T y) { return x <= y; }
4129 };
4130
4131 class HGreaterThan final : public HCondition {
4132 public:
4133 HGreaterThan(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
4134 : HCondition(kGreaterThan, first, second, dex_pc) {
4135 }
4136
4137 HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
4138 return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()));
4139 }
4140 // In the following Evaluate methods, a HCompare instruction has
4141 // been merged into this HGreaterThan instruction; evaluate it as
4142 // `Compare(x, y) > 0`.
4143 HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
4144 return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0));
4145 }
4146 HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
4147 return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0));
4148 }
4149 HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
4150 return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0));
4151 }
4152
4153 DECLARE_INSTRUCTION(GreaterThan);
4154
4155 IfCondition GetCondition() const override {
4156 return kCondGT;
4157 }
4158
4159 IfCondition GetOppositeCondition() const override {
4160 return kCondLE;
4161 }
4162
4163 protected:
4164 DEFAULT_COPY_CONSTRUCTOR(GreaterThan);
4165
4166 private:
4167 template <typename T> static bool Compute(T x, T y) { return x > y; }
4168 };
4169
4170 class HGreaterThanOrEqual final : public HCondition {
4171 public:
4172 HGreaterThanOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
4173 : HCondition(kGreaterThanOrEqual, first, second, dex_pc) {
4174 }
4175
4176 HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
4177 return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()));
4178 }
4179 // In the following Evaluate methods, a HCompare instruction has
4180 // been merged into this HGreaterThanOrEqual instruction; evaluate it as
4181 // `Compare(x, y) >= 0`.
4182 HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
4183 return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0));
4184 }
4185 HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
4186 return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0));
4187 }
4188 HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
4189 return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0));
4190 }
4191
4192 DECLARE_INSTRUCTION(GreaterThanOrEqual);
4193
4194 IfCondition GetCondition() const override {
4195 return kCondGE;
4196 }
4197
4198 IfCondition GetOppositeCondition() const override {
4199 return kCondLT;
4200 }
4201
4202 protected:
4203 DEFAULT_COPY_CONSTRUCTOR(GreaterThanOrEqual);
4204
4205 private:
4206 template <typename T> static bool Compute(T x, T y) { return x >= y; }
4207 };
4208
4209 class HBelow final : public HCondition {
4210 public:
4211 HBelow(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
4212 : HCondition(kBelow, first, second, dex_pc) {
4213 }
4214
4215 HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
4216 return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()));
4217 }
4218 HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
4219 return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()));
4220 }
4221
4222 DECLARE_INSTRUCTION(Below);
4223
4224 IfCondition GetCondition() const override {
4225 return kCondB;
4226 }
4227
4228 IfCondition GetOppositeCondition() const override {
4229 return kCondAE;
4230 }
4231
4232 protected:
4233 DEFAULT_COPY_CONSTRUCTOR(Below);
4234
4235 private:
4236 template <typename T> static bool Compute(T x, T y) {
4237 return MakeUnsigned(x) < MakeUnsigned(y);
4238 }
4239 };
4240
4241 class HBelowOrEqual final : public HCondition {
4242 public:
4243 HBelowOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
4244 : HCondition(kBelowOrEqual, first, second, dex_pc) {
4245 }
4246
4247 HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
4248 return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()));
4249 }
4250 HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
4251 return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()));
4252 }
4253
4254 DECLARE_INSTRUCTION(BelowOrEqual);
4255
4256 IfCondition GetCondition() const override {
4257 return kCondBE;
4258 }
4259
4260 IfCondition GetOppositeCondition() const override {
4261 return kCondA;
4262 }
4263
4264 protected:
4265 DEFAULT_COPY_CONSTRUCTOR(BelowOrEqual);
4266
4267 private:
4268 template <typename T> static bool Compute(T x, T y) {
4269 return MakeUnsigned(x) <= MakeUnsigned(y);
4270 }
4271 };
4272
4273 class HAbove final : public HCondition {
4274 public:
4275 HAbove(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
4276 : HCondition(kAbove, first, second, dex_pc) {
4277 }
4278
4279 HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
4280 return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()));
4281 }
4282 HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
4283 return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()));
4284 }
4285
4286 DECLARE_INSTRUCTION(Above);
4287
4288 IfCondition GetCondition() const override {
4289 return kCondA;
4290 }
4291
4292 IfCondition GetOppositeCondition() const override {
4293 return kCondBE;
4294 }
4295
4296 protected:
4297 DEFAULT_COPY_CONSTRUCTOR(Above);
4298
4299 private:
4300 template <typename T> static bool Compute(T x, T y) {
4301 return MakeUnsigned(x) > MakeUnsigned(y);
4302 }
4303 };
4304
4305 class HAboveOrEqual final : public HCondition {
4306 public:
4307 HAboveOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
4308 : HCondition(kAboveOrEqual, first, second, dex_pc) {
4309 }
4310
4311 HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
4312 return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()));
4313 }
4314 HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
4315 return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()));
4316 }
4317
4318 DECLARE_INSTRUCTION(AboveOrEqual);
4319
4320 IfCondition GetCondition() const override {
4321 return kCondAE;
4322 }
4323
4324 IfCondition GetOppositeCondition() const override {
4325 return kCondB;
4326 }
4327
4328 protected:
4329 DEFAULT_COPY_CONSTRUCTOR(AboveOrEqual);
4330
4331 private:
4332 template <typename T> static bool Compute(T x, T y) {
4333 return MakeUnsigned(x) >= MakeUnsigned(y);
4334 }
4335 };
4336
4337 // Instruction to check how two inputs compare to each other.
4338 // Result is 0 if input0 == input1, 1 if input0 > input1, or -1 if input0 < input1.
4339 class HCompare final : public HBinaryOperation {
4340 public:
4341 // Note that `comparison_type` is the type of comparison performed
4342 // between the comparison's inputs, not the type of the instantiated
4343 // HCompare instruction (which is always DataType::Type::kInt32).
4344 HCompare(DataType::Type comparison_type,
4345 HInstruction* first,
4346 HInstruction* second,
4347 ComparisonBias bias,
4348 uint32_t dex_pc)
4349 : HBinaryOperation(kCompare,
4350 DataType::Type::kInt32,
4351 first,
4352 second,
4353 SideEffectsForArchRuntimeCalls(comparison_type),
4354 dex_pc) {
4355 SetPackedField<ComparisonBiasField>(bias);
4356 SetPackedField<ComparisonTypeField>(comparison_type);
4357 }
4358
4359 template <typename T>
4360 int32_t Compute(T x, T y) const { return x > y ? 1 : (x < y ? -1 : 0); }
4361
4362 template <typename T>
4363 int32_t ComputeFP(T x, T y) const {
4364 DCHECK(DataType::IsFloatingPointType(InputAt(0)->GetType())) << InputAt(0)->GetType();
4365 DCHECK_NE(GetBias(), ComparisonBias::kNoBias);
4366 // Handle the bias.
4367 return std::isunordered(x, y) ? (IsGtBias() ? 1 : -1) : Compute(x, y);
4368 }
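// Illustrative note: the bias only matters for unordered (NaN) operands. With a gt bias
// (as for the Dex cmpg-float/cmpg-double instructions) a NaN operand makes the result 1;
// with an lt bias (cmpl-float/cmpl-double) it makes the result -1.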
4369
4370 HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
4371 // Note that there is no "cmp-int" Dex instruction so we shouldn't
4372 // reach this code path when processing a freshly built HIR
4373 // graph. However HCompare integer instructions can be synthesized
4374 // by the instruction simplifier to implement IntegerCompare and
4375 // IntegerSignum intrinsics, so we have to handle this case.
4376 const int32_t value = DataType::IsUnsignedType(GetComparisonType()) ?
4377 Compute(x->GetValueAsUint64(), y->GetValueAsUint64()) :
4378 Compute(x->GetValue(), y->GetValue());
4379 return MakeConstantComparison(value);
4380 }
4381 HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
4382 const int32_t value = DataType::IsUnsignedType(GetComparisonType()) ?
4383 Compute(x->GetValueAsUint64(), y->GetValueAsUint64()) :
4384 Compute(x->GetValue(), y->GetValue());
4385 return MakeConstantComparison(value);
4386 }
4387 HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
4388 return MakeConstantComparison(ComputeFP(x->GetValue(), y->GetValue()));
4389 }
4390 HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
4391 return MakeConstantComparison(ComputeFP(x->GetValue(), y->GetValue()));
4392 }
4393
4394 bool InstructionDataEquals(const HInstruction* other) const override {
4395 return GetPackedFields() == other->AsCompare()->GetPackedFields();
4396 }
4397
4398 ComparisonBias GetBias() const { return GetPackedField<ComparisonBiasField>(); }
4399
4400 DataType::Type GetComparisonType() const { return GetPackedField<ComparisonTypeField>(); }
4401
4402 void SetComparisonType(DataType::Type newType) { SetPackedField<ComparisonTypeField>(newType); }
4403
4404 // Does this compare instruction have a "gt bias" (vs an "lt bias")?
4405 // Only meaningful for floating-point comparisons.
4406 bool IsGtBias() const {
4407 DCHECK(DataType::IsFloatingPointType(InputAt(0)->GetType())) << InputAt(0)->GetType();
4408 return GetBias() == ComparisonBias::kGtBias;
4409 }
4410
4411 static SideEffects SideEffectsForArchRuntimeCalls([[maybe_unused]] DataType::Type type) {
4412 // Comparisons do not require a runtime call in any back end.
4413 return SideEffects::None();
4414 }
4415
4416 DECLARE_INSTRUCTION(Compare);
4417
4418 protected:
4419 static constexpr size_t kFieldComparisonBias = kNumberOfGenericPackedBits;
4420 static constexpr size_t kFieldComparisonBiasSize =
4421 MinimumBitsToStore(static_cast<size_t>(ComparisonBias::kLast));
4422 static constexpr size_t kFieldComparisonType = kFieldComparisonBias + kFieldComparisonBiasSize;
4423 static constexpr size_t kFieldComparisonTypeSize =
4424 MinimumBitsToStore(static_cast<size_t>(DataType::Type::kLast));
4425 static constexpr size_t kNumberOfComparePackedBits =
4426 kFieldComparisonType + kFieldComparisonTypeSize;
4427 static_assert(kNumberOfComparePackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
4428 using ComparisonBiasField =
4429 BitField<ComparisonBias, kFieldComparisonBias, kFieldComparisonBiasSize>;
4430 using ComparisonTypeField =
4431 BitField<DataType::Type, kFieldComparisonType, kFieldComparisonTypeSize>;
4432
4433 // Return an integer constant containing the result of a comparison evaluated at compile time.
4434 HIntConstant* MakeConstantComparison(int32_t value) const {
4435 DCHECK(value == -1 || value == 0 || value == 1) << value;
4436 return GetBlock()->GetGraph()->GetIntConstant(value);
4437 }
4438
4439 DEFAULT_COPY_CONSTRUCTOR(Compare);
4440 };
4441
4442 class HNewInstance final : public HExpression<1> {
4443 public:
4444 HNewInstance(HInstruction* cls,
4445 uint32_t dex_pc,
4446 dex::TypeIndex type_index,
4447 const DexFile& dex_file,
4448 bool finalizable,
4449 QuickEntrypointEnum entrypoint)
4450 : HExpression(kNewInstance,
4451 DataType::Type::kReference,
4452 SideEffects::CanTriggerGC(),
4453 dex_pc),
4454 type_index_(type_index),
4455 dex_file_(dex_file),
4456 entrypoint_(entrypoint) {
4457 SetPackedFlag<kFlagFinalizable>(finalizable);
4458 SetPackedFlag<kFlagPartialMaterialization>(false);
4459 SetRawInputAt(0, cls);
4460 }
4461
4462 bool IsClonable() const override { return true; }
4463
4464 void SetPartialMaterialization() {
4465 SetPackedFlag<kFlagPartialMaterialization>(true);
4466 }
4467
4468 dex::TypeIndex GetTypeIndex() const { return type_index_; }
4469 const DexFile& GetDexFile() const { return dex_file_; }
4470
4471 // Calls runtime so needs an environment.
4472 bool NeedsEnvironment() const override { return true; }
4473
4474 // Can throw an error if out of memory or if the class is not instantiable/accessible.
4475 bool CanThrow() const override { return true; }
4476 bool OnlyThrowsAsyncExceptions() const override {
4477 return !IsFinalizable() && !NeedsChecks();
4478 }
4479
4480 bool NeedsChecks() const {
4481 return entrypoint_ == kQuickAllocObjectWithChecks;
4482 }
4483
4484 bool IsFinalizable() const { return GetPackedFlag<kFlagFinalizable>(); }
4485
4486 bool CanBeNull() const override { return false; }
4487
4488 bool IsPartialMaterialization() const {
4489 return GetPackedFlag<kFlagPartialMaterialization>();
4490 }
4491
4492 QuickEntrypointEnum GetEntrypoint() const { return entrypoint_; }
4493
4494 void SetEntrypoint(QuickEntrypointEnum entrypoint) {
4495 entrypoint_ = entrypoint;
4496 }
4497
4498 HLoadClass* GetLoadClass() const {
4499 HInstruction* input = InputAt(0);
4500 if (input->IsClinitCheck()) {
4501 input = input->InputAt(0);
4502 }
4503 DCHECK(input->IsLoadClass());
4504 return input->AsLoadClass();
4505 }
4506
4507 bool IsStringAlloc() const;
4508
4509 DECLARE_INSTRUCTION(NewInstance);
4510
4511 protected:
4512 DEFAULT_COPY_CONSTRUCTOR(NewInstance);
4513
4514 private:
4515 static constexpr size_t kFlagFinalizable = kNumberOfGenericPackedBits;
4516 static constexpr size_t kFlagPartialMaterialization = kFlagFinalizable + 1;
4517 static constexpr size_t kNumberOfNewInstancePackedBits = kFlagPartialMaterialization + 1;
4518 static_assert(kNumberOfNewInstancePackedBits <= kMaxNumberOfPackedBits,
4519 "Too many packed fields.");
4520
4521 const dex::TypeIndex type_index_;
4522 const DexFile& dex_file_;
4523 QuickEntrypointEnum entrypoint_;
4524 };
4525
4526 enum IntrinsicNeedsEnvironment {
4527 kNoEnvironment, // Intrinsic does not require an environment.
4528 kNeedsEnvironment // Intrinsic requires an environment.
4529 };
4530
4531 enum IntrinsicSideEffects {
4532 kNoSideEffects, // Intrinsic does not have any heap memory side effects.
4533 kReadSideEffects, // Intrinsic may read heap memory.
4534 kWriteSideEffects, // Intrinsic may write heap memory.
4535 kAllSideEffects // Intrinsic may read or write heap memory, or trigger GC.
4536 };
4537
4538 enum IntrinsicExceptions {
4539 kNoThrow, // Intrinsic does not throw any exceptions.
4540 kCanThrow // Intrinsic may throw exceptions.
4541 };
4542
4543 // Determines how to load an ArtMethod*.
4544 enum class MethodLoadKind {
4545 // Use a String init ArtMethod* loaded from Thread entrypoints.
4546 kStringInit,
4547
4548 // Use the method's own ArtMethod* loaded by the register allocator.
4549 kRecursive,
4550
4551 // Use PC-relative boot image ArtMethod* address that will be known at link time.
4552 // Used for boot image methods referenced by boot image code.
4553 kBootImageLinkTimePcRelative,
4554
4555 // Load from a boot image entry in the .data.img.rel.ro using a PC-relative load.
4556 // Used for app->boot calls with relocatable image.
4557 kBootImageRelRo,
4558
4559 // Load from an app image entry in the .data.img.rel.ro using a PC-relative load.
4560 // Used for app image methods referenced by apps in AOT-compiled code.
4561 kAppImageRelRo,
4562
4563 // Load from an entry in the .bss section using a PC-relative load.
4564 // Used for methods outside boot image referenced by AOT-compiled app and boot image code.
4565 kBssEntry,
4566
4567 // Use ArtMethod* at a known address, embed the direct address in the code.
4568 // Used for JIT-compiled calls.
4569 kJitDirectAddress,
4570
4571 // Make a runtime call to resolve and call the method. This is the last-resort kind,
4572 // used when other kinds are unimplemented on a particular architecture.
4573 kRuntimeCall,
4574 };
4575
4576 // Determines the location of the code pointer of an invoke.
4577 enum class CodePtrLocation {
4578 // Recursive call, use local PC-relative call instruction.
4579 kCallSelf,
4580
4581 // Use the native pointer from the ArtMethod*.
4582 // Used for @CriticalNative to avoid going through the compiled stub. This call goes through
4583 // a special resolution stub if the class is not initialized or no native code is registered.
4584 kCallCriticalNative,
4585
4586 // Use code pointer from the ArtMethod*.
4587 // Used when we don't know the target code. This is also the last-resort kind, used when
4588 // other kinds are unimplemented or impractical (i.e. slow) on a particular architecture.
4589 kCallArtMethod,
4590 };
4591
4592 static inline bool IsPcRelativeMethodLoadKind(MethodLoadKind load_kind) {
4593 return load_kind == MethodLoadKind::kBootImageLinkTimePcRelative ||
4594 load_kind == MethodLoadKind::kBootImageRelRo ||
4595 load_kind == MethodLoadKind::kAppImageRelRo ||
4596 load_kind == MethodLoadKind::kBssEntry;
4597 }
4598
4599 class HInvoke : public HVariableInputSizeInstruction {
4600 public:
4601 bool NeedsEnvironment() const override;
4602
4603 void SetArgumentAt(size_t index, HInstruction* argument) {
4604 SetRawInputAt(index, argument);
4605 }
4606
4607 // Return the number of arguments. This number can be lower than
4608 // the number of inputs returned by InputCount(), as some invoke
4609 // instructions (e.g. HInvokeStaticOrDirect) can have non-argument
4610 // inputs at the end of their list of inputs.
4611 uint32_t GetNumberOfArguments() const { return number_of_arguments_; }
4612
4613 // Return the number of outgoing vregs.
4614 uint32_t GetNumberOfOutVRegs() const { return number_of_out_vregs_; }
4615
4616 InvokeType GetInvokeType() const {
4617 return GetPackedField<InvokeTypeField>();
4618 }
4619
4620 Intrinsics GetIntrinsic() const {
4621 return intrinsic_;
4622 }
4623
4624 void SetIntrinsic(Intrinsics intrinsic,
4625 IntrinsicNeedsEnvironment needs_env,
4626 IntrinsicSideEffects side_effects,
4627 IntrinsicExceptions exceptions);
4628
4629 bool IsFromInlinedInvoke() const {
4630 return GetEnvironment()->IsFromInlinedInvoke();
4631 }
4632
4633 void SetCanThrow(bool can_throw) { SetPackedFlag<kFlagCanThrow>(can_throw); }
4634
4635 bool CanThrow() const override { return GetPackedFlag<kFlagCanThrow>(); }
4636
4637 void SetAlwaysThrows(bool always_throws) { SetPackedFlag<kFlagAlwaysThrows>(always_throws); }
4638
4639 bool AlwaysThrows() const override final { return GetPackedFlag<kFlagAlwaysThrows>(); }
4640
4641 bool CanBeMoved() const override { return IsIntrinsic() && !DoesAnyWrite(); }
4642
4643 bool CanBeNull() const override;
4644
4645 bool InstructionDataEquals(const HInstruction* other) const override {
4646 return intrinsic_ != Intrinsics::kNone && intrinsic_ == other->AsInvoke()->intrinsic_;
4647 }
4648
4649 uint32_t* GetIntrinsicOptimizations() {
4650 return &intrinsic_optimizations_;
4651 }
4652
4653 const uint32_t* GetIntrinsicOptimizations() const {
4654 return &intrinsic_optimizations_;
4655 }
4656
4657 bool IsIntrinsic() const { return intrinsic_ != Intrinsics::kNone; }
4658
4659 ArtMethod* GetResolvedMethod() const { return resolved_method_; }
4660 void SetResolvedMethod(ArtMethod* method, bool enable_intrinsic_opt);
4661
4662 MethodReference GetMethodReference() const { return method_reference_; }
4663
4664 const MethodReference GetResolvedMethodReference() const {
4665 return resolved_method_reference_;
4666 }
4667
4668 DECLARE_ABSTRACT_INSTRUCTION(Invoke);
4669
4670 protected:
4671 static constexpr size_t kFieldInvokeType = kNumberOfGenericPackedBits;
4672 static constexpr size_t kFieldInvokeTypeSize =
4673 MinimumBitsToStore(static_cast<size_t>(kMaxInvokeType));
4674 static constexpr size_t kFlagCanThrow = kFieldInvokeType + kFieldInvokeTypeSize;
4675 static constexpr size_t kFlagAlwaysThrows = kFlagCanThrow + 1;
4676 static constexpr size_t kNumberOfInvokePackedBits = kFlagAlwaysThrows + 1;
4677 static_assert(kNumberOfInvokePackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
4678 using InvokeTypeField = BitField<InvokeType, kFieldInvokeType, kFieldInvokeTypeSize>;
4679
4680 HInvoke(InstructionKind kind,
4681 ArenaAllocator* allocator,
4682 uint32_t number_of_arguments,
4683 uint32_t number_of_out_vregs,
4684 uint32_t number_of_other_inputs,
4685 DataType::Type return_type,
4686 uint32_t dex_pc,
4687 MethodReference method_reference,
4688 ArtMethod* resolved_method,
4689 MethodReference resolved_method_reference,
4690 InvokeType invoke_type,
4691 bool enable_intrinsic_opt)
4692 : HVariableInputSizeInstruction(
4693 kind,
4694 return_type,
4695 SideEffects::AllExceptGCDependency(), // Assume write/read on all fields/arrays.
4696 dex_pc,
4697 allocator,
4698 number_of_arguments + number_of_other_inputs,
4699 kArenaAllocInvokeInputs),
4700 method_reference_(method_reference),
4701 resolved_method_reference_(resolved_method_reference),
4702 number_of_arguments_(dchecked_integral_cast<uint16_t>(number_of_arguments)),
4703 number_of_out_vregs_(dchecked_integral_cast<uint16_t>(number_of_out_vregs)),
4704 intrinsic_(Intrinsics::kNone),
4705 intrinsic_optimizations_(0) {
4706 SetPackedField<InvokeTypeField>(invoke_type);
4707 SetPackedFlag<kFlagCanThrow>(true);
4708 SetResolvedMethod(resolved_method, enable_intrinsic_opt);
4709 }
4710
4711 DEFAULT_COPY_CONSTRUCTOR(Invoke);
4712
4713 ArtMethod* resolved_method_;
4714 const MethodReference method_reference_;
4715 // Cached values of the resolved method, to avoid needing the mutator lock.
4716 const MethodReference resolved_method_reference_;
4717
4718 uint16_t number_of_arguments_;
4719 uint16_t number_of_out_vregs_;
4720
4721 Intrinsics intrinsic_;
4722
4723 // A magic word holding optimizations for intrinsics. See intrinsics.h.
4724 uint32_t intrinsic_optimizations_;
4725 };
4726
4727 class HInvokeUnresolved final : public HInvoke {
4728 public:
4729 HInvokeUnresolved(ArenaAllocator* allocator,
4730 uint32_t number_of_arguments,
4731 uint32_t number_of_out_vregs,
4732 DataType::Type return_type,
4733 uint32_t dex_pc,
4734 MethodReference method_reference,
4735 InvokeType invoke_type)
4736 : HInvoke(kInvokeUnresolved,
4737 allocator,
4738 number_of_arguments,
4739 number_of_out_vregs,
4740 /* number_of_other_inputs= */ 0u,
4741 return_type,
4742 dex_pc,
4743 method_reference,
4744 nullptr,
4745 MethodReference(nullptr, 0u),
4746 invoke_type,
4747 /* enable_intrinsic_opt= */ false) {
4748 }
4749
4750 bool IsClonable() const override { return true; }
4751
4752 DECLARE_INSTRUCTION(InvokeUnresolved);
4753
4754 protected:
4755 DEFAULT_COPY_CONSTRUCTOR(InvokeUnresolved);
4756 };
4757
4758 class HInvokePolymorphic final : public HInvoke {
4759 public:
4760 HInvokePolymorphic(ArenaAllocator* allocator,
4761 uint32_t number_of_arguments,
4762 uint32_t number_of_out_vregs,
4763 uint32_t number_of_other_inputs,
4764 DataType::Type return_type,
4765 uint32_t dex_pc,
4766 MethodReference method_reference,
4767 // resolved_method is the ArtMethod object corresponding to the polymorphic
4768 // method (e.g. VarHandle.get), resolved using the class linker. It is needed
4769 // to pass intrinsic information to the HInvokePolymorphic node.
4770 ArtMethod* resolved_method,
4771 MethodReference resolved_method_reference,
4772 dex::ProtoIndex proto_idx)
4773 : HInvoke(kInvokePolymorphic,
4774 allocator,
4775 number_of_arguments,
4776 number_of_out_vregs,
4777 number_of_other_inputs,
4778 return_type,
4779 dex_pc,
4780 method_reference,
4781 resolved_method,
4782 resolved_method_reference,
4783 kPolymorphic,
4784 /* enable_intrinsic_opt= */ true),
4785 proto_idx_(proto_idx) {}
4786
4787 bool IsClonable() const override { return true; }
4788
4789 dex::ProtoIndex GetProtoIndex() { return proto_idx_; }
4790
4791 bool IsMethodHandleInvokeExact() const {
4792 return GetIntrinsic() == Intrinsics::kMethodHandleInvokeExact;
4793 }
4794
4795 bool CanTargetInstanceMethod() const {
4796 DCHECK(IsMethodHandleInvokeExact());
4797 return GetNumberOfArguments() >= 2 &&
4798 InputAt(1)->GetType() == DataType::Type::kReference;
4799 }
4800
4801 DECLARE_INSTRUCTION(InvokePolymorphic);
4802
4803 protected:
4804 dex::ProtoIndex proto_idx_;
4805 DEFAULT_COPY_CONSTRUCTOR(InvokePolymorphic);
4806 };
4807
4808 class HInvokeCustom final : public HInvoke {
4809 public:
4810 HInvokeCustom(ArenaAllocator* allocator,
4811 uint32_t number_of_arguments,
4812 uint32_t number_of_out_vregs,
4813 uint32_t call_site_index,
4814 DataType::Type return_type,
4815 uint32_t dex_pc,
4816 MethodReference method_reference,
4817 bool enable_intrinsic_opt)
4818 : HInvoke(kInvokeCustom,
4819 allocator,
4820 number_of_arguments,
4821 number_of_out_vregs,
4822 /* number_of_other_inputs= */ 0u,
4823 return_type,
4824 dex_pc,
4825 method_reference,
4826 /* resolved_method= */ nullptr,
4827 MethodReference(nullptr, 0u),
4828 kStatic,
4829 enable_intrinsic_opt),
4830 call_site_index_(call_site_index) {
4831 }
4832
4833 uint32_t GetCallSiteIndex() const { return call_site_index_; }
4834
4835 bool IsClonable() const override { return true; }
4836
4837 DECLARE_INSTRUCTION(InvokeCustom);
4838
4839 protected:
4840 DEFAULT_COPY_CONSTRUCTOR(InvokeCustom);
4841
4842 private:
4843 uint32_t call_site_index_;
4844 };
4845
4846 class HInvokeStaticOrDirect final : public HInvoke {
4847 public:
4848 // Requirements of this method call regarding the class
4849 // initialization (clinit) check of its declaring class.
4850 enum class ClinitCheckRequirement { // private marker to keep generate-operator-out.py from processing this enum.
4851 kNone, // Class already initialized.
4852 kExplicit, // Static call having explicit clinit check as last input.
4853 kImplicit, // Static call implicitly requiring a clinit check.
4854 kLast = kImplicit
4855 };
4856
4857 struct DispatchInfo {
4858 MethodLoadKind method_load_kind;
4859 CodePtrLocation code_ptr_location;
4860 // The method load data holds
4861 // - thread entrypoint offset for kStringInit method if this is a string init invoke.
4862 // Note that there are multiple string init methods, each having its own offset.
4863 // - the method address for kJitDirectAddress.
4864 uint64_t method_load_data;
4865 };
4866
4867 HInvokeStaticOrDirect(ArenaAllocator* allocator,
4868 uint32_t number_of_arguments,
4869 uint32_t number_of_out_vregs,
4870 DataType::Type return_type,
4871 uint32_t dex_pc,
4872 MethodReference method_reference,
4873 ArtMethod* resolved_method,
4874 DispatchInfo dispatch_info,
4875 InvokeType invoke_type,
4876 MethodReference resolved_method_reference,
4877 ClinitCheckRequirement clinit_check_requirement,
4878 bool enable_intrinsic_opt)
4879 : HInvoke(kInvokeStaticOrDirect,
4880 allocator,
4881 number_of_arguments,
4882 number_of_out_vregs,
4883 // There is potentially one extra argument for the HCurrentMethod input,
4884 // and another one if the clinit check is explicit. These can be removed later.
4885 (NeedsCurrentMethodInput(dispatch_info) ? 1u : 0u) +
4886 (clinit_check_requirement == ClinitCheckRequirement::kExplicit ? 1u : 0u),
4887 return_type,
4888 dex_pc,
4889 method_reference,
4890 resolved_method,
4891 resolved_method_reference,
4892 invoke_type,
4893 enable_intrinsic_opt),
4894 dispatch_info_(dispatch_info) {
4895 SetPackedField<ClinitCheckRequirementField>(clinit_check_requirement);
4896 }
4897
4898 bool IsClonable() const override { return true; }
4899 bool NeedsBss() const override {
4900 return GetMethodLoadKind() == MethodLoadKind::kBssEntry;
4901 }
4902
4903 void SetDispatchInfo(DispatchInfo dispatch_info) {
4904 bool had_current_method_input = HasCurrentMethodInput();
4905 bool needs_current_method_input = NeedsCurrentMethodInput(dispatch_info);
4906
4907 // Using the current method is the default and once we find a better
4908 // method load kind, we should not go back to using the current method.
4909 DCHECK(had_current_method_input || !needs_current_method_input);
4910
4911 if (had_current_method_input && !needs_current_method_input) {
4912 DCHECK_EQ(InputAt(GetCurrentMethodIndex()), GetBlock()->GetGraph()->GetCurrentMethod());
4913 RemoveInputAt(GetCurrentMethodIndex());
4914 }
4915 dispatch_info_ = dispatch_info;
4916 }
4917
4918 DispatchInfo GetDispatchInfo() const {
4919 return dispatch_info_;
4920 }
4921
4922 using HInstruction::GetInputRecords; // Keep the const version visible.
4923 ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() override {
4924 ArrayRef<HUserRecord<HInstruction*>> input_records = HInvoke::GetInputRecords();
4925 if (kIsDebugBuild && IsStaticWithExplicitClinitCheck()) {
4926 DCHECK(!input_records.empty());
4927 DCHECK_GT(input_records.size(), GetNumberOfArguments());
4928 HInstruction* last_input = input_records.back().GetInstruction();
4929 // Note: `last_input` may be null during arguments setup.
4930 if (last_input != nullptr) {
4931 // `last_input` is the last input of a static invoke marked as having
4932 // an explicit clinit check. It must either be:
4933 // - an art::HClinitCheck instruction, set by art::HGraphBuilder; or
4934 // - an art::HLoadClass instruction, set by art::PrepareForRegisterAllocation.
4935 DCHECK(last_input->IsClinitCheck() || last_input->IsLoadClass()) << last_input->DebugName();
4936 }
4937 }
4938 return input_records;
4939 }
4940
4941 bool CanDoImplicitNullCheckOn([[maybe_unused]] HInstruction* obj) const override {
4942 // We do not access the method via object reference, so we cannot do an implicit null check.
4943 // TODO: for intrinsics we can generate implicit null checks.
4944 return false;
4945 }
4946
4947 bool CanBeNull() const override;
4948
4949 MethodLoadKind GetMethodLoadKind() const { return dispatch_info_.method_load_kind; }
4950 CodePtrLocation GetCodePtrLocation() const {
4951 // We do CHA analysis after sharpening. When a method has CHA inlining, it
4952 // cannot call itself: if the CHA optimization turns out to be invalid, we want to
4953 // make sure the method is never executed again. So, while sharpening can return
4954 // kCallSelf, we bypass it here if there is a CHA optimization.
4955 if (dispatch_info_.code_ptr_location == CodePtrLocation::kCallSelf &&
4956 GetBlock()->GetGraph()->HasShouldDeoptimizeFlag()) {
4957 return CodePtrLocation::kCallArtMethod;
4958 } else {
4959 return dispatch_info_.code_ptr_location;
4960 }
4961 }
4962 bool IsRecursive() const { return GetMethodLoadKind() == MethodLoadKind::kRecursive; }
4963 bool IsStringInit() const { return GetMethodLoadKind() == MethodLoadKind::kStringInit; }
4964 bool HasMethodAddress() const { return GetMethodLoadKind() == MethodLoadKind::kJitDirectAddress; }
4965 bool HasPcRelativeMethodLoadKind() const {
4966 return IsPcRelativeMethodLoadKind(GetMethodLoadKind());
4967 }
4968
4969 QuickEntrypointEnum GetStringInitEntryPoint() const {
4970 DCHECK(IsStringInit());
4971 return static_cast<QuickEntrypointEnum>(dispatch_info_.method_load_data);
4972 }
4973
4974 uint64_t GetMethodAddress() const {
4975 DCHECK(HasMethodAddress());
4976 return dispatch_info_.method_load_data;
4977 }
4978
4979 const DexFile& GetDexFileForPcRelativeDexCache() const;
4980
4981 ClinitCheckRequirement GetClinitCheckRequirement() const {
4982 return GetPackedField<ClinitCheckRequirementField>();
4983 }
4984
4985 // Is this instruction a call to a static method?
4986 bool IsStatic() const {
4987 return GetInvokeType() == kStatic;
4988 }
4989
4990 // Does this method load kind need the current method as an input?
4991 static bool NeedsCurrentMethodInput(DispatchInfo dispatch_info) {
4992 return dispatch_info.method_load_kind == MethodLoadKind::kRecursive ||
4993 dispatch_info.method_load_kind == MethodLoadKind::kRuntimeCall ||
4994 dispatch_info.code_ptr_location == CodePtrLocation::kCallCriticalNative;
4995 }
4996
4997 // Get the index of the current method input.
4998 size_t GetCurrentMethodIndex() const {
4999 DCHECK(HasCurrentMethodInput());
5000 return GetCurrentMethodIndexUnchecked();
5001 }
5002 size_t GetCurrentMethodIndexUnchecked() const {
5003 return GetNumberOfArguments();
5004 }
5005
5006 // Check if the method has a current method input.
5007 bool HasCurrentMethodInput() const {
5008 if (NeedsCurrentMethodInput(GetDispatchInfo())) {
5009 DCHECK(InputAt(GetCurrentMethodIndexUnchecked()) == nullptr || // During argument setup.
5010 InputAt(GetCurrentMethodIndexUnchecked())->IsCurrentMethod());
5011 return true;
5012 } else {
5013 DCHECK(InputCount() == GetCurrentMethodIndexUnchecked() ||
5014 InputAt(GetCurrentMethodIndexUnchecked()) == nullptr || // During argument setup.
5015 !InputAt(GetCurrentMethodIndexUnchecked())->IsCurrentMethod());
5016 return false;
5017 }
5018 }
5019
5020 // Get the index of the special input.
5021 size_t GetSpecialInputIndex() const {
5022 DCHECK(HasSpecialInput());
5023 return GetSpecialInputIndexUnchecked();
5024 }
5025 size_t GetSpecialInputIndexUnchecked() const {
5026 return GetNumberOfArguments() + (HasCurrentMethodInput() ? 1u : 0u);
5027 }
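
// Sketch of the input layout implied by the index computations above and below
// (an informal summary, not an authoritative specification):
//   [ arg0, ..., argN-1, <current method>?, <special input>?, <explicit clinit check>? ]
// Each optional slot is present only when the corresponding predicate holds
// (HasCurrentMethodInput(), HasSpecialInput(), IsStaticWithExplicitClinitCheck()).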
5028
5029 // Check if the method has a special input.
5030 bool HasSpecialInput() const {
5031 size_t other_inputs =
5032 GetSpecialInputIndexUnchecked() + (IsStaticWithExplicitClinitCheck() ? 1u : 0u);
5033 size_t input_count = InputCount();
5034 DCHECK_LE(input_count - other_inputs, 1u) << other_inputs << " " << input_count;
5035 return other_inputs != input_count;
5036 }
5037
5038 void AddSpecialInput(HInstruction* input) {
5039 // We allow only one special input.
5040 DCHECK(!HasSpecialInput());
5041 InsertInputAt(GetSpecialInputIndexUnchecked(), input);
5042 }
5043
5044 // Remove the HClinitCheck or the replacement HLoadClass (set as last input by
5045 // PrepareForRegisterAllocation::VisitClinitCheck() in lieu of the initial HClinitCheck)
5046 // instruction; only relevant for static calls with explicit clinit check.
5047 void RemoveExplicitClinitCheck(ClinitCheckRequirement new_requirement) {
5048 DCHECK(IsStaticWithExplicitClinitCheck());
5049 size_t last_input_index = inputs_.size() - 1u;
5050 HInstruction* last_input = inputs_.back().GetInstruction();
5051 DCHECK(last_input != nullptr);
5052 DCHECK(last_input->IsLoadClass() || last_input->IsClinitCheck()) << last_input->DebugName();
5053 RemoveAsUserOfInput(last_input_index);
5054 inputs_.pop_back();
5055 SetPackedField<ClinitCheckRequirementField>(new_requirement);
5056 DCHECK(!IsStaticWithExplicitClinitCheck());
5057 }
5058
5059 // Is this a call to a static method whose declaring class has an
5060 // explicit initialization check in the graph?
5061 bool IsStaticWithExplicitClinitCheck() const {
5062 return IsStatic() && (GetClinitCheckRequirement() == ClinitCheckRequirement::kExplicit);
5063 }
5064
5065 // Is this a call to a static method whose declaring class has an
5066 // implicit initialization check requirement?
5067 bool IsStaticWithImplicitClinitCheck() const {
5068 return IsStatic() && (GetClinitCheckRequirement() == ClinitCheckRequirement::kImplicit);
5069 }
5070
5071 DECLARE_INSTRUCTION(InvokeStaticOrDirect);
5072
5073 protected:
5074 DEFAULT_COPY_CONSTRUCTOR(InvokeStaticOrDirect);
5075
5076 private:
5077 static constexpr size_t kFieldClinitCheckRequirement = kNumberOfInvokePackedBits;
5078 static constexpr size_t kFieldClinitCheckRequirementSize =
5079 MinimumBitsToStore(static_cast<size_t>(ClinitCheckRequirement::kLast));
5080 static constexpr size_t kNumberOfInvokeStaticOrDirectPackedBits =
5081 kFieldClinitCheckRequirement + kFieldClinitCheckRequirementSize;
5082 static_assert(kNumberOfInvokeStaticOrDirectPackedBits <= kMaxNumberOfPackedBits,
5083 "Too many packed fields.");
5084 using ClinitCheckRequirementField = BitField<ClinitCheckRequirement,
5085 kFieldClinitCheckRequirement,
5086 kFieldClinitCheckRequirementSize>;
5087
5088 DispatchInfo dispatch_info_;
5089 };
5090 std::ostream& operator<<(std::ostream& os, MethodLoadKind rhs);
5091 std::ostream& operator<<(std::ostream& os, CodePtrLocation rhs);
5092 std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::ClinitCheckRequirement rhs);
5093
5094 class HInvokeVirtual final : public HInvoke {
5095 public:
5096 HInvokeVirtual(ArenaAllocator* allocator,
5097 uint32_t number_of_arguments,
5098 uint32_t number_of_out_vregs,
5099 DataType::Type return_type,
5100 uint32_t dex_pc,
5101 MethodReference method_reference,
5102 ArtMethod* resolved_method,
5103 MethodReference resolved_method_reference,
5104 uint32_t vtable_index,
5105 bool enable_intrinsic_opt)
5106 : HInvoke(kInvokeVirtual,
5107 allocator,
5108 number_of_arguments,
5109 number_of_out_vregs,
5110 0u,
5111 return_type,
5112 dex_pc,
5113 method_reference,
5114 resolved_method,
5115 resolved_method_reference,
5116 kVirtual,
5117 enable_intrinsic_opt),
5118 vtable_index_(vtable_index) {
5119 }
5120
5121 bool IsClonable() const override { return true; }
5122
5123 bool CanDoImplicitNullCheckOn(HInstruction* obj) const override;
5124
5125 uint32_t GetVTableIndex() const { return vtable_index_; }
5126
5127 DECLARE_INSTRUCTION(InvokeVirtual);
5128
5129 protected:
5130 DEFAULT_COPY_CONSTRUCTOR(InvokeVirtual);
5131
5132 private:
5133 // Cached value of the resolved method, to avoid needing the mutator lock.
5134 const uint32_t vtable_index_;
5135 };
5136
5137 class HInvokeInterface final : public HInvoke {
5138 public:
5139 HInvokeInterface(ArenaAllocator* allocator,
5140 uint32_t number_of_arguments,
5141 uint32_t number_of_out_vregs,
5142 DataType::Type return_type,
5143 uint32_t dex_pc,
5144 MethodReference method_reference,
5145 ArtMethod* resolved_method,
5146 MethodReference resolved_method_reference,
5147 uint32_t imt_index,
5148 MethodLoadKind load_kind,
5149 bool enable_intrinsic_opt)
5150 : HInvoke(kInvokeInterface,
5151 allocator,
5152 number_of_arguments + (NeedsCurrentMethod(load_kind) ? 1 : 0),
5153 number_of_out_vregs,
5154 0u,
5155 return_type,
5156 dex_pc,
5157 method_reference,
5158 resolved_method,
5159 resolved_method_reference,
5160 kInterface,
5161 enable_intrinsic_opt),
5162 imt_index_(imt_index),
5163 hidden_argument_load_kind_(load_kind) {
5164 }
5165
5166 static bool NeedsCurrentMethod(MethodLoadKind load_kind) {
5167 return load_kind == MethodLoadKind::kRecursive;
5168 }
5169
5170 bool IsClonable() const override { return true; }
5171 bool NeedsBss() const override {
5172 return GetHiddenArgumentLoadKind() == MethodLoadKind::kBssEntry;
5173 }
5174
5175 bool CanDoImplicitNullCheckOn(HInstruction* obj) const override {
5176 // TODO: Add implicit null checks in intrinsics.
5177 return (obj == InputAt(0)) && !IsIntrinsic();
5178 }
5179
5180 size_t GetSpecialInputIndex() const {
5181 return GetNumberOfArguments();
5182 }
5183
5184 void AddSpecialInput(HInstruction* input) {
5185 InsertInputAt(GetSpecialInputIndex(), input);
5186 }
5187
5188 uint32_t GetImtIndex() const { return imt_index_; }
5189 MethodLoadKind GetHiddenArgumentLoadKind() const { return hidden_argument_load_kind_; }
5190
5191 DECLARE_INSTRUCTION(InvokeInterface);
5192
5193 protected:
5194 DEFAULT_COPY_CONSTRUCTOR(InvokeInterface);
5195
5196 private:
5197 // Cached value of the resolved method, to avoid needing the mutator lock.
5198 const uint32_t imt_index_;
5199
5200 // How the hidden argument (the interface method) is being loaded.
5201 const MethodLoadKind hidden_argument_load_kind_;
5202 };
5203
5204 class HNeg final : public HUnaryOperation {
5205 public:
5206 HNeg(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc)
5207 : HUnaryOperation(kNeg, result_type, input, dex_pc) {
5208 DCHECK_EQ(result_type, DataType::Kind(input->GetType()));
5209 }
5210
5211 template <typename T> static T Compute(T x) { return -x; }
5212
5213 HConstant* Evaluate(HIntConstant* x) const override {
5214 return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()));
5215 }
5216 HConstant* Evaluate(HLongConstant* x) const override {
5217 return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue()));
5218 }
5219 HConstant* Evaluate(HFloatConstant* x) const override {
5220 return GetBlock()->GetGraph()->GetFloatConstant(Compute(x->GetValue()));
5221 }
5222 HConstant* Evaluate(HDoubleConstant* x) const override {
5223 return GetBlock()->GetGraph()->GetDoubleConstant(Compute(x->GetValue()));
5224 }
5225
5226 DECLARE_INSTRUCTION(Neg);
5227
5228 protected:
5229 DEFAULT_COPY_CONSTRUCTOR(Neg);
5230 };
5231
5232 class HNewArray final : public HExpression<2> {
5233 public:
5234 HNewArray(HInstruction* cls, HInstruction* length, uint32_t dex_pc, size_t component_size_shift)
5235 : HExpression(kNewArray, DataType::Type::kReference, SideEffects::CanTriggerGC(), dex_pc) {
5236 SetRawInputAt(0, cls);
5237 SetRawInputAt(1, length);
5238 SetPackedField<ComponentSizeShiftField>(component_size_shift);
5239 }
5240
5241 bool IsClonable() const override { return true; }
5242
5243 // Calls runtime so needs an environment.
5244 bool NeedsEnvironment() const override { return true; }
5245
5246 // May throw NegativeArraySizeException, OutOfMemoryError, etc.
5247 bool CanThrow() const override { return true; }
5248
5249 bool CanBeNull() const override { return false; }
5250
5251 HLoadClass* GetLoadClass() const {
5252 DCHECK(InputAt(0)->IsLoadClass());
5253 return InputAt(0)->AsLoadClass();
5254 }
5255
5256 HInstruction* GetLength() const {
5257 return InputAt(1);
5258 }
5259
5260 size_t GetComponentSizeShift() {
5261 return GetPackedField<ComponentSizeShiftField>();
5262 }
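// Illustrative note: the shift is log2 of the component size in bytes, e.g. 0 for
// byte[]/boolean[], 1 for short[]/char[], 2 for int[]/float[] and 3 for long[]/double[];
// values 0-3 fit in the 2-bit packed field below.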
5263
5264 DECLARE_INSTRUCTION(NewArray);
5265
5266 protected:
5267 DEFAULT_COPY_CONSTRUCTOR(NewArray);
5268
5269 private:
5270 static constexpr size_t kFieldComponentSizeShift = kNumberOfGenericPackedBits;
5271 static constexpr size_t kFieldComponentSizeShiftSize = MinimumBitsToStore(3u);
5272 static constexpr size_t kNumberOfNewArrayPackedBits =
5273 kFieldComponentSizeShift + kFieldComponentSizeShiftSize;
5274 static_assert(kNumberOfNewArrayPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
5275 using ComponentSizeShiftField =
5276 BitField<size_t, kFieldComponentSizeShift, kFieldComponentSizeShiftSize>;
5277 };
5278
5279 class HAdd final : public HBinaryOperation {
5280 public:
5281 HAdd(DataType::Type result_type,
5282 HInstruction* left,
5283 HInstruction* right,
5284 uint32_t dex_pc = kNoDexPc)
5285 : HBinaryOperation(kAdd, result_type, left, right, SideEffects::None(), dex_pc) {
5286 }
5287
5288 bool IsCommutative() const override { return true; }
5289
5290 template <typename T> static T Compute(T x, T y) { return x + y; }
5291
5292 HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
5293 return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
5294 }
5295 HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
5296 return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
5297 }
5298 HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
5299 return GetBlock()->GetGraph()->GetFloatConstant(Compute(x->GetValue(), y->GetValue()));
5300 }
5301 HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
5302 return GetBlock()->GetGraph()->GetDoubleConstant(Compute(x->GetValue(), y->GetValue()));
5303 }
5304
5305 DECLARE_INSTRUCTION(Add);
5306
5307 protected:
5308 DEFAULT_COPY_CONSTRUCTOR(Add);
5309 };
5310
5311 class HSub final : public HBinaryOperation {
5312 public:
5313 HSub(DataType::Type result_type,
5314 HInstruction* left,
5315 HInstruction* right,
5316 uint32_t dex_pc = kNoDexPc)
5317 : HBinaryOperation(kSub, result_type, left, right, SideEffects::None(), dex_pc) {
5318 }
5319
5320 template <typename T> static T Compute(T x, T y) { return x - y; }
5321
5322 HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
5323 return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
5324 }
5325 HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
5326 return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
5327 }
5328 HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
5329 return GetBlock()->GetGraph()->GetFloatConstant(Compute(x->GetValue(), y->GetValue()));
5330 }
5331 HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
5332 return GetBlock()->GetGraph()->GetDoubleConstant(Compute(x->GetValue(), y->GetValue()));
5333 }
5334
5335 DECLARE_INSTRUCTION(Sub);
5336
5337 protected:
5338 DEFAULT_COPY_CONSTRUCTOR(Sub);
5339 };
5340
5341 class HMul final : public HBinaryOperation {
5342 public:
5343 HMul(DataType::Type result_type,
5344 HInstruction* left,
5345 HInstruction* right,
5346 uint32_t dex_pc = kNoDexPc)
5347 : HBinaryOperation(kMul, result_type, left, right, SideEffects::None(), dex_pc) {
5348 }
5349
5350 bool IsCommutative() const override { return true; }
5351
5352 template <typename T> static T Compute(T x, T y) { return x * y; }
5353
5354 HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
5355 return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
5356 }
5357 HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
5358 return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
5359 }
5360 HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
5361 return GetBlock()->GetGraph()->GetFloatConstant(Compute(x->GetValue(), y->GetValue()));
5362 }
5363 HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
5364 return GetBlock()->GetGraph()->GetDoubleConstant(Compute(x->GetValue(), y->GetValue()));
5365 }
5366
5367 DECLARE_INSTRUCTION(Mul);
5368
5369 protected:
5370 DEFAULT_COPY_CONSTRUCTOR(Mul);
5371 };
5372
5373 class HDiv final : public HBinaryOperation {
5374 public:
5375 HDiv(DataType::Type result_type,
5376 HInstruction* left,
5377 HInstruction* right,
5378 uint32_t dex_pc)
5379 : HBinaryOperation(kDiv, result_type, left, right, SideEffects::None(), dex_pc) {
5380 }
5381
5382 template <typename T>
5383 T ComputeIntegral(T x, T y) const {
5384 DCHECK(!DataType::IsFloatingPointType(GetType())) << GetType();
5385 // Our graph structure ensures we never have 0 for `y` during
5386 // constant folding.
5387 DCHECK_NE(y, 0);
5388 // Special case -1 to avoid getting a SIGFPE on x86(_64).
5389 return (y == -1) ? -x : x / y;
5390 }
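// Worked example (illustrative): for T = int32_t, Java requires
// Integer.MIN_VALUE / -1 == Integer.MIN_VALUE, but the x86/x86-64 idiv instruction
// raises SIGFPE on that overflow. Returning -x instead yields the same wrapped
// two's-complement result without the trap.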
5391
5392 template <typename T>
5393 T ComputeFP(T x, T y) const {
5394 DCHECK(DataType::IsFloatingPointType(GetType())) << GetType();
5395 return x / y;
5396 }
5397
5398 HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
5399 return GetBlock()->GetGraph()->GetIntConstant(ComputeIntegral(x->GetValue(), y->GetValue()));
5400 }
5401 HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
5402 return GetBlock()->GetGraph()->GetLongConstant(ComputeIntegral(x->GetValue(), y->GetValue()));
5403 }
5404 HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
5405 return GetBlock()->GetGraph()->GetFloatConstant(ComputeFP(x->GetValue(), y->GetValue()));
5406 }
5407 HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
5408 return GetBlock()->GetGraph()->GetDoubleConstant(ComputeFP(x->GetValue(), y->GetValue()));
5409 }
5410
5411 DECLARE_INSTRUCTION(Div);
5412
5413 protected:
5414 DEFAULT_COPY_CONSTRUCTOR(Div);
5415 };
5416
5417 class HRem final : public HBinaryOperation {
5418 public:
5419 HRem(DataType::Type result_type,
5420 HInstruction* left,
5421 HInstruction* right,
5422 uint32_t dex_pc)
5423 : HBinaryOperation(kRem, result_type, left, right, SideEffects::None(), dex_pc) {
5424 }
5425
5426 template <typename T>
5427 T ComputeIntegral(T x, T y) const {
5428 DCHECK(!DataType::IsFloatingPointType(GetType())) << GetType();
5429 // Our graph structure ensures we never have 0 for `y` during
5430 // constant folding.
5431 DCHECK_NE(y, 0);
5432 // Special case -1 to avoid getting a SIGFPE on x86(_64).
5433 return (y == -1) ? 0 : x % y;
5434 }
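// Worked example (illustrative): for T = int32_t, Java defines
// Integer.MIN_VALUE % -1 == 0, while the x86/x86-64 idiv instruction would raise
// SIGFPE for that operand pair; the special case above returns 0 directly.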
5435
5436 template <typename T>
5437 T ComputeFP(T x, T y) const {
5438 DCHECK(DataType::IsFloatingPointType(GetType())) << GetType();
5439 return std::fmod(x, y);
5440 }
5441
5442 HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
5443 return GetBlock()->GetGraph()->GetIntConstant(ComputeIntegral(x->GetValue(), y->GetValue()));
5444 }
5445 HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
5446 return GetBlock()->GetGraph()->GetLongConstant(ComputeIntegral(x->GetValue(), y->GetValue()));
5447 }
5448 HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
5449 return GetBlock()->GetGraph()->GetFloatConstant(ComputeFP(x->GetValue(), y->GetValue()));
5450 }
5451 HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
5452 return GetBlock()->GetGraph()->GetDoubleConstant(ComputeFP(x->GetValue(), y->GetValue()));
5453 }
5454
5455 DECLARE_INSTRUCTION(Rem);
5456
5457 protected:
5458 DEFAULT_COPY_CONSTRUCTOR(Rem);
5459 };
5460
5461 class HMin final : public HBinaryOperation {
5462 public:
5463 HMin(DataType::Type result_type,
5464 HInstruction* left,
5465 HInstruction* right,
5466 uint32_t dex_pc)
5467 : HBinaryOperation(kMin, result_type, left, right, SideEffects::None(), dex_pc) {}
5468
5469 bool IsCommutative() const override { return true; }
5470
5471 // Evaluation for integral values.
5472 template <typename T> static T ComputeIntegral(T x, T y) {
5473 return (x <= y) ? x : y;
5474 }
5475
5476 HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
5477 return GetBlock()->GetGraph()->GetIntConstant(ComputeIntegral(x->GetValue(), y->GetValue()));
5478 }
5479 HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
5480 return GetBlock()->GetGraph()->GetLongConstant(ComputeIntegral(x->GetValue(), y->GetValue()));
5481 }
5482 // TODO: Evaluation for floating-point values.
5483 HConstant* Evaluate([[maybe_unused]] HFloatConstant* x,
5484 [[maybe_unused]] HFloatConstant* y) const override {
5485 return nullptr;
5486 }
5487 HConstant* Evaluate([[maybe_unused]] HDoubleConstant* x,
5488 [[maybe_unused]] HDoubleConstant* y) const override {
5489 return nullptr;
5490 }
5491
5492 DECLARE_INSTRUCTION(Min);
5493
5494 protected:
5495 DEFAULT_COPY_CONSTRUCTOR(Min);
5496 };
5497
5498 class HMax final : public HBinaryOperation {
5499 public:
5500 HMax(DataType::Type result_type,
5501 HInstruction* left,
5502 HInstruction* right,
5503 uint32_t dex_pc)
5504 : HBinaryOperation(kMax, result_type, left, right, SideEffects::None(), dex_pc) {}
5505
5506 bool IsCommutative() const override { return true; }
5507
5508 // Evaluation for integral values.
5509 template <typename T> static T ComputeIntegral(T x, T y) {
5510 return (x >= y) ? x : y;
5511 }
5512
5513 HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
5514 return GetBlock()->GetGraph()->GetIntConstant(ComputeIntegral(x->GetValue(), y->GetValue()));
5515 }
5516 HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
5517 return GetBlock()->GetGraph()->GetLongConstant(ComputeIntegral(x->GetValue(), y->GetValue()));
5518 }
5519 // TODO: Evaluation for floating-point values.
5520 HConstant* Evaluate([[maybe_unused]] HFloatConstant* x,
5521 [[maybe_unused]] HFloatConstant* y) const override {
5522 return nullptr;
5523 }
5524 HConstant* Evaluate([[maybe_unused]] HDoubleConstant* x,
5525 [[maybe_unused]] HDoubleConstant* y) const override {
5526 return nullptr;
5527 }
5528
5529 DECLARE_INSTRUCTION(Max);
5530
5531 protected:
5532 DEFAULT_COPY_CONSTRUCTOR(Max);
5533 };
5534
5535 class HAbs final : public HUnaryOperation {
5536 public:
5537 HAbs(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc)
5538 : HUnaryOperation(kAbs, result_type, input, dex_pc) {}
5539
5540 // Evaluation for integral values.
5541 template <typename T> static T ComputeIntegral(T x) {
5542 return x < 0 ? -x : x;
5543 }
5544
5545 // Evaluation for floating-point values.
5546 // Note, as a "quality of implementation", rather than pure "spec compliance",
5547 // we require that Math.abs() clears the sign bit (but changes nothing else)
5548 // for all floating-point numbers, including NaN (signaling NaN may become quiet though).
5549 // http://b/30758343
5550 template <typename T, typename S> static T ComputeFP(T x) {
5551 S bits = bit_cast<S, T>(x);
5552 return bit_cast<T, S>(bits & std::numeric_limits<S>::max());
5553 }
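// Worked example (illustrative): masking with std::numeric_limits<S>::max() clears only
// the most significant (sign) bit, so -0.0f (bit pattern 0x80000000) becomes +0.0f
// (0x00000000), and a NaN keeps its payload with the sign bit cleared.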
5554
5555 HConstant* Evaluate(HIntConstant* x) const override {
5556 return GetBlock()->GetGraph()->GetIntConstant(ComputeIntegral(x->GetValue()));
5557 }
5558 HConstant* Evaluate(HLongConstant* x) const override {
5559 return GetBlock()->GetGraph()->GetLongConstant(ComputeIntegral(x->GetValue()));
5560 }
5561 HConstant* Evaluate(HFloatConstant* x) const override {
5562 return GetBlock()->GetGraph()->GetFloatConstant(ComputeFP<float, int32_t>(x->GetValue()));
5563 }
5564 HConstant* Evaluate(HDoubleConstant* x) const override {
5565 return GetBlock()->GetGraph()->GetDoubleConstant(ComputeFP<double, int64_t>(x->GetValue()));
5566 }
5567
5568 DECLARE_INSTRUCTION(Abs);
5569
5570 protected:
5571 DEFAULT_COPY_CONSTRUCTOR(Abs);
5572 };
5573
5574 class HDivZeroCheck final : public HExpression<1> {
5575 public:
5576 // `HDivZeroCheck` can trigger GC, as it may call the `ArithmeticException`
5577 // constructor. However, it can only do so on a fatal slow path, so execution never returns to
5578 // the instruction following the current one; thus 'SideEffects::None()' is used.
5579 HDivZeroCheck(HInstruction* value, uint32_t dex_pc)
5580 : HExpression(kDivZeroCheck, value->GetType(), SideEffects::None(), dex_pc) {
5581 SetRawInputAt(0, value);
5582 }
5583
5584 bool IsClonable() const override { return true; }
5585 bool CanBeMoved() const override { return true; }
5586
5587 bool InstructionDataEquals([[maybe_unused]] const HInstruction* other) const override {
5588 return true;
5589 }
5590
5591 bool NeedsEnvironment() const override { return true; }
5592 bool CanThrow() const override { return true; }
5593
5594 DECLARE_INSTRUCTION(DivZeroCheck);
5595
5596 protected:
5597 DEFAULT_COPY_CONSTRUCTOR(DivZeroCheck);
5598 };
5599
5600 class HShl final : public HBinaryOperation {
5601 public:
5602 HShl(DataType::Type result_type,
5603 HInstruction* value,
5604 HInstruction* distance,
5605 uint32_t dex_pc = kNoDexPc)
5606 : HBinaryOperation(kShl, result_type, value, distance, SideEffects::None(), dex_pc) {
5607 DCHECK_EQ(result_type, DataType::Kind(value->GetType()));
5608 DCHECK_EQ(DataType::Type::kInt32, DataType::Kind(distance->GetType()));
5609 }
5610
5611 template <typename T>
5612 static T Compute(T value, int32_t distance, int32_t max_shift_distance) {
5613 return value << (distance & max_shift_distance);
5614 }
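// Illustrative note: only the low bits of the shift distance are used, matching the
// Dex/Java shift semantics, e.g. Compute<int32_t>(1, 33, kMaxIntShiftDistance) shifts
// by 33 & 31 == 1 and yields 2.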
5615
5616 HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const override {
5617 return GetBlock()->GetGraph()->GetIntConstant(
5618 Compute(value->GetValue(), distance->GetValue(), kMaxIntShiftDistance));
5619 }
5620 HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const override {
5621 return GetBlock()->GetGraph()->GetLongConstant(
5622 Compute(value->GetValue(), distance->GetValue(), kMaxLongShiftDistance));
5623 }
5624
5625 DECLARE_INSTRUCTION(Shl);
5626
5627 protected:
5628 DEFAULT_COPY_CONSTRUCTOR(Shl);
5629 };
5630
5631 class HShr final : public HBinaryOperation {
5632 public:
5633 HShr(DataType::Type result_type,
5634 HInstruction* value,
5635 HInstruction* distance,
5636 uint32_t dex_pc = kNoDexPc)
5637 : HBinaryOperation(kShr, result_type, value, distance, SideEffects::None(), dex_pc) {
5638 DCHECK_EQ(result_type, DataType::Kind(value->GetType()));
5639 DCHECK_EQ(DataType::Type::kInt32, DataType::Kind(distance->GetType()));
5640 }
5641
5642 template <typename T>
5643 static T Compute(T value, int32_t distance, int32_t max_shift_distance) {
5644 return value >> (distance & max_shift_distance);
5645 }
5646
5647 HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const override {
5648 return GetBlock()->GetGraph()->GetIntConstant(
5649 Compute(value->GetValue(), distance->GetValue(), kMaxIntShiftDistance));
5650 }
5651 HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const override {
5652 return GetBlock()->GetGraph()->GetLongConstant(
5653 Compute(value->GetValue(), distance->GetValue(), kMaxLongShiftDistance));
5654 }
5655
5656 DECLARE_INSTRUCTION(Shr);
5657
5658 protected:
5659 DEFAULT_COPY_CONSTRUCTOR(Shr);
5660 };
5661
5662 class HUShr final : public HBinaryOperation {
5663 public:
5664 HUShr(DataType::Type result_type,
5665 HInstruction* value,
5666 HInstruction* distance,
5667 uint32_t dex_pc = kNoDexPc)
5668 : HBinaryOperation(kUShr, result_type, value, distance, SideEffects::None(), dex_pc) {
5669 DCHECK_EQ(result_type, DataType::Kind(value->GetType()));
5670 DCHECK_EQ(DataType::Type::kInt32, DataType::Kind(distance->GetType()));
5671 }
5672
5673 template <typename T>
5674 static T Compute(T value, int32_t distance, int32_t max_shift_distance) {
5675 using V = std::make_unsigned_t<T>;
5676 V ux = static_cast<V>(value);
5677 return static_cast<T>(ux >> (distance & max_shift_distance));
5678 }
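// Illustrative note: unlike HShr, the shift is performed on the unsigned representation,
// so zeroes are shifted in from the left, e.g. for int32_t, -8 >>> 1 yields 0x7FFFFFFC
// whereas the arithmetic shift -8 >> 1 yields -4.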
5679
5680 HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const override {
5681 return GetBlock()->GetGraph()->GetIntConstant(
5682 Compute(value->GetValue(), distance->GetValue(), kMaxIntShiftDistance));
5683 }
5684 HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const override {
5685 return GetBlock()->GetGraph()->GetLongConstant(
5686 Compute(value->GetValue(), distance->GetValue(), kMaxLongShiftDistance));
5687 }
5688
5689 DECLARE_INSTRUCTION(UShr);
5690
5691 protected:
5692 DEFAULT_COPY_CONSTRUCTOR(UShr);
5693 };
5694
5695 class HAnd final : public HBinaryOperation {
5696 public:
5697 HAnd(DataType::Type result_type,
5698 HInstruction* left,
5699 HInstruction* right,
5700 uint32_t dex_pc = kNoDexPc)
5701 : HBinaryOperation(kAnd, result_type, left, right, SideEffects::None(), dex_pc) {
5702 }
5703
5704 bool IsCommutative() const override { return true; }
5705
5706 template <typename T> static T Compute(T x, T y) { return x & y; }
5707
5708 HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
5709 return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
5710 }
5711 HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
5712 return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
5713 }
5714
5715 DECLARE_INSTRUCTION(And);
5716
5717 protected:
5718 DEFAULT_COPY_CONSTRUCTOR(And);
5719 };
5720
5721 class HOr final : public HBinaryOperation {
5722 public:
5723 HOr(DataType::Type result_type,
5724 HInstruction* left,
5725 HInstruction* right,
5726 uint32_t dex_pc = kNoDexPc)
5727 : HBinaryOperation(kOr, result_type, left, right, SideEffects::None(), dex_pc) {
5728 }
5729
5730 bool IsCommutative() const override { return true; }
5731
5732 template <typename T> static T Compute(T x, T y) { return x | y; }
5733
5734 HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
5735 return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
5736 }
5737 HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
5738 return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
5739 }
5740
5741 DECLARE_INSTRUCTION(Or);
5742
5743 protected:
5744 DEFAULT_COPY_CONSTRUCTOR(Or);
5745 };
5746
5747 class HXor final : public HBinaryOperation {
5748 public:
5749 HXor(DataType::Type result_type,
5750 HInstruction* left,
5751 HInstruction* right,
5752 uint32_t dex_pc = kNoDexPc)
5753 : HBinaryOperation(kXor, result_type, left, right, SideEffects::None(), dex_pc) {
5754 }
5755
5756 bool IsCommutative() const override { return true; }
5757
5758 template <typename T> static T Compute(T x, T y) { return x ^ y; }
5759
5760 HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
5761 return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
5762 }
5763 HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
5764 return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
5765 }
5766
5767 DECLARE_INSTRUCTION(Xor);
5768
5769 protected:
5770 DEFAULT_COPY_CONSTRUCTOR(Xor);
5771 };
5772
5773 class HRor final : public HBinaryOperation {
5774 public:
5775 HRor(DataType::Type result_type, HInstruction* value, HInstruction* distance)
5776 : HBinaryOperation(kRor, result_type, value, distance) {
5777 }
5778
5779 template <typename T>
5780 static T Compute(T value, int32_t distance, int32_t max_shift_value) {
5781 using V = std::make_unsigned_t<T>;
5782 V ux = static_cast<V>(value);
5783 if ((distance & max_shift_value) == 0) {
5784 return static_cast<T>(ux);
5785 } else {
5786 const V reg_bits = sizeof(T) * 8;
5787 return static_cast<T>(ux >> (distance & max_shift_value)) |
5788 (value << (reg_bits - (distance & max_shift_value)));
5789 }
5790 }
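// Worked example (illustrative): rotating the int32_t value 0x00000001 right by 1 moves
// the low-order bit into the high-order position, yielding 0x80000000; a distance that
// is a multiple of the register width leaves the value unchanged.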
5791
5792 HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const override {
5793 return GetBlock()->GetGraph()->GetIntConstant(
5794 Compute(value->GetValue(), distance->GetValue(), kMaxIntShiftDistance));
5795 }
5796 HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const override {
5797 return GetBlock()->GetGraph()->GetLongConstant(
5798 Compute(value->GetValue(), distance->GetValue(), kMaxLongShiftDistance));
5799 }
5800
5801 DECLARE_INSTRUCTION(Ror);
5802
5803 protected:
5804 DEFAULT_COPY_CONSTRUCTOR(Ror);
5805 };
5806
5807 class HRol final : public HBinaryOperation {
5808 public:
5809 HRol(DataType::Type result_type, HInstruction* value, HInstruction* distance)
5810 : HBinaryOperation(kRol, result_type, value, distance) {}
5811
5812 template <typename T>
5813 static T Compute(T value, int32_t distance, int32_t max_shift_value) {
5814 return HRor::Compute(value, -distance, max_shift_value);
5815 }
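// Illustrative note: rotating left by `distance` equals rotating right by
// (register width - distance); negating the distance relies on HRor::Compute masking it,
// e.g. for int32_t, -1 & 31 == 31, so a rotate-left by 1 becomes a rotate-right by 31.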
5816
5817 HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const override {
5818 return GetBlock()->GetGraph()->GetIntConstant(
5819 Compute(value->GetValue(), distance->GetValue(), kMaxIntShiftDistance));
5820 }
5821 HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const override {
5822 return GetBlock()->GetGraph()->GetLongConstant(
5823 Compute(value->GetValue(), distance->GetValue(), kMaxLongShiftDistance));
5824 }
5825
5826 DECLARE_INSTRUCTION(Rol);
5827
5828 protected:
5829 DEFAULT_COPY_CONSTRUCTOR(Rol);
5830 };
5831
5832 // The value of a parameter in this method. Its location depends on
5833 // the calling convention.
5834 class HParameterValue final : public HExpression<0> {
5835 public:
5836 HParameterValue(const DexFile& dex_file,
5837 dex::TypeIndex type_index,
5838 uint8_t index,
5839 DataType::Type parameter_type,
5840 bool is_this = false)
5841 : HExpression(kParameterValue, parameter_type, SideEffects::None(), kNoDexPc),
5842 dex_file_(dex_file),
5843 type_index_(type_index),
5844 index_(index) {
5845 SetPackedFlag<kFlagIsThis>(is_this);
5846 SetPackedFlag<kFlagCanBeNull>(!is_this);
5847 }
5848
5849 const DexFile& GetDexFile() const { return dex_file_; }
5850 dex::TypeIndex GetTypeIndex() const { return type_index_; }
5851 uint8_t GetIndex() const { return index_; }
5852 bool IsThis() const { return GetPackedFlag<kFlagIsThis>(); }
5853
5854 bool CanBeNull() const override { return GetPackedFlag<kFlagCanBeNull>(); }
5855 void SetCanBeNull(bool can_be_null) { SetPackedFlag<kFlagCanBeNull>(can_be_null); }
5856
5857 DECLARE_INSTRUCTION(ParameterValue);
5858
5859 protected:
5860 DEFAULT_COPY_CONSTRUCTOR(ParameterValue);
5861
5862 private:
5863 // Whether or not the parameter value corresponds to 'this' argument.
5864 static constexpr size_t kFlagIsThis = kNumberOfGenericPackedBits;
5865 static constexpr size_t kFlagCanBeNull = kFlagIsThis + 1;
5866 static constexpr size_t kNumberOfParameterValuePackedBits = kFlagCanBeNull + 1;
5867 static_assert(kNumberOfParameterValuePackedBits <= kMaxNumberOfPackedBits,
5868 "Too many packed fields.");
5869
5870 const DexFile& dex_file_;
5871 const dex::TypeIndex type_index_;
5872 // The index of this parameter in the parameters list. Must be less
5873 // than HGraph::number_of_in_vregs_.
5874 const uint8_t index_;
5875 };
5876
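// Bitwise complement (~) of an integral value.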
5877 class HNot final : public HUnaryOperation {
5878 public:
5879 HNot(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc)
5880 : HUnaryOperation(kNot, result_type, input, dex_pc) {
5881 }
5882
5883 bool InstructionDataEquals([[maybe_unused]] const HInstruction* other) const override {
5884 return true;
5885 }
5886
5887 template <typename T> static T Compute(T x) { return ~x; }
5888
5889 HConstant* Evaluate(HIntConstant* x) const override {
5890 return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()));
5891 }
5892 HConstant* Evaluate(HLongConstant* x) const override {
5893 return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue()));
5894 }
5895
5896 DECLARE_INSTRUCTION(Not);
5897
5898 protected:
5899 DEFAULT_COPY_CONSTRUCTOR(Not);
5900 };
5901
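// Logical negation of a boolean value. The input must be 0 or 1; the result is the other one.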
5902 class HBooleanNot final : public HUnaryOperation {
5903 public:
5904 explicit HBooleanNot(HInstruction* input, uint32_t dex_pc = kNoDexPc)
5905 : HUnaryOperation(kBooleanNot, DataType::Type::kBool, input, dex_pc) {
5906 }
5907
5908 bool InstructionDataEquals([[maybe_unused]] const HInstruction* other) const override {
5909 return true;
5910 }
5911
5912 template <typename T> static bool Compute(T x) {
5913 DCHECK(IsUint<1>(x)) << x;
5914 return !x;
5915 }
5916
5917 HConstant* Evaluate(HIntConstant* x) const override {
5918 return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()));
5919 }
5920
5921 DECLARE_INSTRUCTION(BooleanNot);
5922
5923 protected:
5924 DEFAULT_COPY_CONSTRUCTOR(BooleanNot);
5925 };
5926
5927 class HTypeConversion final : public HExpression<1> {
5928 public:
5929 // Instantiate a type conversion of `input` to `result_type`.
5930 HTypeConversion(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc)
5931 : HExpression(kTypeConversion, result_type, SideEffects::None(), dex_pc) {
5932 SetRawInputAt(0, input);
5933 // Invariant: We should never generate a conversion to a Boolean value.
5934 DCHECK_NE(DataType::Type::kBool, result_type);
5935 }
5936
5937 HInstruction* GetInput() const { return InputAt(0); }
5938 DataType::Type GetInputType() const { return GetInput()->GetType(); }
5939 DataType::Type GetResultType() const { return GetType(); }
5940
5941 bool IsClonable() const override { return true; }
5942 bool CanBeMoved() const override { return true; }
5943 bool InstructionDataEquals([[maybe_unused]] const HInstruction* other) const override {
5944 return true;
5945 }
5946 // Return whether the conversion is implicit. This includes conversion to the same type.
5947 bool IsImplicitConversion() const {
5948 return DataType::IsTypeConversionImplicit(GetInputType(), GetResultType());
5949 }
5950
5951 // Try to statically evaluate the conversion and return a HConstant
5952 // containing the result. If the input cannot be converted, return nullptr.
5953 HConstant* TryStaticEvaluation() const;
5954
5955 // Same but for `input` instead of GetInput().
5956 HConstant* TryStaticEvaluation(HInstruction* input) const;
5957
5958 DECLARE_INSTRUCTION(TypeConversion);
5959
5960 protected:
5961 DEFAULT_COPY_CONSTRUCTOR(TypeConversion);
5962 };
5963
5964 static constexpr uint32_t kNoRegNumber = -1;
5965
5966 class HNullCheck final : public HExpression<1> {
5967 public:
5968 // `HNullCheck` can trigger GC, as it may call the `NullPointerException`
5969 // constructor. However, it can only do so on a fatal slow path, so execution never returns to the
5970 // instruction following the current one; thus 'SideEffects::None()' is used.
5971 HNullCheck(HInstruction* value, uint32_t dex_pc)
5972 : HExpression(kNullCheck, value->GetType(), SideEffects::None(), dex_pc) {
5973 SetRawInputAt(0, value);
5974 }
5975
5976 bool IsClonable() const override { return true; }
5977 bool CanBeMoved() const override { return true; }
5978 bool InstructionDataEquals([[maybe_unused]] const HInstruction* other) const override {
5979 return true;
5980 }
5981
5982 bool NeedsEnvironment() const override { return true; }
5983
5984 bool CanThrow() const override { return true; }
5985
5986 bool CanBeNull() const override { return false; }
5987
5988 DECLARE_INSTRUCTION(NullCheck);
5989
5990 protected:
5991 DEFAULT_COPY_CONSTRUCTOR(NullCheck);
5992 };
5993
5994 // Embeds an ArtField and all the information required by the compiler. We cache
5995 // that information to avoid requiring the mutator lock every time we need it.
5996 class FieldInfo : public ValueObject {
5997 public:
5998 FieldInfo(ArtField* field,
5999 MemberOffset field_offset,
6000 DataType::Type field_type,
6001 bool is_volatile,
6002 uint32_t index,
6003 uint16_t declaring_class_def_index,
6004 const DexFile& dex_file)
6005 : field_(field),
6006 field_offset_(field_offset),
6007 field_type_(field_type),
6008 is_volatile_(is_volatile),
6009 index_(index),
6010 declaring_class_def_index_(declaring_class_def_index),
6011 dex_file_(dex_file) {}
6012
6013 ArtField* GetField() const { return field_; }
6014 MemberOffset GetFieldOffset() const { return field_offset_; }
6015 DataType::Type GetFieldType() const { return field_type_; }
6016 uint32_t GetFieldIndex() const { return index_; }
6017 uint16_t GetDeclaringClassDefIndex() const { return declaring_class_def_index_; }
6018 const DexFile& GetDexFile() const { return dex_file_; }
6019 bool IsVolatile() const { return is_volatile_; }
6020
6021 bool Equals(const FieldInfo& other) const {
6022 return field_ == other.field_ &&
6023 field_offset_ == other.field_offset_ &&
6024 field_type_ == other.field_type_ &&
6025 is_volatile_ == other.is_volatile_ &&
6026 index_ == other.index_ &&
6027 declaring_class_def_index_ == other.declaring_class_def_index_ &&
6028 &dex_file_ == &other.dex_file_;
6029 }
6030
6031 std::ostream& Dump(std::ostream& os) const {
6032 os << field_ << ", off: " << field_offset_ << ", type: " << field_type_
6033 << ", volatile: " << std::boolalpha << is_volatile_ << ", index_: " << std::dec << index_
6034 << ", declaring_class: " << declaring_class_def_index_ << ", dex: " << dex_file_;
6035 return os;
6036 }
6037
6038 private:
6039 ArtField* const field_;
6040 const MemberOffset field_offset_;
6041 const DataType::Type field_type_;
6042 const bool is_volatile_;
6043 const uint32_t index_;
6044 const uint16_t declaring_class_def_index_;
6045 const DexFile& dex_file_;
6046 };
6047
6048 inline bool operator==(const FieldInfo& a, const FieldInfo& b) {
6049 return a.Equals(b);
6050 }
6051
6052 inline std::ostream& operator<<(std::ostream& os, const FieldInfo& a) {
6053 return a.Dump(os);
6054 }
6055
6056 class HFieldAccess : public HInstruction {
6057 public:
6058 HFieldAccess(InstructionKind kind,
6059 SideEffects side_effects,
6060 ArtField* field,
6061 DataType::Type field_type,
6062 MemberOffset field_offset,
6063 bool is_volatile,
6064 uint32_t field_idx,
6065 uint16_t declaring_class_def_index,
6066 const DexFile& dex_file,
6067 uint32_t dex_pc)
6068 : HInstruction(kind, field_type, side_effects, dex_pc),
6069 field_info_(field,
6070 field_offset,
6071 field_type,
6072 is_volatile,
6073 field_idx,
6074 declaring_class_def_index,
6075 dex_file) {}
6076
6077 const FieldInfo& GetFieldInfo() const { return field_info_; }
6078 MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); }
6079 DataType::Type GetFieldType() const { return field_info_.GetFieldType(); }
6080 bool IsVolatile() const { return field_info_.IsVolatile(); }
6081
6082 DECLARE_ABSTRACT_INSTRUCTION(FieldAccess);
6083
6084 protected:
6085 DEFAULT_COPY_CONSTRUCTOR(FieldAccess);
6086
6087 private:
6088 const FieldInfo field_info_;
6089 };
6090
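// Reads an instance field of the object in input 0. The field details come from the cached
// FieldInfo, so codegen does not need the mutator lock. Volatile gets are not movable
// because moving them could break the required memory ordering.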
6091 class HInstanceFieldGet final : public HExpression<1, HFieldAccess> {
6092 public:
6093 HInstanceFieldGet(HInstruction* object,
6094 ArtField* field,
6095 DataType::Type field_type,
6096 MemberOffset field_offset,
6097 bool is_volatile,
6098 uint32_t field_idx,
6099 uint16_t declaring_class_def_index,
6100 const DexFile& dex_file,
6101 uint32_t dex_pc)
6102 : HExpression(kInstanceFieldGet,
6103 SideEffects::FieldReadOfType(field_type, is_volatile),
6104 field,
6105 field_type,
6106 field_offset,
6107 is_volatile,
6108 field_idx,
6109 declaring_class_def_index,
6110 dex_file,
6111 dex_pc) {
6112 SetRawInputAt(0, object);
6113 }
6114
6115 bool IsClonable() const override { return true; }
6116 bool CanBeMoved() const override { return !IsVolatile(); }
6117
6118 bool InstructionDataEquals(const HInstruction* other) const override {
6119 const HInstanceFieldGet* other_get = other->AsInstanceFieldGet();
6120 return GetFieldOffset().SizeValue() == other_get->GetFieldOffset().SizeValue();
6121 }
6122
6123 bool CanDoImplicitNullCheckOn(HInstruction* obj) const override {
6124 return (obj == InputAt(0)) && art::CanDoImplicitNullCheckOn(GetFieldOffset().Uint32Value());
6125 }
6126
6127 size_t ComputeHashCode() const override {
6128 return (HInstruction::ComputeHashCode() << 7) | GetFieldOffset().SizeValue();
6129 }
6130
6131 void SetType(DataType::Type new_type) {
6132 DCHECK(DataType::IsIntegralType(GetType()));
6133 DCHECK(DataType::IsIntegralType(new_type));
6134 DCHECK_EQ(DataType::Size(GetType()), DataType::Size(new_type));
6135 SetPackedField<TypeField>(new_type);
6136 }
6137
6138 DECLARE_INSTRUCTION(InstanceFieldGet);
6139
6140 protected:
6141 DEFAULT_COPY_CONSTRUCTOR(InstanceFieldGet);
6142 };
6143
6144 enum class WriteBarrierKind {
6145 // Emit the write barrier. No other write barrier relies on this one, so e.g. codegen can
6146 // decide to skip it if the value stored is null. This is the default behavior.
6147 kEmitNotBeingReliedOn,
6148 // Emit the write barrier. This write barrier is being relied on and must be emitted.
6149 kEmitBeingReliedOn,
6150 // Skip emitting the write barrier. This could be set because:
6151 // A) The write barrier is not needed (i.e. it is not a reference, or the value is the null
6152 // constant)
6153 // B) This write barrier was coalesced into another one so there's no need to emit it.
6154 kDontEmit,
6155 kLast = kDontEmit
6156 };
6157 std::ostream& operator<<(std::ostream& os, WriteBarrierKind rhs);
6158
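// Writes `value` (input 1) into an instance field of the object in input 0. Tracks whether
// the stored value can be null and, for reference stores, which write barrier strategy to use.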
6159 class HInstanceFieldSet final : public HExpression<2, HFieldAccess> {
6160 public:
6161 HInstanceFieldSet(HInstruction* object,
6162 HInstruction* value,
6163 ArtField* field,
6164 DataType::Type field_type,
6165 MemberOffset field_offset,
6166 bool is_volatile,
6167 uint32_t field_idx,
6168 uint16_t declaring_class_def_index,
6169 const DexFile& dex_file,
6170 uint32_t dex_pc)
6171 : HExpression(kInstanceFieldSet,
6172 SideEffects::FieldWriteOfType(field_type, is_volatile),
6173 field,
6174 field_type,
6175 field_offset,
6176 is_volatile,
6177 field_idx,
6178 declaring_class_def_index,
6179 dex_file,
6180 dex_pc) {
6181 SetPackedFlag<kFlagValueCanBeNull>(true);
6182 SetPackedField<WriteBarrierKindField>(
6183 field_type == DataType::Type::kReference
6184 ? WriteBarrierKind::kEmitNotBeingReliedOn
6185 : WriteBarrierKind::kDontEmit);
6186 SetRawInputAt(0, object);
6187 SetRawInputAt(1, value);
6188 }
6189
6190 bool IsClonable() const override { return true; }
6191
6192 bool CanDoImplicitNullCheckOn(HInstruction* obj) const override {
6193 return (obj == InputAt(0)) && art::CanDoImplicitNullCheckOn(GetFieldOffset().Uint32Value());
6194 }
6195
6196 HInstruction* GetValue() const { return InputAt(1); }
6197 bool GetValueCanBeNull() const { return GetPackedFlag<kFlagValueCanBeNull>(); }
6198 void ClearValueCanBeNull() { SetPackedFlag<kFlagValueCanBeNull>(false); }
6199 WriteBarrierKind GetWriteBarrierKind() { return GetPackedField<WriteBarrierKindField>(); }
6200 void SetWriteBarrierKind(WriteBarrierKind kind) {
6201 DCHECK(kind != WriteBarrierKind::kEmitNotBeingReliedOn)
6202 << "We shouldn't go back to the original value.";
6203 DCHECK_IMPLIES(kind == WriteBarrierKind::kDontEmit,
6204 GetWriteBarrierKind() != WriteBarrierKind::kEmitBeingReliedOn)
6205 << "If a write barrier was relied on by other write barriers, we cannot skip emitting it.";
6206 SetPackedField<WriteBarrierKindField>(kind);
6207 }
6208
6209 DECLARE_INSTRUCTION(InstanceFieldSet);
6210
6211 protected:
6212 DEFAULT_COPY_CONSTRUCTOR(InstanceFieldSet);
6213
6214 private:
6215 static constexpr size_t kFlagValueCanBeNull = kNumberOfGenericPackedBits;
6216 static constexpr size_t kWriteBarrierKind = kFlagValueCanBeNull + 1;
6217 static constexpr size_t kWriteBarrierKindSize =
6218 MinimumBitsToStore(static_cast<size_t>(WriteBarrierKind::kLast));
6219 static constexpr size_t kNumberOfInstanceFieldSetPackedBits =
6220 kWriteBarrierKind + kWriteBarrierKindSize;
6221 static_assert(kNumberOfInstanceFieldSetPackedBits <= kMaxNumberOfPackedBits,
6222 "Too many packed fields.");
6223
6224 using WriteBarrierKindField =
6225 BitField<WriteBarrierKind, kWriteBarrierKind, kWriteBarrierKindSize>;
6226 };
6227
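// Reads the element at `index` (input 1) from `array` (input 0). Also used to implement
// String.charAt(), see kFlagIsStringCharAt below. A minimal construction sketch (illustrative
// only; `allocator`, `array`, `index`, `dex_pc` and `block` are assumed to exist):
//
//   HArrayGet* get = new (allocator) HArrayGet(array, index, DataType::Type::kInt32, dex_pc);
//   block->AddInstruction(get);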
6228 class HArrayGet final : public HExpression<2> {
6229 public:
6230 HArrayGet(HInstruction* array,
6231 HInstruction* index,
6232 DataType::Type type,
6233 uint32_t dex_pc)
6234 : HArrayGet(array,
6235 index,
6236 type,
6237 SideEffects::ArrayReadOfType(type),
6238 dex_pc,
6239 /* is_string_char_at= */ false) {
6240 }
6241
6242 HArrayGet(HInstruction* array,
6243 HInstruction* index,
6244 DataType::Type type,
6245 SideEffects side_effects,
6246 uint32_t dex_pc,
6247 bool is_string_char_at)
6248 : HExpression(kArrayGet, type, side_effects, dex_pc) {
6249 SetPackedFlag<kFlagIsStringCharAt>(is_string_char_at);
6250 SetRawInputAt(0, array);
6251 SetRawInputAt(1, index);
6252 }
6253
6254 bool IsClonable() const override { return true; }
6255 bool CanBeMoved() const override { return true; }
6256 bool InstructionDataEquals([[maybe_unused]] const HInstruction* other) const override {
6257 return true;
6258 }
6259 bool CanDoImplicitNullCheckOn([[maybe_unused]] HInstruction* obj) const override {
6260 // TODO: We can be smarter here.
6261 // Currently, unless the array is the result of NewArray, the array access is always
6262 // preceded by some form of null NullCheck necessary for the bounds check, usually
6263 // implicit null check on the ArrayLength input to BoundsCheck or Deoptimize for
6264 // dynamic BCE. There are cases when these could be removed to produce better code.
6265 // If we ever add optimizations to do so we should allow an implicit check here
6266 // (as long as the address falls in the first page).
6267 //
6268 // As an example of such fancy optimization, we could eliminate BoundsCheck for
6269 // a = cond ? new int[1] : null;
6270 // a[0]; // The Phi does not need bounds check for either input.
6271 return false;
6272 }
6273
6274 bool IsEquivalentOf(HArrayGet* other) const {
6275 bool result = (GetDexPc() == other->GetDexPc());
6276 if (kIsDebugBuild && result) {
6277 DCHECK_EQ(GetBlock(), other->GetBlock());
6278 DCHECK_EQ(GetArray(), other->GetArray());
6279 DCHECK_EQ(GetIndex(), other->GetIndex());
6280 if (DataType::IsIntOrLongType(GetType())) {
6281 DCHECK(DataType::IsFloatingPointType(other->GetType())) << other->GetType();
6282 } else {
6283 DCHECK(DataType::IsFloatingPointType(GetType())) << GetType();
6284 DCHECK(DataType::IsIntOrLongType(other->GetType())) << other->GetType();
6285 }
6286 }
6287 return result;
6288 }
6289
6290 bool IsStringCharAt() const { return GetPackedFlag<kFlagIsStringCharAt>(); }
6291
6292 HInstruction* GetArray() const { return InputAt(0); }
6293 HInstruction* GetIndex() const { return InputAt(1); }
6294
6295 void SetType(DataType::Type new_type) {
6296 DCHECK(DataType::IsIntegralType(GetType()));
6297 DCHECK(DataType::IsIntegralType(new_type));
6298 DCHECK_EQ(DataType::Size(GetType()), DataType::Size(new_type));
6299 SetPackedField<TypeField>(new_type);
6300 }
6301
6302 DECLARE_INSTRUCTION(ArrayGet);
6303
6304 protected:
6305 DEFAULT_COPY_CONSTRUCTOR(ArrayGet);
6306
6307 private:
6308 // We treat a String as an array, creating the HArrayGet from String.charAt()
6309 // intrinsic in the instruction simplifier. We can always determine whether
6310 // a particular HArrayGet is actually a String.charAt() by looking at the type
6311 // of the input but that requires holding the mutator lock, so we prefer to use
6312 // a flag, so that code generators don't need to do the locking.
6313 static constexpr size_t kFlagIsStringCharAt = kNumberOfGenericPackedBits;
6314 static constexpr size_t kNumberOfArrayGetPackedBits = kFlagIsStringCharAt + 1;
6315 static_assert(kNumberOfArrayGetPackedBits <= HInstruction::kMaxNumberOfPackedBits,
6316 "Too many packed fields.");
6317 };
6318
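// Writes `value` (input 2) into `array` (input 0) at `index` (input 1). Storing a reference
// may require a runtime type check and can then throw ArrayStoreException, which is why the
// instruction may need an environment.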
6319 class HArraySet final : public HExpression<3> {
6320 public:
6321 HArraySet(HInstruction* array,
6322 HInstruction* index,
6323 HInstruction* value,
6324 DataType::Type expected_component_type,
6325 uint32_t dex_pc)
6326 : HArraySet(array,
6327 index,
6328 value,
6329 expected_component_type,
6330 // Make a best guess for side effects now, may be refined during SSA building.
6331 ComputeSideEffects(GetComponentType(value->GetType(), expected_component_type)),
6332 dex_pc) {
6333 }
6334
6335 HArraySet(HInstruction* array,
6336 HInstruction* index,
6337 HInstruction* value,
6338 DataType::Type expected_component_type,
6339 SideEffects side_effects,
6340 uint32_t dex_pc)
6341 : HExpression(kArraySet, side_effects, dex_pc) {
6342 SetPackedField<ExpectedComponentTypeField>(expected_component_type);
6343 SetPackedFlag<kFlagNeedsTypeCheck>(value->GetType() == DataType::Type::kReference);
6344 SetPackedFlag<kFlagValueCanBeNull>(true);
6345 SetPackedFlag<kFlagStaticTypeOfArrayIsObjectArray>(false);
6346 SetPackedField<WriteBarrierKindField>(
6347 value->GetType() == DataType::Type::kReference
6348 ? WriteBarrierKind::kEmitNotBeingReliedOn
6349 : WriteBarrierKind::kDontEmit);
6350 SetRawInputAt(0, array);
6351 SetRawInputAt(1, index);
6352 SetRawInputAt(2, value);
6353 }
6354
6355 bool IsClonable() const override { return true; }
6356
6357 bool NeedsEnvironment() const override {
6358 // We call a runtime method to throw ArrayStoreException.
6359 return NeedsTypeCheck();
6360 }
6361
6362 // Can throw ArrayStoreException.
6363 bool CanThrow() const override { return NeedsTypeCheck(); }
6364
6365 bool CanDoImplicitNullCheckOn([[maybe_unused]] HInstruction* obj) const override {
6366 // TODO: Same as for ArrayGet.
6367 return false;
6368 }
6369
6370 void ClearTypeCheck() {
6371 SetPackedFlag<kFlagNeedsTypeCheck>(false);
6372 // Clear the `CanTriggerGC` flag too as we can only trigger a GC when doing a type check.
6373 SetSideEffects(GetSideEffects().Exclusion(SideEffects::CanTriggerGC()));
6374 // Clear the environment too as we can only throw if we need a type check.
6375 RemoveEnvironment();
6376 }
6377
6378 void ClearValueCanBeNull() {
6379 SetPackedFlag<kFlagValueCanBeNull>(false);
6380 }
6381
6382 void SetStaticTypeOfArrayIsObjectArray() {
6383 SetPackedFlag<kFlagStaticTypeOfArrayIsObjectArray>(true);
6384 }
6385
6386 bool GetValueCanBeNull() const { return GetPackedFlag<kFlagValueCanBeNull>(); }
6387 bool NeedsTypeCheck() const { return GetPackedFlag<kFlagNeedsTypeCheck>(); }
6388 bool StaticTypeOfArrayIsObjectArray() const {
6389 return GetPackedFlag<kFlagStaticTypeOfArrayIsObjectArray>();
6390 }
6391
6392 HInstruction* GetArray() const { return InputAt(0); }
6393 HInstruction* GetIndex() const { return InputAt(1); }
6394 HInstruction* GetValue() const { return InputAt(2); }
6395
6396 DataType::Type GetComponentType() const {
6397 return GetComponentType(GetValue()->GetType(), GetRawExpectedComponentType());
6398 }
6399
6400 static DataType::Type GetComponentType(DataType::Type value_type,
6401 DataType::Type expected_component_type) {
6402 // The Dex format does not type floating point array operations. Since the
6403 // `expected_component_type` comes from SSA building and therefore cannot be
6404 // trusted to be correct, we also check the value type. If it is a floating
6405 // point type, we must use that type.
6406 return ((value_type == DataType::Type::kFloat32) || (value_type == DataType::Type::kFloat64))
6407 ? value_type
6408 : expected_component_type;
6409 }
6410
6411 DataType::Type GetRawExpectedComponentType() const {
6412 return GetPackedField<ExpectedComponentTypeField>();
6413 }
6414
6415 static SideEffects ComputeSideEffects(DataType::Type type) {
6416 return SideEffects::ArrayWriteOfType(type).Union(SideEffectsForArchRuntimeCalls(type));
6417 }
6418
6419 static SideEffects SideEffectsForArchRuntimeCalls(DataType::Type value_type) {
6420 return (value_type == DataType::Type::kReference) ? SideEffects::CanTriggerGC()
6421 : SideEffects::None();
6422 }
6423
6424 WriteBarrierKind GetWriteBarrierKind() { return GetPackedField<WriteBarrierKindField>(); }
6425
6426 void SetWriteBarrierKind(WriteBarrierKind kind) {
6427 DCHECK(kind != WriteBarrierKind::kEmitNotBeingReliedOn)
6428 << "We shouldn't go back to the original value.";
6429 DCHECK_IMPLIES(kind == WriteBarrierKind::kDontEmit,
6430 GetWriteBarrierKind() != WriteBarrierKind::kEmitBeingReliedOn)
6431 << "If a write barrier was relied on by other write barriers, we cannot skip emitting it.";
6432 SetPackedField<WriteBarrierKindField>(kind);
6433 }
6434
6435 DECLARE_INSTRUCTION(ArraySet);
6436
6437 protected:
6438 DEFAULT_COPY_CONSTRUCTOR(ArraySet);
6439
6440 private:
6441 static constexpr size_t kFieldExpectedComponentType = kNumberOfGenericPackedBits;
6442 static constexpr size_t kFieldExpectedComponentTypeSize =
6443 MinimumBitsToStore(static_cast<size_t>(DataType::Type::kLast));
6444 static constexpr size_t kFlagNeedsTypeCheck =
6445 kFieldExpectedComponentType + kFieldExpectedComponentTypeSize;
6446 static constexpr size_t kFlagValueCanBeNull = kFlagNeedsTypeCheck + 1;
6447 // Cached information for the reference_type_info_ so that codegen
6448 // does not need to inspect the static type.
6449 static constexpr size_t kFlagStaticTypeOfArrayIsObjectArray = kFlagValueCanBeNull + 1;
6450 static constexpr size_t kWriteBarrierKind = kFlagStaticTypeOfArrayIsObjectArray + 1;
6451 static constexpr size_t kWriteBarrierKindSize =
6452 MinimumBitsToStore(static_cast<size_t>(WriteBarrierKind::kLast));
6453 static constexpr size_t kNumberOfArraySetPackedBits = kWriteBarrierKind + kWriteBarrierKindSize;
6454 static_assert(kNumberOfArraySetPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
6455 using ExpectedComponentTypeField =
6456 BitField<DataType::Type, kFieldExpectedComponentType, kFieldExpectedComponentTypeSize>;
6457
6458 using WriteBarrierKindField =
6459 BitField<WriteBarrierKind, kWriteBarrierKind, kWriteBarrierKindSize>;
6460 };
6461
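// Loads the length of `array` (input 0). Also used to implement String.length() and
// String.isEmpty(), see kFlagIsStringLength below.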
6462 class HArrayLength final : public HExpression<1> {
6463 public:
6464 HArrayLength(HInstruction* array, uint32_t dex_pc, bool is_string_length = false)
6465 : HExpression(kArrayLength, DataType::Type::kInt32, SideEffects::None(), dex_pc) {
6466 SetPackedFlag<kFlagIsStringLength>(is_string_length);
6467 // Note that arrays do not change length, so the instruction does not
6468 // depend on any write.
6469 SetRawInputAt(0, array);
6470 }
6471
6472 bool IsClonable() const override { return true; }
6473 bool CanBeMoved() const override { return true; }
6474 bool InstructionDataEquals([[maybe_unused]] const HInstruction* other) const override {
6475 return true;
6476 }
6477 bool CanDoImplicitNullCheckOn(HInstruction* obj) const override {
6478 return obj == InputAt(0);
6479 }
6480
6481 bool IsStringLength() const { return GetPackedFlag<kFlagIsStringLength>(); }
6482
6483 DECLARE_INSTRUCTION(ArrayLength);
6484
6485 protected:
6486 DEFAULT_COPY_CONSTRUCTOR(ArrayLength);
6487
6488 private:
6489 // We treat a String as an array, creating the HArrayLength from String.length()
6490 // or String.isEmpty() intrinsic in the instruction simplifier. We can always
6491 // determine whether a particular HArrayLength is actually a String.length() by
6492 // looking at the type of the input but that requires holding the mutator lock, so
6493 // we prefer to use a flag, so that code generators don't need to do the locking.
6494 static constexpr size_t kFlagIsStringLength = kNumberOfGenericPackedBits;
6495 static constexpr size_t kNumberOfArrayLengthPackedBits = kFlagIsStringLength + 1;
6496 static_assert(kNumberOfArrayLengthPackedBits <= HInstruction::kMaxNumberOfPackedBits,
6497 "Too many packed fields.");
6498 };
6499
6500 class HBoundsCheck final : public HExpression<2> {
6501 public:
6502 // `HBoundsCheck` can trigger GC, as it may call the `IndexOutOfBoundsException`
6503 // constructor. However, it can only do so on a fatal slow path, so execution never returns to the
6504 // instruction following the current one; thus 'SideEffects::None()' is used.
6505 HBoundsCheck(HInstruction* index,
6506 HInstruction* length,
6507 uint32_t dex_pc,
6508 bool is_string_char_at = false)
6509 : HExpression(kBoundsCheck, index->GetType(), SideEffects::None(), dex_pc) {
6510 DCHECK_EQ(DataType::Type::kInt32, DataType::Kind(index->GetType()));
6511 SetPackedFlag<kFlagIsStringCharAt>(is_string_char_at);
6512 SetRawInputAt(0, index);
6513 SetRawInputAt(1, length);
6514 }
6515
6516 bool IsClonable() const override { return true; }
6517 bool CanBeMoved() const override { return true; }
6518 bool InstructionDataEquals([[maybe_unused]] const HInstruction* other) const override {
6519 return true;
6520 }
6521
6522 bool NeedsEnvironment() const override { return true; }
6523
6524 bool CanThrow() const override { return true; }
6525
6526 bool IsStringCharAt() const { return GetPackedFlag<kFlagIsStringCharAt>(); }
6527
6528 HInstruction* GetIndex() const { return InputAt(0); }
6529
6530 DECLARE_INSTRUCTION(BoundsCheck);
6531
6532 protected:
6533 DEFAULT_COPY_CONSTRUCTOR(BoundsCheck);
6534
6535 private:
6536 static constexpr size_t kFlagIsStringCharAt = kNumberOfGenericPackedBits;
6537 static constexpr size_t kNumberOfBoundsCheckPackedBits = kFlagIsStringCharAt + 1;
6538 static_assert(kNumberOfBoundsCheckPackedBits <= HInstruction::kMaxNumberOfPackedBits,
6539 "Too many packed fields.");
6540 };
6541
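// Safepoint at which the runtime may suspend the thread, e.g. for GC. A suspend check is
// typically kept in every loop; the no-op form (see kFlagIsNoOp below) stays in the graph for
// optimizations that rely on its presence but emits no code.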
6542 class HSuspendCheck final : public HExpression<0> {
6543 public:
6544 explicit HSuspendCheck(uint32_t dex_pc = kNoDexPc, bool is_no_op = false)
6545 : HExpression(kSuspendCheck, SideEffects::CanTriggerGC(), dex_pc),
6546 slow_path_(nullptr) {
6547 SetPackedFlag<kFlagIsNoOp>(is_no_op);
6548 }
6549
6550 bool IsClonable() const override { return true; }
6551
6552 bool NeedsEnvironment() const override {
6553 return true;
6554 }
6555
6556 void SetIsNoOp(bool is_no_op) { SetPackedFlag<kFlagIsNoOp>(is_no_op); }
6557 bool IsNoOp() const { return GetPackedFlag<kFlagIsNoOp>(); }
6558
6560 void SetSlowPath(SlowPathCode* slow_path) { slow_path_ = slow_path; }
6561 SlowPathCode* GetSlowPath() const { return slow_path_; }
6562
6563 DECLARE_INSTRUCTION(SuspendCheck);
6564
6565 protected:
6566 DEFAULT_COPY_CONSTRUCTOR(SuspendCheck);
6567
6568 // True if the HSuspendCheck should not emit any code during codegen. It is
6569 // not possible to simply remove this instruction to disable codegen, as
6570 // other optimizations (e.g. CHAGuardVisitor::HoistGuard) depend on
6571 // HSuspendCheck being present in every loop.
6572 static constexpr size_t kFlagIsNoOp = kNumberOfGenericPackedBits;
6573 static constexpr size_t kNumberOfSuspendCheckPackedBits = kFlagIsNoOp + 1;
6574 static_assert(kNumberOfSuspendCheckPackedBits <= HInstruction::kMaxNumberOfPackedBits,
6575 "Too many packed fields.");
6576
6577 private:
6578 // Only used for code generation, in order to share the same slow path between back edges
6579 // of a same loop.
6580 SlowPathCode* slow_path_;
6581 };
6582
6583 // Pseudo-instruction which doesn't generate any code.
6584 // If `needs_environment` is true, it can be used to generate an environment. It is used, for
6585 // example, to provide the native debugger with mapping information. It ensures that we can
6586 // generate line numbers and local variable information at this point.
6587 class HNop : public HExpression<0> {
6588 public:
6589 explicit HNop(uint32_t dex_pc, bool needs_environment)
6590 : HExpression<0>(kNop, SideEffects::None(), dex_pc), needs_environment_(needs_environment) {
6591 }
6592
6593 bool NeedsEnvironment() const override {
6594 return needs_environment_;
6595 }
6596
6597 DECLARE_INSTRUCTION(Nop);
6598
6599 protected:
6600 DEFAULT_COPY_CONSTRUCTOR(Nop);
6601
6602 private:
6603 bool needs_environment_;
6604 };
6605
6606 /**
6607 * Instruction to load a Class object.
6608 */
6609 class HLoadClass final : public HInstruction {
6610 public:
6611 // Determines how to load the Class.
6612 enum class LoadKind {
6613 // We cannot load this class. See HSharpening::SharpenLoadClass.
6614 kInvalid = -1,
6615
6616 // Use the Class* from the method's own ArtMethod*.
6617 kReferrersClass,
6618
6619 // Use PC-relative boot image Class* address that will be known at link time.
6620 // Used for boot image classes referenced by boot image code.
6621 kBootImageLinkTimePcRelative,
6622
6623 // Load from a boot image entry in the .data.img.rel.ro using a PC-relative load.
6624 // Used for boot image classes referenced by apps in AOT-compiled code.
6625 kBootImageRelRo,
6626
6627 // Load from an app image entry in the .data.img.rel.ro using a PC-relative load.
6628 // Used for app image classes referenced by apps in AOT-compiled code.
6629 kAppImageRelRo,
6630
6631 // Load from an entry in the .bss section using a PC-relative load.
6632 // Used for classes outside boot image referenced by AOT-compiled app and boot image code.
6633 kBssEntry,
6634
6635 // Load from an entry for public class in the .bss section using a PC-relative load.
6636 // Used for classes that were unresolved during AOT-compilation outside the literal
6637 // package of the compiling class. Such classes are accessible only if they are public
6638 // and the .bss entry shall therefore be filled only if the resolved class is public.
6639 kBssEntryPublic,
6640
6641 // Load from an entry for package class in the .bss section using a PC-relative load.
6642 // Used for classes that were unresolved during AOT-compilation but within the literal
6643 // package of the compiling class. Such classes are accessible if they are public or
6644 // in the same package which, given the literal package match, requires only matching
6645 // defining class loader and the .bss entry shall therefore be filled only if at least
6646 // one of those conditions holds. Note that all code in an oat file belongs to classes
6647 // with the same defining class loader.
6648 kBssEntryPackage,
6649
6650 // Use a known boot image Class* address, embedded in the code by the codegen.
6651 // Used for boot image classes referenced by apps in JIT-compiled code.
6652 kJitBootImageAddress,
6653
6654 // Load from the root table associated with the JIT compiled method.
6655 kJitTableAddress,
6656
6657 // Load using a simple runtime call. This is the fall-back load kind when
6658 // the codegen is unable to use another appropriate kind.
6659 kRuntimeCall,
6660
6661 kLast = kRuntimeCall
6662 };
6663
6664 HLoadClass(HCurrentMethod* current_method,
6665 dex::TypeIndex type_index,
6666 const DexFile& dex_file,
6667 Handle<mirror::Class> klass,
6668 bool is_referrers_class,
6669 uint32_t dex_pc,
6670 bool needs_access_check)
6671 : HInstruction(kLoadClass,
6672 DataType::Type::kReference,
6673 SideEffectsForArchRuntimeCalls(),
6674 dex_pc),
6675 special_input_(HUserRecord<HInstruction*>(current_method)),
6676 type_index_(type_index),
6677 dex_file_(dex_file),
6678 klass_(klass) {
6679 // Referrers class should not need access check. We never inline unverified
6680 // methods so we can't possibly end up in this situation.
6681 DCHECK_IMPLIES(is_referrers_class, !needs_access_check);
6682
6683 SetPackedField<LoadKindField>(
6684 is_referrers_class ? LoadKind::kReferrersClass : LoadKind::kRuntimeCall);
6685 SetPackedFlag<kFlagNeedsAccessCheck>(needs_access_check);
6686 SetPackedFlag<kFlagIsInImage>(false);
6687 SetPackedFlag<kFlagGenerateClInitCheck>(false);
6688 SetPackedFlag<kFlagValidLoadedClassRTI>(false);
6689 }
6690
6691 bool IsClonable() const override { return true; }
6692
6693 void SetLoadKind(LoadKind load_kind);
6694
6695 LoadKind GetLoadKind() const {
6696 return GetPackedField<LoadKindField>();
6697 }
6698
6699 bool HasPcRelativeLoadKind() const {
6700 return GetLoadKind() == LoadKind::kBootImageLinkTimePcRelative ||
6701 GetLoadKind() == LoadKind::kBootImageRelRo ||
6702 GetLoadKind() == LoadKind::kAppImageRelRo ||
6703 GetLoadKind() == LoadKind::kBssEntry ||
6704 GetLoadKind() == LoadKind::kBssEntryPublic ||
6705 GetLoadKind() == LoadKind::kBssEntryPackage;
6706 }
6707
6708 bool CanBeMoved() const override { return true; }
6709
6710 bool InstructionDataEquals(const HInstruction* other) const override;
6711
6712 size_t ComputeHashCode() const override { return type_index_.index_; }
6713
6714 bool CanBeNull() const override { return false; }
6715
6716 bool NeedsEnvironment() const override {
6717 return CanCallRuntime();
6718 }
6719 bool NeedsBss() const override {
6720 LoadKind load_kind = GetLoadKind();
6721 return load_kind == LoadKind::kBssEntry ||
6722 load_kind == LoadKind::kBssEntryPublic ||
6723 load_kind == LoadKind::kBssEntryPackage;
6724 }
6725
6726 void SetMustGenerateClinitCheck(bool generate_clinit_check) {
6727 SetPackedFlag<kFlagGenerateClInitCheck>(generate_clinit_check);
6728 }
6729
6730 bool CanCallRuntime() const {
6731 return NeedsAccessCheck() ||
6732 MustGenerateClinitCheck() ||
6733 NeedsBss() ||
6734 GetLoadKind() == LoadKind::kRuntimeCall;
6735 }
6736
6737 bool CanThrow() const override {
6738 return NeedsAccessCheck() ||
6739 MustGenerateClinitCheck() ||
6740 // If the class is in the boot or app image, the lookup in the runtime call cannot throw.
6741 ((GetLoadKind() == LoadKind::kRuntimeCall || NeedsBss()) && !IsInImage());
6742 }
6743
6744 ReferenceTypeInfo GetLoadedClassRTI() {
6745 if (GetPackedFlag<kFlagValidLoadedClassRTI>()) {
6746 // Note: The is_exact flag from the return value should not be used.
6747 return ReferenceTypeInfo::CreateUnchecked(klass_, /* is_exact= */ true);
6748 } else {
6749 return ReferenceTypeInfo::CreateInvalid();
6750 }
6751 }
6752
6753 // Loaded class RTI is marked as valid by RTP if the klass_ is admissible.
6754 void SetValidLoadedClassRTI() {
6755 DCHECK(klass_ != nullptr);
6756 SetPackedFlag<kFlagValidLoadedClassRTI>(true);
6757 }
6758
6759 dex::TypeIndex GetTypeIndex() const { return type_index_; }
6760 const DexFile& GetDexFile() const { return dex_file_; }
6761
6762 static SideEffects SideEffectsForArchRuntimeCalls() {
6763 return SideEffects::CanTriggerGC();
6764 }
6765
6766 bool IsReferrersClass() const { return GetLoadKind() == LoadKind::kReferrersClass; }
6767 bool NeedsAccessCheck() const { return GetPackedFlag<kFlagNeedsAccessCheck>(); }
6768 bool IsInImage() const { return GetPackedFlag<kFlagIsInImage>(); }
6769 bool MustGenerateClinitCheck() const { return GetPackedFlag<kFlagGenerateClInitCheck>(); }
6770
6771 bool MustResolveTypeOnSlowPath() const {
6772 // Check that this instruction has a slow path.
6773 LoadKind load_kind = GetLoadKind();
6774 DCHECK(load_kind != LoadKind::kRuntimeCall); // kRuntimeCall calls on main path.
6775 bool must_resolve_type_on_slow_path =
6776 load_kind == LoadKind::kBssEntry ||
6777 load_kind == LoadKind::kBssEntryPublic ||
6778 load_kind == LoadKind::kBssEntryPackage;
6779 DCHECK(must_resolve_type_on_slow_path || MustGenerateClinitCheck());
6780 return must_resolve_type_on_slow_path;
6781 }
6782
6783 void MarkInImage() {
6784 SetPackedFlag<kFlagIsInImage>(true);
6785 }
6786
6787 void AddSpecialInput(HInstruction* special_input);
6788
6789 using HInstruction::GetInputRecords; // Keep the const version visible.
6790 ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() final {
6791 return ArrayRef<HUserRecord<HInstruction*>>(
6792 &special_input_, (special_input_.GetInstruction() != nullptr) ? 1u : 0u);
6793 }
6794
6795 Handle<mirror::Class> GetClass() const {
6796 return klass_;
6797 }
6798
6799 DECLARE_INSTRUCTION(LoadClass);
6800
6801 protected:
6802 DEFAULT_COPY_CONSTRUCTOR(LoadClass);
6803
6804 private:
6805 static constexpr size_t kFlagNeedsAccessCheck = kNumberOfGenericPackedBits;
6806 // Whether the type is in an image (boot image or app image).
6807 static constexpr size_t kFlagIsInImage = kFlagNeedsAccessCheck + 1;
6808 // Whether this instruction must generate the initialization check.
6809 // Used for code generation.
6810 static constexpr size_t kFlagGenerateClInitCheck = kFlagIsInImage + 1;
6811 static constexpr size_t kFieldLoadKind = kFlagGenerateClInitCheck + 1;
6812 static constexpr size_t kFieldLoadKindSize =
6813 MinimumBitsToStore(static_cast<size_t>(LoadKind::kLast));
6814 static constexpr size_t kFlagValidLoadedClassRTI = kFieldLoadKind + kFieldLoadKindSize;
6815 static constexpr size_t kNumberOfLoadClassPackedBits = kFlagValidLoadedClassRTI + 1;
6816 static_assert(kNumberOfLoadClassPackedBits < kMaxNumberOfPackedBits, "Too many packed fields.");
6817 using LoadKindField = BitField<LoadKind, kFieldLoadKind, kFieldLoadKindSize>;
6818
6819 static bool HasTypeReference(LoadKind load_kind) {
6820 return load_kind == LoadKind::kReferrersClass ||
6821 load_kind == LoadKind::kBootImageLinkTimePcRelative ||
6822 load_kind == LoadKind::kAppImageRelRo ||
6823 load_kind == LoadKind::kBssEntry ||
6824 load_kind == LoadKind::kBssEntryPublic ||
6825 load_kind == LoadKind::kBssEntryPackage ||
6826 load_kind == LoadKind::kRuntimeCall;
6827 }
6828
6829 void SetLoadKindInternal(LoadKind load_kind);
6830
6831 // The special input is the HCurrentMethod for kRuntimeCall or kReferrersClass.
6832 // For other load kinds it's empty or possibly some architecture-specific instruction
6833 // for PC-relative loads, i.e. kBssEntry* or kBootImageLinkTimePcRelative.
6834 HUserRecord<HInstruction*> special_input_;
6835
6836 // A type index and dex file where the class can be accessed. The dex file can be:
6837 // - The compiling method's dex file if the class is defined there too.
6838 // - The compiling method's dex file if the class is referenced there.
6839 // - The dex file where the class is defined. When the load kind can only be
6840 // kBssEntry* or kRuntimeCall, we cannot emit code for this `HLoadClass`.
6841 const dex::TypeIndex type_index_;
6842 const DexFile& dex_file_;
6843
6844 Handle<mirror::Class> klass_;
6845 };
6846 std::ostream& operator<<(std::ostream& os, HLoadClass::LoadKind rhs);
6847
6848 // Note: defined outside class to see operator<<(., HLoadClass::LoadKind).
6849 inline void HLoadClass::SetLoadKind(LoadKind load_kind) {
6850 // The load kind should be determined before inserting the instruction to the graph.
6851 DCHECK(GetBlock() == nullptr);
6852 DCHECK(GetEnvironment() == nullptr);
6853 SetPackedField<LoadKindField>(load_kind);
6854 if (load_kind != LoadKind::kRuntimeCall && load_kind != LoadKind::kReferrersClass) {
6855 special_input_ = HUserRecord<HInstruction*>(nullptr);
6856 }
6857 if (!NeedsEnvironment()) {
6858 SetSideEffects(SideEffects::None());
6859 }
6860 }
6861
6862 // Note: defined outside class to see operator<<(., HLoadClass::LoadKind).
6863 inline void HLoadClass::AddSpecialInput(HInstruction* special_input) {
6864 // The special input is used for PC-relative loads on some architectures,
6865 // including literal pool loads, which are PC-relative too.
6866 DCHECK(GetLoadKind() == LoadKind::kBootImageLinkTimePcRelative ||
6867 GetLoadKind() == LoadKind::kBootImageRelRo ||
6868 GetLoadKind() == LoadKind::kAppImageRelRo ||
6869 GetLoadKind() == LoadKind::kBssEntry ||
6870 GetLoadKind() == LoadKind::kBssEntryPublic ||
6871 GetLoadKind() == LoadKind::kBssEntryPackage ||
6872 GetLoadKind() == LoadKind::kJitBootImageAddress) << GetLoadKind();
6873 DCHECK(special_input_.GetInstruction() == nullptr);
6874 special_input_ = HUserRecord<HInstruction*>(special_input);
6875 special_input->AddUseAt(GetBlock()->GetGraph()->GetAllocator(), this, 0);
6876 }
6877
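/**
 * Instruction to load a String object. The load kind determines how the codegen
 * materializes the reference, similar to HLoadClass::LoadKind.
 */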
6878 class HLoadString final : public HInstruction {
6879 public:
6880 // Determines how to load the String.
6881 enum class LoadKind {
6882 // Use PC-relative boot image String* address that will be known at link time.
6883 // Used for boot image strings referenced by boot image code.
6884 kBootImageLinkTimePcRelative,
6885
6886 // Load from a boot image entry in the .data.img.rel.ro using a PC-relative load.
6887 // Used for boot image strings referenced by apps in AOT-compiled code.
6888 kBootImageRelRo,
6889
6890 // Load from an entry in the .bss section using a PC-relative load.
6891 // Used for strings outside boot image referenced by AOT-compiled app and boot image code.
6892 kBssEntry,
6893
6894 // Use a known boot image String* address, embedded in the code by the codegen.
6895 // Used for boot image strings referenced by apps in JIT-compiled code.
6896 kJitBootImageAddress,
6897
6898 // Load from the root table associated with the JIT compiled method.
6899 kJitTableAddress,
6900
6901 // Load using a simple runtime call. This is the fall-back load kind when
6902 // the codegen is unable to use another appropriate kind.
6903 kRuntimeCall,
6904
6905 kLast = kRuntimeCall,
6906 };
6907
6908 HLoadString(HCurrentMethod* current_method,
6909 dex::StringIndex string_index,
6910 const DexFile& dex_file,
6911 uint32_t dex_pc)
6912 : HInstruction(kLoadString,
6913 DataType::Type::kReference,
6914 SideEffectsForArchRuntimeCalls(),
6915 dex_pc),
6916 special_input_(HUserRecord<HInstruction*>(current_method)),
6917 string_index_(string_index),
6918 dex_file_(dex_file) {
6919 SetPackedField<LoadKindField>(LoadKind::kRuntimeCall);
6920 }
6921
6922 bool IsClonable() const override { return true; }
6923 bool NeedsBss() const override {
6924 return GetLoadKind() == LoadKind::kBssEntry;
6925 }
6926
6927 void SetLoadKind(LoadKind load_kind);
6928
6929 LoadKind GetLoadKind() const {
6930 return GetPackedField<LoadKindField>();
6931 }
6932
6933 bool HasPcRelativeLoadKind() const {
6934 return GetLoadKind() == LoadKind::kBootImageLinkTimePcRelative ||
6935 GetLoadKind() == LoadKind::kBootImageRelRo ||
6936 GetLoadKind() == LoadKind::kBssEntry;
6937 }
6938
6939 const DexFile& GetDexFile() const {
6940 return dex_file_;
6941 }
6942
6943 dex::StringIndex GetStringIndex() const {
6944 return string_index_;
6945 }
6946
6947 Handle<mirror::String> GetString() const {
6948 return string_;
6949 }
6950
6951 void SetString(Handle<mirror::String> str) {
6952 string_ = str;
6953 }
6954
6955 bool CanBeMoved() const override { return true; }
6956
6957 bool InstructionDataEquals(const HInstruction* other) const override;
6958
6959 size_t ComputeHashCode() const override { return string_index_.index_; }
6960
6961 // Will call the runtime if we need to load the string through
6962 // the dex cache and the string is not guaranteed to be there yet.
6963 bool NeedsEnvironment() const override {
6964 LoadKind load_kind = GetLoadKind();
6965 if (load_kind == LoadKind::kBootImageLinkTimePcRelative ||
6966 load_kind == LoadKind::kBootImageRelRo ||
6967 load_kind == LoadKind::kJitBootImageAddress ||
6968 load_kind == LoadKind::kJitTableAddress) {
6969 return false;
6970 }
6971 return true;
6972 }
6973
6974 bool CanBeNull() const override { return false; }
6975 bool CanThrow() const override { return NeedsEnvironment(); }
6976
6977 static SideEffects SideEffectsForArchRuntimeCalls() {
6978 return SideEffects::CanTriggerGC();
6979 }
6980
6981 void AddSpecialInput(HInstruction* special_input);
6982
6983 using HInstruction::GetInputRecords; // Keep the const version visible.
6984 ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() final {
6985 return ArrayRef<HUserRecord<HInstruction*>>(
6986 &special_input_, (special_input_.GetInstruction() != nullptr) ? 1u : 0u);
6987 }
6988
6989 DECLARE_INSTRUCTION(LoadString);
6990
6991 protected:
6992 DEFAULT_COPY_CONSTRUCTOR(LoadString);
6993
6994 private:
6995 static constexpr size_t kFieldLoadKind = kNumberOfGenericPackedBits;
6996 static constexpr size_t kFieldLoadKindSize =
6997 MinimumBitsToStore(static_cast<size_t>(LoadKind::kLast));
6998 static constexpr size_t kNumberOfLoadStringPackedBits = kFieldLoadKind + kFieldLoadKindSize;
6999 static_assert(kNumberOfLoadStringPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
7000 using LoadKindField = BitField<LoadKind, kFieldLoadKind, kFieldLoadKindSize>;
7001
7002 void SetLoadKindInternal(LoadKind load_kind);
7003
7004 // The special input is the HCurrentMethod for kRuntimeCall.
7005 // For other load kinds it's empty or possibly some architecture-specific instruction
7006 // for PC-relative loads, i.e. kBssEntry or kBootImageLinkTimePcRelative.
7007 HUserRecord<HInstruction*> special_input_;
7008
7009 dex::StringIndex string_index_;
7010 const DexFile& dex_file_;
7011
7012 Handle<mirror::String> string_;
7013 };
7014 std::ostream& operator<<(std::ostream& os, HLoadString::LoadKind rhs);
7015
7016 // Note: defined outside class to see operator<<(., HLoadString::LoadKind).
7017 inline void HLoadString::SetLoadKind(LoadKind load_kind) {
7018 // The load kind should be determined before inserting the instruction to the graph.
7019 DCHECK(GetBlock() == nullptr);
7020 DCHECK(GetEnvironment() == nullptr);
7021 DCHECK_EQ(GetLoadKind(), LoadKind::kRuntimeCall);
7022 SetPackedField<LoadKindField>(load_kind);
7023 if (load_kind != LoadKind::kRuntimeCall) {
7024 special_input_ = HUserRecord<HInstruction*>(nullptr);
7025 }
7026 if (!NeedsEnvironment()) {
7027 SetSideEffects(SideEffects::None());
7028 }
7029 }
7030
7031 // Note: defined outside class to see operator<<(., HLoadString::LoadKind).
7032 inline void HLoadString::AddSpecialInput(HInstruction* special_input) {
7033 // The special input is used for PC-relative loads on some architectures,
7034 // including literal pool loads, which are PC-relative too.
7035 DCHECK(GetLoadKind() == LoadKind::kBootImageLinkTimePcRelative ||
7036 GetLoadKind() == LoadKind::kBootImageRelRo ||
7037 GetLoadKind() == LoadKind::kBssEntry ||
7038 GetLoadKind() == LoadKind::kJitBootImageAddress) << GetLoadKind();
7039 // HLoadString::GetInputRecords() returns an empty array at this point,
7040 // so use the GetInputRecords() from the base class to set the input record.
7041 DCHECK(special_input_.GetInstruction() == nullptr);
7042 special_input_ = HUserRecord<HInstruction*>(special_input);
7043 special_input->AddUseAt(GetBlock()->GetGraph()->GetAllocator(), this, 0);
7044 }
7045
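// Instruction to load a java.lang.invoke.MethodHandle for the given method handle index.
// Resolution goes through a runtime call, so it needs an environment and can throw.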
7046 class HLoadMethodHandle final : public HInstruction {
7047 public:
7048 HLoadMethodHandle(HCurrentMethod* current_method,
7049 uint16_t method_handle_idx,
7050 const DexFile& dex_file,
7051 uint32_t dex_pc)
7052 : HInstruction(kLoadMethodHandle,
7053 DataType::Type::kReference,
7054 SideEffectsForArchRuntimeCalls(),
7055 dex_pc),
7056 special_input_(HUserRecord<HInstruction*>(current_method)),
7057 method_handle_idx_(method_handle_idx),
7058 dex_file_(dex_file) {
7059 }
7060
7061 using HInstruction::GetInputRecords; // Keep the const version visible.
7062 ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() final {
7063 return ArrayRef<HUserRecord<HInstruction*>>(
7064 &special_input_, (special_input_.GetInstruction() != nullptr) ? 1u : 0u);
7065 }
7066
7067 bool IsClonable() const override { return true; }
7068
7069 uint16_t GetMethodHandleIndex() const { return method_handle_idx_; }
7070
7071 const DexFile& GetDexFile() const { return dex_file_; }
7072
7073 static SideEffects SideEffectsForArchRuntimeCalls() {
7074 return SideEffects::CanTriggerGC();
7075 }
7076
7077 bool CanThrow() const override { return true; }
7078
7079 bool NeedsEnvironment() const override { return true; }
7080
7081 DECLARE_INSTRUCTION(LoadMethodHandle);
7082
7083 protected:
7084 DEFAULT_COPY_CONSTRUCTOR(LoadMethodHandle);
7085
7086 private:
7087 // The special input is the HCurrentMethod for kRuntimeCall.
7088 HUserRecord<HInstruction*> special_input_;
7089
7090 const uint16_t method_handle_idx_;
7091 const DexFile& dex_file_;
7092 };
7093
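// Instruction to load a java.lang.invoke.MethodType for the given proto index. The load kind
// selects between a .bss entry, the JIT root table and a runtime call.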
7094 class HLoadMethodType final : public HInstruction {
7095 public:
7096 // Determines how to load the MethodType.
7097 enum class LoadKind {
7098 // Load from an entry in the .bss section using a PC-relative load.
7099 kBssEntry,
7100 // Load from the root table associated with the JIT compiled method.
7101 kJitTableAddress,
7102 // Load using a single runtime call.
7103 kRuntimeCall,
7104
7105 kLast = kRuntimeCall,
7106 };
7107
7108 HLoadMethodType(HCurrentMethod* current_method,
7109 dex::ProtoIndex proto_index,
7110 const DexFile& dex_file,
7111 uint32_t dex_pc)
7112 : HInstruction(kLoadMethodType,
7113 DataType::Type::kReference,
7114 SideEffectsForArchRuntimeCalls(),
7115 dex_pc),
7116 special_input_(HUserRecord<HInstruction*>(current_method)),
7117 proto_index_(proto_index),
7118 dex_file_(dex_file) {
7119 SetPackedField<LoadKindField>(LoadKind::kRuntimeCall);
7120 }
7121
7122 using HInstruction::GetInputRecords; // Keep the const version visible.
7123 ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() final {
7124 return ArrayRef<HUserRecord<HInstruction*>>(
7125 &special_input_, (special_input_.GetInstruction() != nullptr) ? 1u : 0u);
7126 }
7127
7128 bool IsClonable() const override { return true; }
7129
7130 void SetLoadKind(LoadKind load_kind);
7131
7132 LoadKind GetLoadKind() const {
7133 return GetPackedField<LoadKindField>();
7134 }
7135
7136 dex::ProtoIndex GetProtoIndex() const { return proto_index_; }
7137
7138 Handle<mirror::MethodType> GetMethodType() const { return method_type_; }
7139
7140 void SetMethodType(Handle<mirror::MethodType> method_type) { method_type_ = method_type; }
7141
7142 const DexFile& GetDexFile() const { return dex_file_; }
7143
7144 static SideEffects SideEffectsForArchRuntimeCalls() {
7145 return SideEffects::CanTriggerGC();
7146 }
7147
7148 bool CanThrow() const override { return true; }
7149
7150 bool NeedsEnvironment() const override { return true; }
7151
7152 DECLARE_INSTRUCTION(LoadMethodType);
7153
7154 protected:
7155 DEFAULT_COPY_CONSTRUCTOR(LoadMethodType);
7156
7157 private:
7158 static constexpr size_t kFieldLoadKind = kNumberOfGenericPackedBits;
7159 static constexpr size_t kFieldLoadKindSize =
7160 MinimumBitsToStore(static_cast<size_t>(LoadKind::kLast));
7161 static constexpr size_t kNumberOfLoadMethodTypePackedBits = kFieldLoadKind + kFieldLoadKindSize;
7162 static_assert(kNumberOfLoadMethodTypePackedBits <= kMaxNumberOfPackedBits,
7163 "Too many packed fields.");
7164 using LoadKindField = BitField<LoadKind, kFieldLoadKind, kFieldLoadKindSize>;
7165
7166 // The special input is the HCurrentMethod for kRuntimeCall.
7167 HUserRecord<HInstruction*> special_input_;
7168
7169 const dex::ProtoIndex proto_index_;
7170 const DexFile& dex_file_;
7171
7172 Handle<mirror::MethodType> method_type_;
7173 };
7174
7175 std::ostream& operator<<(std::ostream& os, HLoadMethodType::LoadKind rhs);
7176
7177 // Note: defined outside class to see operator<<(., HLoadMethodType::LoadKind).
7178 inline void HLoadMethodType::SetLoadKind(LoadKind load_kind) {
7179 // The load kind should be determined before inserting the instruction to the graph.
7180 DCHECK(GetBlock() == nullptr);
7181 DCHECK(GetEnvironment() == nullptr);
7182 DCHECK_EQ(GetLoadKind(), LoadKind::kRuntimeCall);
7183 DCHECK_IMPLIES(load_kind == LoadKind::kJitTableAddress, GetMethodType() != nullptr);
7184 SetPackedField<LoadKindField>(load_kind);
7185 }
7186
7187 /**
7188 * Performs an initialization check on its Class object input.
7189 */
7190 class HClinitCheck final : public HExpression<1> {
7191 public:
7192 HClinitCheck(HLoadClass* constant, uint32_t dex_pc)
7193 : HExpression(
7194 kClinitCheck,
7195 DataType::Type::kReference,
7196 SideEffects::AllExceptGCDependency(), // Assume write/read on all fields/arrays.
7197 dex_pc) {
7198 SetRawInputAt(0, constant);
7199 }
7200 // TODO: Make ClinitCheck clonable.
7201 bool CanBeMoved() const override { return true; }
7202 bool InstructionDataEquals([[maybe_unused]] const HInstruction* other) const override {
7203 return true;
7204 }
7205
7206 bool NeedsEnvironment() const override {
7207 // May call runtime to initialize the class.
7208 return true;
7209 }
7210
7211 bool CanThrow() const override { return true; }
7212
7213 HLoadClass* GetLoadClass() const {
7214 DCHECK(InputAt(0)->IsLoadClass());
7215 return InputAt(0)->AsLoadClass();
7216 }
7217
7218 DECLARE_INSTRUCTION(ClinitCheck);
7219
7221 protected:
7222 DEFAULT_COPY_CONSTRUCTOR(ClinitCheck);
7223 };
7224
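// Reads a static field. Input 0 is the holder class, typically produced by HLoadClass or
// HClinitCheck.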
7225 class HStaticFieldGet final : public HExpression<1, HFieldAccess> {
7226 public:
7227 HStaticFieldGet(HInstruction* cls,
7228 ArtField* field,
7229 DataType::Type field_type,
7230 MemberOffset field_offset,
7231 bool is_volatile,
7232 uint32_t field_idx,
7233 uint16_t declaring_class_def_index,
7234 const DexFile& dex_file,
7235 uint32_t dex_pc)
7236 : HExpression(kStaticFieldGet,
7237 SideEffects::FieldReadOfType(field_type, is_volatile),
7238 field,
7239 field_type,
7240 field_offset,
7241 is_volatile,
7242 field_idx,
7243 declaring_class_def_index,
7244 dex_file,
7245 dex_pc) {
7246 SetRawInputAt(0, cls);
7247 }
7248
7250 bool IsClonable() const override { return true; }
7251 bool CanBeMoved() const override { return !IsVolatile(); }
7252
7253 bool InstructionDataEquals(const HInstruction* other) const override {
7254 const HStaticFieldGet* other_get = other->AsStaticFieldGet();
7255 return GetFieldOffset().SizeValue() == other_get->GetFieldOffset().SizeValue();
7256 }
7257
7258 size_t ComputeHashCode() const override {
7259 return (HInstruction::ComputeHashCode() << 7) | GetFieldOffset().SizeValue();
7260 }
7261
7262 void SetType(DataType::Type new_type) {
7263 DCHECK(DataType::IsIntegralType(GetType()));
7264 DCHECK(DataType::IsIntegralType(new_type));
7265 DCHECK_EQ(DataType::Size(GetType()), DataType::Size(new_type));
7266 SetPackedField<TypeField>(new_type);
7267 }
7268
7269 DECLARE_INSTRUCTION(StaticFieldGet);
7270
7271 protected:
7272 DEFAULT_COPY_CONSTRUCTOR(StaticFieldGet);
7273 };
7274
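// Writes `value` (input 1) into a static field of the holder class in input 0. Like
// HInstanceFieldSet, it tracks whether the value can be null and the write barrier kind.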
7275 class HStaticFieldSet final : public HExpression<2, HFieldAccess> {
7276 public:
7277 HStaticFieldSet(HInstruction* cls,
7278 HInstruction* value,
7279 ArtField* field,
7280 DataType::Type field_type,
7281 MemberOffset field_offset,
7282 bool is_volatile,
7283 uint32_t field_idx,
7284 uint16_t declaring_class_def_index,
7285 const DexFile& dex_file,
7286 uint32_t dex_pc)
7287 : HExpression(kStaticFieldSet,
7288 SideEffects::FieldWriteOfType(field_type, is_volatile),
7289 field,
7290 field_type,
7291 field_offset,
7292 is_volatile,
7293 field_idx,
7294 declaring_class_def_index,
7295 dex_file,
7296 dex_pc) {
7297 SetPackedFlag<kFlagValueCanBeNull>(true);
7298 SetPackedField<WriteBarrierKindField>(
7299 field_type == DataType::Type::kReference
7300 ? WriteBarrierKind::kEmitNotBeingReliedOn
7301 : WriteBarrierKind::kDontEmit);
7302 SetRawInputAt(0, cls);
7303 SetRawInputAt(1, value);
7304 }
7305
7306 bool IsClonable() const override { return true; }
7307
7308 HInstruction* GetValue() const { return InputAt(1); }
7309 bool GetValueCanBeNull() const { return GetPackedFlag<kFlagValueCanBeNull>(); }
7310 void ClearValueCanBeNull() { SetPackedFlag<kFlagValueCanBeNull>(false); }
7311
7312 WriteBarrierKind GetWriteBarrierKind() { return GetPackedField<WriteBarrierKindField>(); }
7313 void SetWriteBarrierKind(WriteBarrierKind kind) {
7314 DCHECK(kind != WriteBarrierKind::kEmitNotBeingReliedOn)
7315 << "We shouldn't go back to the original value.";
7316 DCHECK_IMPLIES(kind == WriteBarrierKind::kDontEmit,
7317 GetWriteBarrierKind() != WriteBarrierKind::kEmitBeingReliedOn)
7318 << "If a write barrier was relied on by other write barriers, we cannot skip emitting it.";
7319 SetPackedField<WriteBarrierKindField>(kind);
7320 }
7321
7322 DECLARE_INSTRUCTION(StaticFieldSet);
7323
7324 protected:
7325 DEFAULT_COPY_CONSTRUCTOR(StaticFieldSet);
7326
7327 private:
7328 static constexpr size_t kFlagValueCanBeNull = kNumberOfGenericPackedBits;
7329 static constexpr size_t kWriteBarrierKind = kFlagValueCanBeNull + 1;
7330 static constexpr size_t kWriteBarrierKindSize =
7331 MinimumBitsToStore(static_cast<size_t>(WriteBarrierKind::kLast));
7332 static constexpr size_t kNumberOfStaticFieldSetPackedBits =
7333 kWriteBarrierKind + kWriteBarrierKindSize;
7334 static_assert(kNumberOfStaticFieldSetPackedBits <= kMaxNumberOfPackedBits,
7335 "Too many packed fields.");
7336
7337 using WriteBarrierKindField =
7338 BitField<WriteBarrierKind, kWriteBarrierKind, kWriteBarrierKindSize>;
7339 };
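
// For illustration, an HStaticFieldSet storing a reference starts out with the
// kEmitNotBeingReliedOn barrier kind (set in the constructor above). A later write barrier
// pass may then either mark it as kEmitBeingReliedOn, when subsequent stores depend on its
// barrier, or as kDontEmit, when an earlier barrier already covers it; per the DCHECKs in
// SetWriteBarrierKind(), a relied-upon barrier can never be downgraded to kDontEmit.
// (A sketch of the intended usage; the actual transitions are performed by a separate pass.)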
7340
7341 class HStringBuilderAppend final : public HVariableInputSizeInstruction {
7342 public:
7343 HStringBuilderAppend(HIntConstant* format,
7344 uint32_t number_of_arguments,
7345 uint32_t number_of_out_vregs,
7346 bool has_fp_args,
7347 ArenaAllocator* allocator,
7348 uint32_t dex_pc)
7349 : HVariableInputSizeInstruction(
7350 kStringBuilderAppend,
7351 DataType::Type::kReference,
7352 SideEffects::CanTriggerGC().Union(
7353 // The runtime call may read memory from inputs. It never writes outside
7354 // of the newly allocated result object or newly allocated helper objects,
7355 // except for float/double arguments where we reuse thread-local helper objects.
7356 has_fp_args ? SideEffects::AllWritesAndReads() : SideEffects::AllReads()),
7357 dex_pc,
7358 allocator,
7359 number_of_arguments + /* format */ 1u,
7360 kArenaAllocInvokeInputs),
7361 number_of_out_vregs_(number_of_out_vregs) {
7362 DCHECK_GE(number_of_arguments, 1u); // There must be something to append.
7363 SetRawInputAt(FormatIndex(), format);
7364 }
7365
7366 void SetArgumentAt(size_t index, HInstruction* argument) {
7367 DCHECK_LT(index, GetNumberOfArguments());
7368 SetRawInputAt(index, argument);
7369 }
7370
7371 // Return the number of arguments, excluding the format.
7372 size_t GetNumberOfArguments() const {
7373 DCHECK_GE(InputCount(), 1u);
7374 return InputCount() - 1u;
7375 }
7376
7377 // Return the number of outgoing vregs.
7378 uint32_t GetNumberOfOutVRegs() const { return number_of_out_vregs_; }
7379
7380 size_t FormatIndex() const {
7381 return GetNumberOfArguments();
7382 }
7383
7384 HIntConstant* GetFormat() {
7385 return InputAt(FormatIndex())->AsIntConstant();
7386 }
7387
7388 bool NeedsEnvironment() const override { return true; }
7389
7390 bool CanThrow() const override { return true; }
7391
7392 bool CanBeNull() const override { return false; }
7393
7394 DECLARE_INSTRUCTION(StringBuilderAppend);
7395
7396 protected:
7397 DEFAULT_COPY_CONSTRUCTOR(StringBuilderAppend);
7398
7399 private:
7400 uint32_t number_of_out_vregs_;
7401 };
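
// For illustration, an HStringBuilderAppend with two appended values lays out its inputs with
// the format constant last (a minimal sketch of the layout implied by the accessors above):
//
//   inputs[0] = arg0    // first appended value, set via SetArgumentAt(0, arg0)
//   inputs[1] = arg1    // second appended value, set via SetArgumentAt(1, arg1)
//   inputs[2] = format  // HIntConstant describing the argument kinds
//
// so GetNumberOfArguments() == 2 and FormatIndex() == 2 for that instruction.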
7402
7403 class HUnresolvedInstanceFieldGet final : public HExpression<1> {
7404 public:
7405 HUnresolvedInstanceFieldGet(HInstruction* obj,
7406 DataType::Type field_type,
7407 uint32_t field_index,
7408 uint32_t dex_pc)
7409 : HExpression(kUnresolvedInstanceFieldGet,
7410 field_type,
7411 SideEffects::AllExceptGCDependency(),
7412 dex_pc),
7413 field_index_(field_index) {
7414 SetRawInputAt(0, obj);
7415 }
7416
7417 bool IsClonable() const override { return true; }
7418 bool NeedsEnvironment() const override { return true; }
7419 bool CanThrow() const override { return true; }
7420
7421 DataType::Type GetFieldType() const { return GetType(); }
7422 uint32_t GetFieldIndex() const { return field_index_; }
7423
7424 DECLARE_INSTRUCTION(UnresolvedInstanceFieldGet);
7425
7426 protected:
7427 DEFAULT_COPY_CONSTRUCTOR(UnresolvedInstanceFieldGet);
7428
7429 private:
7430 const uint32_t field_index_;
7431 };
7432
7433 class HUnresolvedInstanceFieldSet final : public HExpression<2> {
7434 public:
7435 HUnresolvedInstanceFieldSet(HInstruction* obj,
7436 HInstruction* value,
7437 DataType::Type field_type,
7438 uint32_t field_index,
7439 uint32_t dex_pc)
7440 : HExpression(kUnresolvedInstanceFieldSet, SideEffects::AllExceptGCDependency(), dex_pc),
7441 field_index_(field_index) {
7442 SetPackedField<FieldTypeField>(field_type);
7443 DCHECK_EQ(DataType::Kind(field_type), DataType::Kind(value->GetType()));
7444 SetRawInputAt(0, obj);
7445 SetRawInputAt(1, value);
7446 }
7447
7448 bool IsClonable() const override { return true; }
7449 bool NeedsEnvironment() const override { return true; }
7450 bool CanThrow() const override { return true; }
7451
7452 DataType::Type GetFieldType() const { return GetPackedField<FieldTypeField>(); }
7453 uint32_t GetFieldIndex() const { return field_index_; }
7454
7455 DECLARE_INSTRUCTION(UnresolvedInstanceFieldSet);
7456
7457 protected:
7458 DEFAULT_COPY_CONSTRUCTOR(UnresolvedInstanceFieldSet);
7459
7460 private:
7461 static constexpr size_t kFieldFieldType = HInstruction::kNumberOfGenericPackedBits;
7462 static constexpr size_t kFieldFieldTypeSize =
7463 MinimumBitsToStore(static_cast<size_t>(DataType::Type::kLast));
7464 static constexpr size_t kNumberOfUnresolvedInstanceFieldSetPackedBits =
7465 kFieldFieldType + kFieldFieldTypeSize;
7466 static_assert(kNumberOfUnresolvedInstanceFieldSetPackedBits <= HInstruction::kMaxNumberOfPackedBits,
7467 "Too many packed fields.");
7468 using FieldTypeField = BitField<DataType::Type, kFieldFieldType, kFieldFieldTypeSize>;
7469
7470 const uint32_t field_index_;
7471 };
7472
7473 class HUnresolvedStaticFieldGet final : public HExpression<0> {
7474 public:
7475 HUnresolvedStaticFieldGet(DataType::Type field_type,
7476 uint32_t field_index,
7477 uint32_t dex_pc)
7478 : HExpression(kUnresolvedStaticFieldGet,
7479 field_type,
7480 SideEffects::AllExceptGCDependency(),
7481 dex_pc),
7482 field_index_(field_index) {
7483 }
7484
7485 bool IsClonable() const override { return true; }
7486 bool NeedsEnvironment() const override { return true; }
7487 bool CanThrow() const override { return true; }
7488
7489 DataType::Type GetFieldType() const { return GetType(); }
7490 uint32_t GetFieldIndex() const { return field_index_; }
7491
7492 DECLARE_INSTRUCTION(UnresolvedStaticFieldGet);
7493
7494 protected:
7495 DEFAULT_COPY_CONSTRUCTOR(UnresolvedStaticFieldGet);
7496
7497 private:
7498 const uint32_t field_index_;
7499 };
7500
7501 class HUnresolvedStaticFieldSet final : public HExpression<1> {
7502 public:
7503 HUnresolvedStaticFieldSet(HInstruction* value,
7504 DataType::Type field_type,
7505 uint32_t field_index,
7506 uint32_t dex_pc)
7507 : HExpression(kUnresolvedStaticFieldSet, SideEffects::AllExceptGCDependency(), dex_pc),
7508 field_index_(field_index) {
7509 SetPackedField<FieldTypeField>(field_type);
7510 DCHECK_EQ(DataType::Kind(field_type), DataType::Kind(value->GetType()));
7511 SetRawInputAt(0, value);
7512 }
7513
7514 bool IsClonable() const override { return true; }
7515 bool NeedsEnvironment() const override { return true; }
7516 bool CanThrow() const override { return true; }
7517
7518 DataType::Type GetFieldType() const { return GetPackedField<FieldTypeField>(); }
7519 uint32_t GetFieldIndex() const { return field_index_; }
7520
7521 DECLARE_INSTRUCTION(UnresolvedStaticFieldSet);
7522
7523 protected:
7524 DEFAULT_COPY_CONSTRUCTOR(UnresolvedStaticFieldSet);
7525
7526 private:
7527 static constexpr size_t kFieldFieldType = HInstruction::kNumberOfGenericPackedBits;
7528 static constexpr size_t kFieldFieldTypeSize =
7529 MinimumBitsToStore(static_cast<size_t>(DataType::Type::kLast));
7530 static constexpr size_t kNumberOfUnresolvedStaticFieldSetPackedBits =
7531 kFieldFieldType + kFieldFieldTypeSize;
7532 static_assert(kNumberOfUnresolvedStaticFieldSetPackedBits <= HInstruction::kMaxNumberOfPackedBits,
7533 "Too many packed fields.");
7534 using FieldTypeField = BitField<DataType::Type, kFieldFieldType, kFieldFieldTypeSize>;
7535
7536 const uint32_t field_index_;
7537 };
7538
7539 // Implement the move-exception DEX instruction.
7540 class HLoadException final : public HExpression<0> {
7541 public:
7542 explicit HLoadException(uint32_t dex_pc = kNoDexPc)
7543 : HExpression(kLoadException, DataType::Type::kReference, SideEffects::None(), dex_pc) {
7544 }
7545
7546 bool CanBeNull() const override { return false; }
7547
7548 DECLARE_INSTRUCTION(LoadException);
7549
7550 protected:
7551 DEFAULT_COPY_CONSTRUCTOR(LoadException);
7552 };
7553
7554 // Implicit part of move-exception which clears thread-local exception storage.
7555 // Must not be removed because the runtime expects the TLS to get cleared.
7556 class HClearException final : public HExpression<0> {
7557 public:
7558 explicit HClearException(uint32_t dex_pc = kNoDexPc)
7559 : HExpression(kClearException, SideEffects::AllWrites(), dex_pc) {
7560 }
7561
7562 DECLARE_INSTRUCTION(ClearException);
7563
7564 protected:
7565 DEFAULT_COPY_CONSTRUCTOR(ClearException);
7566 };
7567
7568 class HThrow final : public HExpression<1> {
7569 public:
7570 HThrow(HInstruction* exception, uint32_t dex_pc)
7571 : HExpression(kThrow, SideEffects::CanTriggerGC(), dex_pc) {
7572 SetRawInputAt(0, exception);
7573 }
7574
7575 bool IsControlFlow() const override { return true; }
7576
7577 bool NeedsEnvironment() const override { return true; }
7578
7579 bool CanThrow() const override { return true; }
7580
7581 bool AlwaysThrows() const override { return true; }
7582
7583 DECLARE_INSTRUCTION(Throw);
7584
7585 protected:
7586 DEFAULT_COPY_CONSTRUCTOR(Throw);
7587 };
7588
7589 /**
7590 * Implementation strategies for the code generator of an `HInstanceOf`
7591 * or `HCheckCast`.
7592 */
7593 enum class TypeCheckKind {  // private marker to prevent generate-operator-out.py from processing this enum.
7594 kUnresolvedCheck, // Check against an unresolved type.
7595 kExactCheck, // Can do a single class compare.
7596 kClassHierarchyCheck, // Can just walk the super class chain.
7597 kAbstractClassCheck, // Can just walk the super class chain, starting one up.
7598 kInterfaceCheck, // No optimization yet when checking against an interface.
7599 kArrayObjectCheck, // Can just check if the array is not primitive.
7600 kArrayCheck, // No optimization yet when checking against a generic array.
7601 kBitstringCheck, // Compare the type check bitstring.
7602 kLast = kBitstringCheck
7603 };
7604
7605 std::ostream& operator<<(std::ostream& os, TypeCheckKind rhs);
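
// For illustration, the check kind roughly follows the shape of the target type; e.g. for
// `obj instanceof X` one might see (a sketch; the builder picks the cheapest applicable kind):
//
//   X = String       (final class)     -> kExactCheck          single class compare
//   X = ArrayList    (concrete class)  -> kClassHierarchyCheck walk the super class chain
//   X = AbstractList (abstract class)  -> kAbstractClassCheck  walk supers, starting one up
//   X = Comparable   (interface)       -> kInterfaceCheck      no fast path yet
//   X = Object[]                       -> kArrayObjectCheck    "is a non-primitive array"
//   X = int[][]      (generic array)   -> kArrayCheck          no fast path yet
//
// kUnresolvedCheck is used when X cannot be resolved at compile time, and kBitstringCheck
// compares a precomputed type-check bitstring instead of walking the hierarchy.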
7606
7607 // Note: HTypeCheckInstruction is just a helper class, not an abstract instruction with an
7608 // `IsTypeCheckInstruction()`. (New virtual methods in the HInstruction class have a high cost.)
7609 class HTypeCheckInstruction : public HVariableInputSizeInstruction {
7610 public:
7611 HTypeCheckInstruction(InstructionKind kind,
7612 DataType::Type type,
7613 HInstruction* object,
7614 HInstruction* target_class_or_null,
7615 TypeCheckKind check_kind,
7616 Handle<mirror::Class> klass,
7617 uint32_t dex_pc,
7618 ArenaAllocator* allocator,
7619 HIntConstant* bitstring_path_to_root,
7620 HIntConstant* bitstring_mask,
7621 SideEffects side_effects)
7622 : HVariableInputSizeInstruction(
7623 kind,
7624 type,
7625 side_effects,
7626 dex_pc,
7627 allocator,
7628 /* number_of_inputs= */ check_kind == TypeCheckKind::kBitstringCheck ? 4u : 2u,
7629 kArenaAllocTypeCheckInputs),
7630 klass_(klass) {
7631 SetPackedField<TypeCheckKindField>(check_kind);
7632 SetPackedFlag<kFlagMustDoNullCheck>(true);
7633 SetPackedFlag<kFlagValidTargetClassRTI>(false);
7634 SetRawInputAt(0, object);
7635 SetRawInputAt(1, target_class_or_null);
7636 DCHECK_EQ(check_kind == TypeCheckKind::kBitstringCheck, bitstring_path_to_root != nullptr);
7637 DCHECK_EQ(check_kind == TypeCheckKind::kBitstringCheck, bitstring_mask != nullptr);
7638 if (check_kind == TypeCheckKind::kBitstringCheck) {
7639 DCHECK(target_class_or_null->IsNullConstant());
7640 SetRawInputAt(2, bitstring_path_to_root);
7641 SetRawInputAt(3, bitstring_mask);
7642 } else {
7643 DCHECK(target_class_or_null->IsLoadClass());
7644 }
7645 }
7646
7647 HLoadClass* GetTargetClass() const {
7648 DCHECK_NE(GetTypeCheckKind(), TypeCheckKind::kBitstringCheck);
7649 HInstruction* load_class = InputAt(1);
7650 DCHECK(load_class->IsLoadClass());
7651 return load_class->AsLoadClass();
7652 }
7653
7654 uint32_t GetBitstringPathToRoot() const {
7655 DCHECK_EQ(GetTypeCheckKind(), TypeCheckKind::kBitstringCheck);
7656 HInstruction* path_to_root = InputAt(2);
7657 DCHECK(path_to_root->IsIntConstant());
7658 return static_cast<uint32_t>(path_to_root->AsIntConstant()->GetValue());
7659 }
7660
7661 uint32_t GetBitstringMask() const {
7662 DCHECK_EQ(GetTypeCheckKind(), TypeCheckKind::kBitstringCheck);
7663 HInstruction* mask = InputAt(3);
7664 DCHECK(mask->IsIntConstant());
7665 return static_cast<uint32_t>(mask->AsIntConstant()->GetValue());
7666 }
7667
7668 bool IsClonable() const override { return true; }
7669 bool CanBeMoved() const override { return true; }
7670
7671 bool InstructionDataEquals(const HInstruction* other) const override {
7672 DCHECK(other->IsInstanceOf() || other->IsCheckCast()) << other->DebugName();
7673 return GetPackedFields() == down_cast<const HTypeCheckInstruction*>(other)->GetPackedFields();
7674 }
7675
7676 bool MustDoNullCheck() const { return GetPackedFlag<kFlagMustDoNullCheck>(); }
7677 void ClearMustDoNullCheck() { SetPackedFlag<kFlagMustDoNullCheck>(false); }
7678 TypeCheckKind GetTypeCheckKind() const { return GetPackedField<TypeCheckKindField>(); }
7679 bool IsExactCheck() const { return GetTypeCheckKind() == TypeCheckKind::kExactCheck; }
7680
7681 ReferenceTypeInfo GetTargetClassRTI() {
7682 if (GetPackedFlag<kFlagValidTargetClassRTI>()) {
7683 // Note: The is_exact flag from the return value should not be used.
7684 return ReferenceTypeInfo::CreateUnchecked(klass_, /* is_exact= */ true);
7685 } else {
7686 return ReferenceTypeInfo::CreateInvalid();
7687 }
7688 }
7689
7690 // Target class RTI is marked as valid by RTP if the klass_ is admissible.
7691 void SetValidTargetClassRTI() {
7692 DCHECK(klass_ != nullptr);
7693 SetPackedFlag<kFlagValidTargetClassRTI>(true);
7694 }
7695
7696 Handle<mirror::Class> GetClass() const {
7697 return klass_;
7698 }
7699
7700 protected:
7701 DEFAULT_COPY_CONSTRUCTOR(TypeCheckInstruction);
7702
7703 private:
7704 static constexpr size_t kFieldTypeCheckKind = kNumberOfGenericPackedBits;
7705 static constexpr size_t kFieldTypeCheckKindSize =
7706 MinimumBitsToStore(static_cast<size_t>(TypeCheckKind::kLast));
7707 static constexpr size_t kFlagMustDoNullCheck = kFieldTypeCheckKind + kFieldTypeCheckKindSize;
7708 static constexpr size_t kFlagValidTargetClassRTI = kFlagMustDoNullCheck + 1;
7709 static constexpr size_t kNumberOfInstanceOfPackedBits = kFlagValidTargetClassRTI + 1;
7710 static_assert(kNumberOfInstanceOfPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
7711 using TypeCheckKindField = BitField<TypeCheckKind, kFieldTypeCheckKind, kFieldTypeCheckKindSize>;
7712
7713 Handle<mirror::Class> klass_;
7714 };
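
// For illustration, the input layout of an HTypeCheckInstruction subclass is (a sketch based
// on the constructor above):
//
//   inputs[0] = object being checked
//   inputs[1] = HLoadClass of the target class, or HNullConstant for kBitstringCheck
//   inputs[2] = bitstring path-to-root  (kBitstringCheck only)
//   inputs[3] = bitstring mask          (kBitstringCheck only)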
7715
7716 class HInstanceOf final : public HTypeCheckInstruction {
7717 public:
7718 HInstanceOf(HInstruction* object,
7719 HInstruction* target_class_or_null,
7720 TypeCheckKind check_kind,
7721 Handle<mirror::Class> klass,
7722 uint32_t dex_pc,
7723 ArenaAllocator* allocator,
7724 HIntConstant* bitstring_path_to_root,
7725 HIntConstant* bitstring_mask)
7726 : HTypeCheckInstruction(kInstanceOf,
7727 DataType::Type::kBool,
7728 object,
7729 target_class_or_null,
7730 check_kind,
7731 klass,
7732 dex_pc,
7733 allocator,
7734 bitstring_path_to_root,
7735 bitstring_mask,
7736 SideEffectsForArchRuntimeCalls(check_kind)) {}
7737
7738 bool IsClonable() const override { return true; }
7739
7740 bool NeedsEnvironment() const override {
7741 return CanCallRuntime(GetTypeCheckKind());
7742 }
7743
7744 static bool CanCallRuntime(TypeCheckKind check_kind) {
7745 // TODO: Re-evaluate now that mips codegen has been removed.
7746 return check_kind != TypeCheckKind::kExactCheck;
7747 }
7748
7749 static SideEffects SideEffectsForArchRuntimeCalls(TypeCheckKind check_kind) {
7750 return CanCallRuntime(check_kind) ? SideEffects::CanTriggerGC() : SideEffects::None();
7751 }
7752
7753 DECLARE_INSTRUCTION(InstanceOf);
7754
7755 protected:
7756 DEFAULT_COPY_CONSTRUCTOR(InstanceOf);
7757 };
7758
7759 class HBoundType final : public HExpression<1> {
7760 public:
7761 explicit HBoundType(HInstruction* input, uint32_t dex_pc = kNoDexPc)
7762 : HExpression(kBoundType, DataType::Type::kReference, SideEffects::None(), dex_pc),
7763 upper_bound_(ReferenceTypeInfo::CreateInvalid()) {
7764 SetPackedFlag<kFlagUpperCanBeNull>(true);
7765 SetPackedFlag<kFlagCanBeNull>(true);
7766 DCHECK_EQ(input->GetType(), DataType::Type::kReference);
7767 SetRawInputAt(0, input);
7768 }
7769
7770 bool InstructionDataEquals(const HInstruction* other) const override;
7771 bool IsClonable() const override { return true; }
7772
7773 // {Get,Set}Upper* should only be used in reference type propagation.
7774 const ReferenceTypeInfo& GetUpperBound() const { return upper_bound_; }
7775 bool GetUpperCanBeNull() const { return GetPackedFlag<kFlagUpperCanBeNull>(); }
7776 void SetUpperBound(const ReferenceTypeInfo& upper_bound, bool can_be_null);
7777
7778 void SetCanBeNull(bool can_be_null) {
7779 DCHECK(GetUpperCanBeNull() || !can_be_null);
7780 SetPackedFlag<kFlagCanBeNull>(can_be_null);
7781 }
7782
7783 bool CanBeNull() const override { return GetPackedFlag<kFlagCanBeNull>(); }
7784
7785 DECLARE_INSTRUCTION(BoundType);
7786
7787 protected:
7788 DEFAULT_COPY_CONSTRUCTOR(BoundType);
7789
7790 private:
7791 // Represents the top constraint that can_be_null_ cannot exceed (i.e. if this
7792 // is false then CanBeNull() cannot be true).
7793 static constexpr size_t kFlagUpperCanBeNull = kNumberOfGenericPackedBits;
7794 static constexpr size_t kFlagCanBeNull = kFlagUpperCanBeNull + 1;
7795 static constexpr size_t kNumberOfBoundTypePackedBits = kFlagCanBeNull + 1;
7796 static_assert(kNumberOfBoundTypePackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
7797
7798 // Encodes the most upper class that this instruction can have. In other words
7799 // it is always the case that GetUpperBound().IsSupertypeOf(GetReferenceType()).
7800 // It is used to bound the type in cases like:
7801 // if (x instanceof ClassX) {
7802 // upper_bound_ will be ClassX
7803 // }
7804 ReferenceTypeInfo upper_bound_;
7805 };
7806
7807 class HCheckCast final : public HTypeCheckInstruction {
7808 public:
7809 HCheckCast(HInstruction* object,
7810 HInstruction* target_class_or_null,
7811 TypeCheckKind check_kind,
7812 Handle<mirror::Class> klass,
7813 uint32_t dex_pc,
7814 ArenaAllocator* allocator,
7815 HIntConstant* bitstring_path_to_root,
7816 HIntConstant* bitstring_mask)
7817 : HTypeCheckInstruction(kCheckCast,
7818 DataType::Type::kVoid,
7819 object,
7820 target_class_or_null,
7821 check_kind,
7822 klass,
7823 dex_pc,
7824 allocator,
7825 bitstring_path_to_root,
7826 bitstring_mask,
7827 SideEffects::CanTriggerGC()) {}
7828
7829 bool IsClonable() const override { return true; }
7830 bool NeedsEnvironment() const override {
7831 // Instruction may throw a CheckCastError.
7832 return true;
7833 }
7834
7835 bool CanThrow() const override { return true; }
7836
7837 DECLARE_INSTRUCTION(CheckCast);
7838
7839 protected:
7840 DEFAULT_COPY_CONSTRUCTOR(CheckCast);
7841 };
7842
7843 /**
7844 * @brief Memory barrier types (see "The JSR-133 Cookbook for Compiler Writers").
7845 * @details We define the combined barrier types that are actually required
7846 * by the Java Memory Model, rather than using exactly the terminology from
7847 * the JSR-133 cookbook. These should, in many cases, be replaced by acquire/release
7848 * primitives. Note that the JSR-133 cookbook generally does not deal with
7849 * store atomicity issues, and the recipes there are not always entirely sufficient.
7850 * The current recipe is as follows:
7851 * -# Use AnyStore ~= (LoadStore | StoreStore) ~= release barrier before volatile store.
7852 * -# Use AnyAny barrier after volatile store. (StoreLoad is as expensive.)
7853 * -# Use LoadAny barrier ~= (LoadLoad | LoadStore) ~= acquire barrier after each volatile load.
7854 * -# Use StoreStore barrier after all stores but before return from any constructor whose
7855 * class has final fields.
7856 * -# Use NTStoreStore to order non-temporal stores with respect to all later
7857 * store-to-memory instructions. Only generated together with non-temporal stores.
7858 */
7859 enum MemBarrierKind {
7860 kAnyStore,
7861 kLoadAny,
7862 kStoreStore,
7863 kAnyAny,
7864 kNTStoreStore,
7865 kLastBarrierKind = kNTStoreStore
7866 };
7867 std::ostream& operator<<(std::ostream& os, MemBarrierKind kind);
7868
7869 class HMemoryBarrier final : public HExpression<0> {
7870 public:
7871 explicit HMemoryBarrier(MemBarrierKind barrier_kind, uint32_t dex_pc = kNoDexPc)
7872 : HExpression(kMemoryBarrier,
7873 SideEffects::AllWritesAndReads(), // Assume write/read on all fields/arrays.
7874 dex_pc) {
7875 SetPackedField<BarrierKindField>(barrier_kind);
7876 }
7877
7878 bool IsClonable() const override { return true; }
7879
7880 MemBarrierKind GetBarrierKind() { return GetPackedField<BarrierKindField>(); }
7881
7882 DECLARE_INSTRUCTION(MemoryBarrier);
7883
7884 protected:
7885 DEFAULT_COPY_CONSTRUCTOR(MemoryBarrier);
7886
7887 private:
7888 static constexpr size_t kFieldBarrierKind = HInstruction::kNumberOfGenericPackedBits;
7889 static constexpr size_t kFieldBarrierKindSize =
7890 MinimumBitsToStore(static_cast<size_t>(kLastBarrierKind));
7891 static constexpr size_t kNumberOfMemoryBarrierPackedBits =
7892 kFieldBarrierKind + kFieldBarrierKindSize;
7893 static_assert(kNumberOfMemoryBarrierPackedBits <= kMaxNumberOfPackedBits,
7894 "Too many packed fields.");
7895 using BarrierKindField = BitField<MemBarrierKind, kFieldBarrierKind, kFieldBarrierKindSize>;
7896 };
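
// For illustration, applying the recipe above to a volatile field would roughly give
// (a sketch in pseudo-IR; actual fences are emitted by the code generator and may be folded
// into acquire/release instructions on some architectures):
//
//   HMemoryBarrier(kAnyStore)      // release barrier before the volatile store
//   <volatile field store>
//   HMemoryBarrier(kAnyAny)        // full barrier after the volatile store
//
//   <volatile field load>
//   HMemoryBarrier(kLoadAny)       // acquire barrier after the volatile load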
7897
7898 // A constructor fence orders all prior stores to fields that could be accessed via a final field of
7899 // the specified object(s), with respect to any subsequent store that might "publish"
7900 // (i.e. make visible) the specified object to another thread.
7901 //
7902 // JLS 17.5.1 "Semantics of final fields" states that a freeze action happens
7903 // for all final fields (that were set) at the end of the invoked constructor.
7904 //
7905 // The constructor fence models the freeze actions for the final fields of an object
7906 // being constructed (semantically at the end of the constructor). Constructor fences
7907 // have a per-object affinity; two separate objects being constructed get two separate
7908 // constructor fences.
7909 //
7910 // (Note that if calling a super-constructor or forwarding to another constructor,
7911 // the freezes would happen at the end of *that* constructor being invoked).
7912 //
7913 // The memory model guarantees that when the object being constructed is "published" after
7914 // constructor completion (i.e. escapes the current thread via a store), then any final field
7915 // writes must be observable on other threads (once they observe that publication).
7916 //
7917 // Further, anything written before the freeze, and read by dereferencing through the final field,
7918 // must also be visible (so final object field could itself have an object with non-final fields;
7919 // yet the freeze must also extend to them).
7920 //
7921 // Constructor example:
7922 //
7923 // class HasFinal {
7924 // final int field; Optimizing IR for <init>()V:
7925 // HasFinal() {
7926 // field = 123; HInstanceFieldSet(this, HasFinal.field, 123)
7927 // // freeze(this.field); HConstructorFence(this)
7928 // } HReturn
7929 // }
7930 //
7931 // HConstructorFence can serve double duty as a fence for new-instance/new-array allocations of
7932 // already-initialized classes; in that case the allocation must act as a "default-initializer"
7933 // of the object which effectively writes the class pointer "final field".
7934 //
7935 // For example, we can model default-initialization as roughly the equivalent of the following:
7936 //
7937 // class Object {
7938 // private final Class header;
7939 // }
7940 //
7941 // Java code: Optimizing IR:
7942 //
7943 // T new_instance<T>() {
7944 // Object obj = allocate_memory(T.class.size); obj = HInvoke(art_quick_alloc_object, T)
7945 // obj.header = T.class; // header write is done by above call.
7946 // // freeze(obj.header) HConstructorFence(obj)
7947 // return (T)obj;
7948 // }
7949 //
7950 // See also:
7951 // * DexCompilationUnit::RequiresConstructorBarrier
7952 // * QuasiAtomic::ThreadFenceForConstructor
7953 //
7954 class HConstructorFence final : public HVariableInputSizeInstruction {
7955 // A fence has variable inputs because the inputs can be removed
7956 // after prepare_for_register_allocation phase.
7957 // (TODO: In the future a fence could freeze multiple objects
7958 // after merging two fences together.)
7959 public:
7960 // `fence_object` is the reference that needs to be protected for correct publication.
7961 //
7962 // It makes sense in the following situations:
7963 // * <init> constructors, it's the "this" parameter (i.e. HParameterValue, s.t. IsThis() == true).
7964 // * new-instance-like instructions, it's the return value (i.e. HNewInstance).
7965 //
7966 // After construction the `fence_object` becomes the 0th input.
7967 // This is not an input in a real sense, but just a convenient place to stash the information
7968 // about the associated object.
7969 HConstructorFence(HInstruction* fence_object,
7970 uint32_t dex_pc,
7971 ArenaAllocator* allocator)
7972 // We strongly suspect there is not a more accurate way to describe the fine-grained reordering
7973 // constraints described in the class header. We claim that these SideEffects constraints
7974 // enforce a superset of the real constraints.
7975 //
7976 // The ordering described above is conservatively modeled with SideEffects as follows:
7977 //
7978 // * To prevent reordering of the publication stores:
7979 // ----> "Reads of objects" is the initial SideEffect.
7980 // * For every primitive final field store in the constructor:
7981 // ----> Union that field's type as a read (e.g. "Read of T") into the SideEffect.
7982 // * If there are any stores to reference final fields in the constructor:
7983 // ----> Use a more conservative "AllReads" SideEffect because any stores to any references
7984 // that are reachable from `fence_object` also need to be prevented for reordering
7985 // (and we do not want to do alias analysis to figure out what those stores are).
7986 //
7987 // In the implementation, this initially starts out as an "all reads" side effect; this is an
7988 // even more conservative approach than the one described above, and prevents all of the
7989 // above reordering without analyzing any of the instructions in the constructor.
7990 //
7991 // If in a later phase we discover that there are no writes to reference final fields,
7992 // we can refine the side effect to a smaller set of type reads (see above constraints).
7993 : HVariableInputSizeInstruction(kConstructorFence,
7994 SideEffects::AllReads(),
7995 dex_pc,
7996 allocator,
7997 /* number_of_inputs= */ 1,
7998 kArenaAllocConstructorFenceInputs) {
7999 DCHECK(fence_object != nullptr);
8000 SetRawInputAt(0, fence_object);
8001 }
8002
8003 // The object associated with this constructor fence.
8004 //
8005 // (Note: This will be null after the prepare_for_register_allocation phase,
8006 // as all constructor fence inputs are removed there).
8007 HInstruction* GetFenceObject() const {
8008 return InputAt(0);
8009 }
8010
8011 // Find all the HConstructorFence uses (`fence_use`) for `this` and:
8012 // - Delete `fence_use` from `this`'s use list.
8013 // - Delete `this` from `fence_use`'s inputs list.
8014 // - If the `fence_use` is dead, remove it from the graph.
8015 //
8016 // A fence is considered dead once it no longer has any uses
8017 // and all of the inputs are dead.
8018 //
8019 // This must *not* be called during/after prepare_for_register_allocation,
8020 // because that removes all the inputs to the fences but the fence is actually
8021 // still considered live.
8022 //
8023 // Returns how many HConstructorFence instructions were removed from graph.
8024 static size_t RemoveConstructorFences(HInstruction* instruction);
8025
8026 // Combine all inputs of `this` and `other` instruction and remove
8027 // `other` from the graph.
8028 //
8029 // Inputs are unique after the merge.
8030 //
8031 // Requirement: `this` must not be the same as `other`.
8032 void Merge(HConstructorFence* other);
8033
8034 // Check if this constructor fence is protecting
8035 // an HNewInstance or HNewArray that is also the immediate
8036 // predecessor of `this`.
8037 //
8038 // If `ignore_inputs` is true, then the immediate predecessor doesn't need
8039 // to be one of the inputs of `this`.
8040 //
8041 // Returns the associated HNewArray or HNewInstance,
8042 // or null otherwise.
8043 HInstruction* GetAssociatedAllocation(bool ignore_inputs = false);
8044
8045 DECLARE_INSTRUCTION(ConstructorFence);
8046
8047 protected:
8048 DEFAULT_COPY_CONSTRUCTOR(ConstructorFence);
8049 };
8050
8051 class HMonitorOperation final : public HExpression<1> {
8052 public:
8053 enum class OperationKind {
8054 kEnter,
8055 kExit,
8056 kLast = kExit
8057 };
8058
8059 HMonitorOperation(HInstruction* object, OperationKind kind, uint32_t dex_pc)
8060 : HExpression(kMonitorOperation,
8061 SideEffects::AllExceptGCDependency(), // Assume write/read on all fields/arrays.
8062 dex_pc) {
8063 SetPackedField<OperationKindField>(kind);
8064 SetRawInputAt(0, object);
8065 }
8066
8067 // Instruction may go into runtime, so we need an environment.
8068 bool NeedsEnvironment() const override { return true; }
8069
8070 bool CanThrow() const override {
8071 // Verifier guarantees that monitor-exit cannot throw.
8072 // This is important because it allows the HGraphBuilder to remove
8073 // a dead throw-catch loop generated for `synchronized` blocks/methods.
8074 return IsEnter();
8075 }
8076
8077 OperationKind GetOperationKind() const { return GetPackedField<OperationKindField>(); }
8078 bool IsEnter() const { return GetOperationKind() == OperationKind::kEnter; }
8079
8080 DECLARE_INSTRUCTION(MonitorOperation);
8081
8082 protected:
8083 DEFAULT_COPY_CONSTRUCTOR(MonitorOperation);
8084
8085 private:
8086 static constexpr size_t kFieldOperationKind = HInstruction::kNumberOfGenericPackedBits;
8087 static constexpr size_t kFieldOperationKindSize =
8088 MinimumBitsToStore(static_cast<size_t>(OperationKind::kLast));
8089 static constexpr size_t kNumberOfMonitorOperationPackedBits =
8090 kFieldOperationKind + kFieldOperationKindSize;
8091 static_assert(kNumberOfMonitorOperationPackedBits <= HInstruction::kMaxNumberOfPackedBits,
8092 "Too many packed fields.");
8093 using OperationKindField = BitField<OperationKind, kFieldOperationKind, kFieldOperationKindSize>;
8094 };
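
// For illustration, a Java `synchronized (obj) { ... }` block roughly becomes (a sketch;
// the exception edges for the implicit unlock are omitted):
//
//   HMonitorOperation(obj, OperationKind::kEnter)
//   ...body...
//   HMonitorOperation(obj, OperationKind::kExit)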
8095
8096 class HSelect final : public HExpression<3> {
8097 public:
8098 HSelect(HInstruction* condition,
8099 HInstruction* true_value,
8100 HInstruction* false_value,
8101 uint32_t dex_pc)
8102 : HExpression(kSelect, HPhi::ToPhiType(true_value->GetType()), SideEffects::None(), dex_pc) {
8103 DCHECK_EQ(HPhi::ToPhiType(true_value->GetType()), HPhi::ToPhiType(false_value->GetType()));
8104
8105 // First input must be `true_value` or `false_value` to allow codegens to
8106 // use the SameAsFirstInput allocation policy. We make it `false_value`, so
8107 // that architectures which implement HSelect as a conditional move also
8108 // will not need to invert the condition.
8109 SetRawInputAt(0, false_value);
8110 SetRawInputAt(1, true_value);
8111 SetRawInputAt(2, condition);
8112 }
8113
8114 bool IsClonable() const override { return true; }
8115 HInstruction* GetFalseValue() const { return InputAt(0); }
8116 HInstruction* GetTrueValue() const { return InputAt(1); }
8117 HInstruction* GetCondition() const { return InputAt(2); }
8118
8119 bool CanBeMoved() const override { return true; }
8120 bool InstructionDataEquals([[maybe_unused]] const HInstruction* other) const override {
8121 return true;
8122 }
8123
8124 bool CanBeNull() const override {
8125 return GetTrueValue()->CanBeNull() || GetFalseValue()->CanBeNull();
8126 }
8127
8128 void UpdateType() {
8129 DCHECK_EQ(HPhi::ToPhiType(GetTrueValue()->GetType()),
8130 HPhi::ToPhiType(GetFalseValue()->GetType()));
8131 SetPackedField<TypeField>(HPhi::ToPhiType(GetTrueValue()->GetType()));
8132 }
8133
8134 DECLARE_INSTRUCTION(Select);
8135
8136 protected:
8137 DEFAULT_COPY_CONSTRUCTOR(Select);
8138 };
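
// For illustration, the expression `x = cond ? a : b` roughly becomes an HSelect whose inputs
// are ordered as follows (a sketch; see the constructor comment for why `b` comes first):
//
//   inputs[0] = b     // false value, first so SameAsFirstInput works for conditional moves
//   inputs[1] = a     // true value
//   inputs[2] = cond  // the condition
//
// so GetFalseValue() == b, GetTrueValue() == a and GetCondition() == cond.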
8139
8140 class MoveOperands : public ArenaObject<kArenaAllocMoveOperands> {
8141 public:
8142 MoveOperands(Location source,
8143 Location destination,
8144 DataType::Type type,
8145 HInstruction* instruction)
8146 : source_(source), destination_(destination), type_(type), instruction_(instruction) {}
8147
8148 Location GetSource() const { return source_; }
8149 Location GetDestination() const { return destination_; }
8150
8151 void SetSource(Location value) { source_ = value; }
8152 void SetDestination(Location value) { destination_ = value; }
8153
8154 // The parallel move resolver marks moves as "in-progress" by clearing the
8155 // destination (but not the source).
8156 Location MarkPending() {
8157 DCHECK(!IsPending());
8158 Location dest = destination_;
8159 destination_ = Location::NoLocation();
8160 return dest;
8161 }
8162
8163 void ClearPending(Location dest) {
8164 DCHECK(IsPending());
8165 destination_ = dest;
8166 }
8167
8168 bool IsPending() const {
8169 DCHECK(source_.IsValid() || destination_.IsInvalid());
8170 return destination_.IsInvalid() && source_.IsValid();
8171 }
8172
8173 // True if this blocks a move from the given location.
8174 bool Blocks(Location loc) const {
8175 return !IsEliminated() && source_.OverlapsWith(loc);
8176 }
8177
8178 // A move is redundant if it's been eliminated, if its source and
8179 // destination are the same, or if its destination is unneeded.
8180 bool IsRedundant() const {
8181 return IsEliminated() || destination_.IsInvalid() || source_.Equals(destination_);
8182 }
8183
8184 // We clear both operands to indicate a move that has been eliminated.
8185 void Eliminate() {
8186 source_ = destination_ = Location::NoLocation();
8187 }
8188
8189 bool IsEliminated() const {
8190 DCHECK_IMPLIES(source_.IsInvalid(), destination_.IsInvalid());
8191 return source_.IsInvalid();
8192 }
8193
8194 DataType::Type GetType() const { return type_; }
8195
8196 bool Is64BitMove() const {
8197 return DataType::Is64BitType(type_);
8198 }
8199
8200 HInstruction* GetInstruction() const { return instruction_; }
8201
8202 private:
8203 Location source_;
8204 Location destination_;
8205 // The type this move is for.
8206 DataType::Type type_;
8207 // The instruction this move is associated with. Null when this move is
8208 // for moving an input into the expected location of its user (including a phi user).
8209 // This is only used in debug mode, to ensure we do not connect interval siblings
8210 // in the same parallel move.
8211 HInstruction* instruction_;
8212 };
8213
8214 std::ostream& operator<<(std::ostream& os, const MoveOperands& rhs);
8215
8216 static constexpr size_t kDefaultNumberOfMoves = 4;
8217
8218 class HParallelMove final : public HExpression<0> {
8219 public:
8220 explicit HParallelMove(ArenaAllocator* allocator, uint32_t dex_pc = kNoDexPc)
8221 : HExpression(kParallelMove, SideEffects::None(), dex_pc),
8222 moves_(allocator->Adapter(kArenaAllocMoveOperands)) {
8223 moves_.reserve(kDefaultNumberOfMoves);
8224 }
8225
8226 void AddMove(Location source,
8227 Location destination,
8228 DataType::Type type,
8229 HInstruction* instruction) {
8230 DCHECK(source.IsValid());
8231 DCHECK(destination.IsValid());
8232 if (kIsDebugBuild) {
8233 if (instruction != nullptr) {
8234 for (const MoveOperands& move : moves_) {
8235 if (move.GetInstruction() == instruction) {
8236 // Special case the situation where the move is for the spill slot
8237 // of the instruction.
8238 if ((GetPrevious() == instruction)
8239 || ((GetPrevious() == nullptr)
8240 && instruction->IsPhi()
8241 && instruction->GetBlock() == GetBlock())) {
8242 DCHECK_NE(destination.GetKind(), move.GetDestination().GetKind())
8243 << "Doing parallel moves for the same instruction.";
8244 } else {
8245 DCHECK(false) << "Doing parallel moves for the same instruction.";
8246 }
8247 }
8248 }
8249 }
8250 for (const MoveOperands& move : moves_) {
8251 DCHECK(!destination.OverlapsWith(move.GetDestination()))
8252 << "Overlapped destination for two moves in a parallel move: "
8253 << move.GetSource() << " ==> " << move.GetDestination() << " and "
8254 << source << " ==> " << destination << " for " << SafePrint(instruction);
8255 }
8256 }
8257 moves_.emplace_back(source, destination, type, instruction);
8258 }
8259
8260 MoveOperands* MoveOperandsAt(size_t index) {
8261 return &moves_[index];
8262 }
8263
8264 size_t NumMoves() const { return moves_.size(); }
8265
8266 DECLARE_INSTRUCTION(ParallelMove);
8267
8268 protected:
8269 DEFAULT_COPY_CONSTRUCTOR(ParallelMove);
8270
8271 private:
8272 ArenaVector<MoveOperands> moves_;
8273 };
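
// For illustration, a register swap expressed as a parallel move (a sketch; the locations and
// the type here are hypothetical, and the resolution order is up to the parallel move resolver):
//
//   move->AddMove(Location::RegisterLocation(0), Location::RegisterLocation(1),
//                 DataType::Type::kInt32, /* instruction= */ nullptr);
//   move->AddMove(Location::RegisterLocation(1), Location::RegisterLocation(0),
//                 DataType::Type::kInt32, /* instruction= */ nullptr);
//
// The two moves block each other, so the resolver marks one as in-progress with MarkPending()
// and breaks the cycle (e.g. via a temporary) before restoring it with ClearPending().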
8274
8275 class HBitwiseNegatedRight final : public HBinaryOperation {
8276 public:
8277 HBitwiseNegatedRight(DataType::Type result_type,
8278 InstructionKind op,
8279 HInstruction* left,
8280 HInstruction* right,
8281 uint32_t dex_pc = kNoDexPc)
8282 : HBinaryOperation(
8283 kBitwiseNegatedRight, result_type, left, right, SideEffects::None(), dex_pc),
8284 op_kind_(op) {
8285 DCHECK(op == HInstruction::kAnd || op == HInstruction::kOr || op == HInstruction::kXor) << op;
8286 }
8287
8288 template <typename T, typename U>
8289 auto Compute(T x, U y) const -> decltype(x & ~y) {
8290 static_assert(std::is_same<decltype(x & ~y), decltype(x | ~y)>::value &&
8291 std::is_same<decltype(x & ~y), decltype(x ^ ~y)>::value,
8292 "Inconsistent negated bitwise types");
8293 switch (op_kind_) {
8294 case HInstruction::kAnd:
8295 return x & ~y;
8296 case HInstruction::kOr:
8297 return x | ~y;
8298 case HInstruction::kXor:
8299 return x ^ ~y;
8300 default:
8301 LOG(FATAL) << "Unreachable";
8302 UNREACHABLE();
8303 }
8304 }
8305
8306 bool InstructionDataEquals(const HInstruction* other) const override {
8307 return op_kind_ == other->AsBitwiseNegatedRight()->op_kind_;
8308 }
8309
8310 HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
8311 return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
8312 }
8313
8314 HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
8315 return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
8316 }
8317
8318 InstructionKind GetOpKind() const { return op_kind_; }
8319
8320 DECLARE_INSTRUCTION(BitwiseNegatedRight);
8321
8322 protected:
8323 DEFAULT_COPY_CONSTRUCTOR(BitwiseNegatedRight);
8324
8325 private:
8326 // Specifies the bitwise operation applied to the left input and the negated right input.
8327 const InstructionKind op_kind_;
8328 };
8329
8330 // This instruction computes an intermediate address pointing in the 'middle' of an object. The
8331 // result pointer cannot be handled by GC, so extra care is taken to make sure that this value is
8332 // never used across anything that can trigger GC.
8333 // The result of this instruction is not a pointer in the sense of `DataType::Type::kReference`,
8334 // so we represent it with the type `DataType::Type::kInt32`.
8335 class HIntermediateAddress final : public HExpression<2> {
8336 public:
8337 HIntermediateAddress(HInstruction* base_address, HInstruction* offset, uint32_t dex_pc)
8338 : HExpression(kIntermediateAddress,
8339 DataType::Type::kInt32,
8340 SideEffects::DependsOnGC(),
8341 dex_pc) {
8342 DCHECK_EQ(DataType::Size(DataType::Type::kInt32),
8343 DataType::Size(DataType::Type::kReference))
8344 << "kPrimInt and kPrimNot have different sizes.";
8345 SetRawInputAt(0, base_address);
8346 SetRawInputAt(1, offset);
8347 }
8348
8349 bool IsClonable() const override { return true; }
8350 bool CanBeMoved() const override { return true; }
8351 bool InstructionDataEquals([[maybe_unused]] const HInstruction* other) const override {
8352 return true;
8353 }
8354 bool IsActualObject() const override { return false; }
8355
8356 HInstruction* GetBaseAddress() const { return InputAt(0); }
8357 HInstruction* GetOffset() const { return InputAt(1); }
8358
8359 DECLARE_INSTRUCTION(IntermediateAddress);
8360
8361 protected:
8362 DEFAULT_COPY_CONSTRUCTOR(IntermediateAddress);
8363 };
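
// For illustration, an HIntermediateAddress lets several array accesses share the addition of
// the array data offset (a sketch; whether this is done is decided by arch-specific passes):
//
//   address = HIntermediateAddress(array, data_offset)   // array + offset of element 0
//   ...     = HArrayGet using `address` and an index     // codegen adds index * element_size
//   ...     = HArrayGet using `address` and another index
//
// Since `address` points into the middle of `array`, it must never be used across anything
// that can trigger GC, which is why the instruction carries SideEffects::DependsOnGC().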
8364
8365
8366 } // namespace art
8367
8368 #include "nodes_vector.h"
8369
8370 #if defined(ART_ENABLE_CODEGEN_arm) || defined(ART_ENABLE_CODEGEN_arm64)
8371 #include "nodes_shared.h"
8372 #endif
8373 #if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
8374 #include "nodes_x86.h"
8375 #endif
8376 #if defined(ART_ENABLE_CODEGEN_riscv64)
8377 #include "nodes_riscv64.h"
8378 #endif
8379
8380 namespace art HIDDEN {
8381
8382 class OptimizingCompilerStats;
8383
8384 class HGraphVisitor : public ValueObject {
8385 public:
8386 explicit HGraphVisitor(HGraph* graph, OptimizingCompilerStats* stats = nullptr)
8387 : stats_(stats),
8388 graph_(graph) {}
8389 virtual ~HGraphVisitor() {}
8390
8391 virtual void VisitInstruction([[maybe_unused]] HInstruction* instruction) {}
8392 virtual void VisitBasicBlock(HBasicBlock* block);
8393
8394 // Visit the graph following basic block insertion order.
8395 void VisitInsertionOrder();
8396
8397 // Visit the graph following dominator tree reverse post-order.
8398 void VisitReversePostOrder();
8399
8400 HGraph* GetGraph() const { return graph_; }
8401
8402 // Visit functions for instruction classes.
8403 #define DECLARE_VISIT_INSTRUCTION(name, super) \
8404 virtual void Visit##name(H##name* instr) { VisitInstruction(instr); }
8405
8406 FOR_EACH_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
8407
8408 #undef DECLARE_VISIT_INSTRUCTION
8409
8410 protected:
8411 void VisitPhis(HBasicBlock* block);
8412 void VisitNonPhiInstructions(HBasicBlock* block);
8413 void VisitNonPhiInstructionsHandleChanges(HBasicBlock* block);
8414
8415 OptimizingCompilerStats* stats_;
8416
8417 private:
8418 HGraph* const graph_;
8419
8420 DISALLOW_COPY_AND_ASSIGN(HGraphVisitor);
8421 };
8422
8423 class HGraphDelegateVisitor : public HGraphVisitor {
8424 public:
8425 explicit HGraphDelegateVisitor(HGraph* graph, OptimizingCompilerStats* stats = nullptr)
8426 : HGraphVisitor(graph, stats) {}
8427 virtual ~HGraphDelegateVisitor() {}
8428
8429 // Visit functions that delegate to the super class.
8430 #define DECLARE_VISIT_INSTRUCTION(name, super) \
8431 void Visit##name(H##name* instr) override { Visit##super(instr); }
8432
8433 FOR_EACH_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
8434
8435 #undef DECLARE_VISIT_INSTRUCTION
8436
8437 private:
8438 DISALLOW_COPY_AND_ASSIGN(HGraphDelegateVisitor);
8439 };
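
// For illustration, a minimal pass built on the visitors above could look like this (a sketch;
// the class name and counter are hypothetical):
//
//   class CountThrowersVisitor final : public HGraphDelegateVisitor {
//    public:
//     explicit CountThrowersVisitor(HGraph* graph) : HGraphDelegateVisitor(graph) {}
//
//     void VisitInstruction(HInstruction* instruction) override {
//       if (instruction->CanThrow()) {
//         ++count_;
//       }
//     }
//
//     size_t GetCount() const { return count_; }
//
//    private:
//     size_t count_ = 0;
//   };
//
// A caller would construct it with a graph and then run, e.g., VisitReversePostOrder().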
8440
8441 // Create a clone of the instruction, insert it into the graph, replace the old instruction
8442 // with the clone, and remove the old instruction from the graph.
8443 HInstruction* ReplaceInstrOrPhiByClone(HInstruction* instr);
8444
8445 // Create a clone for each clonable instruction/phi and replace the original with the clone.
8446 //
8447 // Used for testing individual instruction cloner.
8448 class CloneAndReplaceInstructionVisitor final : public HGraphDelegateVisitor {
8449 public:
8450 explicit CloneAndReplaceInstructionVisitor(HGraph* graph)
8451 : HGraphDelegateVisitor(graph), instr_replaced_by_clones_count_(0) {}
8452
8453 void VisitInstruction(HInstruction* instruction) override {
8454 if (instruction->IsClonable()) {
8455 ReplaceInstrOrPhiByClone(instruction);
8456 instr_replaced_by_clones_count_++;
8457 }
8458 }
8459
8460 size_t GetInstrReplacedByClonesCount() const { return instr_replaced_by_clones_count_; }
8461
8462 private:
8463 size_t instr_replaced_by_clones_count_;
8464
8465 DISALLOW_COPY_AND_ASSIGN(CloneAndReplaceInstructionVisitor);
8466 };
8467
8468 // Iterator over the blocks that are part of the loop; includes blocks which are part
8469 // of an inner loop. The order in which the blocks are iterated is determined by their
8470 // block id.
8471 class HBlocksInLoopIterator : public ValueObject {
8472 public:
8473 explicit HBlocksInLoopIterator(const HLoopInformation& info)
8474 : blocks_in_loop_(info.GetBlocks()),
8475 blocks_(info.GetHeader()->GetGraph()->GetBlocks()),
8476 index_(0) {
8477 if (!blocks_in_loop_.IsBitSet(index_)) {
8478 Advance();
8479 }
8480 }
8481
8482 bool Done() const { return index_ == blocks_.size(); }
8483 HBasicBlock* Current() const { return blocks_[index_]; }
8484 void Advance() {
8485 ++index_;
8486 for (size_t e = blocks_.size(); index_ < e; ++index_) {
8487 if (blocks_in_loop_.IsBitSet(index_)) {
8488 break;
8489 }
8490 }
8491 }
8492
8493 private:
8494 const BitVector& blocks_in_loop_;
8495 const ArenaVector<HBasicBlock*>& blocks_;
8496 size_t index_;
8497
8498 DISALLOW_COPY_AND_ASSIGN(HBlocksInLoopIterator);
8499 };
8500
8501 // Iterator over the blocks that are part of the loop; includes blocks which are part
8502 // of an inner loop. The order in which the blocks are iterated is reverse
8503 // post order.
8504 class HBlocksInLoopReversePostOrderIterator : public ValueObject {
8505 public:
8506 explicit HBlocksInLoopReversePostOrderIterator(const HLoopInformation& info)
8507 : blocks_in_loop_(info.GetBlocks()),
8508 blocks_(info.GetHeader()->GetGraph()->GetReversePostOrder()),
8509 index_(0) {
8510 if (!blocks_in_loop_.IsBitSet(blocks_[index_]->GetBlockId())) {
8511 Advance();
8512 }
8513 }
8514
8515 bool Done() const { return index_ == blocks_.size(); }
8516 HBasicBlock* Current() const { return blocks_[index_]; }
8517 void Advance() {
8518 ++index_;
8519 for (size_t e = blocks_.size(); index_ < e; ++index_) {
8520 if (blocks_in_loop_.IsBitSet(blocks_[index_]->GetBlockId())) {
8521 break;
8522 }
8523 }
8524 }
8525
8526 private:
8527 const BitVector& blocks_in_loop_;
8528 const ArenaVector<HBasicBlock*>& blocks_;
8529 size_t index_;
8530
8531 DISALLOW_COPY_AND_ASSIGN(HBlocksInLoopReversePostOrderIterator);
8532 };
8533
8534 // Iterator over the blocks that are part of the loop; includes blocks which are part
8535 // of an inner loop. The order in which the blocks are iterated is post order.
8536 class HBlocksInLoopPostOrderIterator : public ValueObject {
8537 public:
8538 explicit HBlocksInLoopPostOrderIterator(const HLoopInformation& info)
8539 : blocks_in_loop_(info.GetBlocks()),
8540 blocks_(info.GetHeader()->GetGraph()->GetReversePostOrder()),
8541 index_(blocks_.size() - 1) {
8542 if (!blocks_in_loop_.IsBitSet(blocks_[index_]->GetBlockId())) {
8543 Advance();
8544 }
8545 }
8546
8547 bool Done() const { return index_ < 0; }
8548 HBasicBlock* Current() const { return blocks_[index_]; }
8549 void Advance() {
8550 --index_;
8551 for (; index_ >= 0; --index_) {
8552 if (blocks_in_loop_.IsBitSet(blocks_[index_]->GetBlockId())) {
8553 break;
8554 }
8555 }
8556 }
8557
8558 private:
8559 const BitVector& blocks_in_loop_;
8560 const ArenaVector<HBasicBlock*>& blocks_;
8561
8562 int32_t index_;
8563
8564 DISALLOW_COPY_AND_ASSIGN(HBlocksInLoopPostOrderIterator);
8565 };
8566
8567 // Returns the int64_t value of a properly typed constant.
8568 inline int64_t Int64FromConstant(HConstant* constant) {
8569 if (constant->IsIntConstant()) {
8570 return constant->AsIntConstant()->GetValue();
8571 } else if (constant->IsLongConstant()) {
8572 return constant->AsLongConstant()->GetValue();
8573 } else {
8574 DCHECK(constant->IsNullConstant()) << constant->DebugName();
8575 return 0;
8576 }
8577 }
8578
8579 // Returns true iff instruction is an integral constant (and sets value on success).
8580 inline bool IsInt64AndGet(HInstruction* instruction, /*out*/ int64_t* value) {
8581 if (instruction->IsIntConstant()) {
8582 *value = instruction->AsIntConstant()->GetValue();
8583 return true;
8584 } else if (instruction->IsLongConstant()) {
8585 *value = instruction->AsLongConstant()->GetValue();
8586 return true;
8587 } else if (instruction->IsNullConstant()) {
8588 *value = 0;
8589 return true;
8590 }
8591 return false;
8592 }
8593
8594 // Returns true iff instruction is the given integral constant.
8595 inline bool IsInt64Value(HInstruction* instruction, int64_t value) {
8596 int64_t val = 0;
8597 return IsInt64AndGet(instruction, &val) && val == value;
8598 }
8599
8600 // Returns true iff instruction is a zero bit pattern.
8601 inline bool IsZeroBitPattern(HInstruction* instruction) {
8602 return instruction->IsConstant() && instruction->AsConstant()->IsZeroBitPattern();
8603 }
8604
8605 // Implement HInstruction::Is##type() for concrete instructions.
8606 #define INSTRUCTION_TYPE_CHECK(type, super) \
8607 inline bool HInstruction::Is##type() const { return GetKind() == k##type; }
8608 FOR_EACH_CONCRETE_INSTRUCTION(INSTRUCTION_TYPE_CHECK)
8609 #undef INSTRUCTION_TYPE_CHECK
8610
8611 // Implement HInstruction::Is##type() for abstract instructions.
8612 #define INSTRUCTION_TYPE_CHECK_RESULT(type, super) \
8613 std::is_base_of<BaseType, H##type>::value,
8614 #define INSTRUCTION_TYPE_CHECK(type, super) \
8615 inline bool HInstruction::Is##type() const { \
8616 DCHECK_LT(GetKind(), kLastInstructionKind); \
8617 using BaseType = H##type; \
8618 static constexpr bool results[] = { \
8619 FOR_EACH_CONCRETE_INSTRUCTION(INSTRUCTION_TYPE_CHECK_RESULT) \
8620 }; \
8621 return results[static_cast<size_t>(GetKind())]; \
8622 }
8623
8624 FOR_EACH_ABSTRACT_INSTRUCTION(INSTRUCTION_TYPE_CHECK)
8625 #undef INSTRUCTION_TYPE_CHECK
8626 #undef INSTRUCTION_TYPE_CHECK_RESULT
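
// For illustration, for an abstract instruction such as HBinaryOperation the macro above
// expands to roughly (a sketch):
//
//   inline bool HInstruction::IsBinaryOperation() const {
//     using BaseType = HBinaryOperation;
//     static constexpr bool results[] = { /* std::is_base_of<BaseType, H##type> per kind */ };
//     return results[static_cast<size_t>(GetKind())];
//   }
//
// i.e. the abstract type check is a constexpr table lookup indexed by the instruction kind.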
8627
8628 #define INSTRUCTION_TYPE_CAST(type, super) \
8629 inline const H##type* HInstruction::As##type() const { \
8630 DCHECK(Is##type()); \
8631 return down_cast<const H##type*>(this); \
8632 } \
8633 inline H##type* HInstruction::As##type() { \
8634 DCHECK(Is##type()); \
8635 return down_cast<H##type*>(this); \
8636 } \
8637 inline const H##type* HInstruction::As##type##OrNull() const { \
8638 return Is##type() ? down_cast<const H##type*>(this) : nullptr; \
8639 } \
8640 inline H##type* HInstruction::As##type##OrNull() { \
8641 return Is##type() ? down_cast<H##type*>(this) : nullptr; \
8642 }
8643
8644 FOR_EACH_INSTRUCTION(INSTRUCTION_TYPE_CAST)
8645 #undef INSTRUCTION_TYPE_CAST
8646
8647
8648 // Create space in `blocks` for adding `number_of_new_blocks` entries
8649 // starting right after the position `after`. Blocks following `after` are moved accordingly.
8650 inline void MakeRoomFor(ArenaVector<HBasicBlock*>* blocks,
8651 size_t number_of_new_blocks,
8652 size_t after) {
8653 DCHECK_LT(after, blocks->size());
8654 size_t old_size = blocks->size();
8655 size_t new_size = old_size + number_of_new_blocks;
8656 blocks->resize(new_size);
8657 std::copy_backward(blocks->begin() + after + 1u, blocks->begin() + old_size, blocks->end());
8658 }
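
// For illustration, with `*blocks == [B0, B1, B2, B3]`, MakeRoomFor(&blocks, 2u, 1u) resizes
// the vector and shifts the tail, conceptually yielding [B0, B1, _, _, B2, B3]; the caller is
// then expected to fill the two new slots that follow index 1.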
8659
8660 /*
8661 * Hunt "under the hood" of array lengths (leading to array references),
8662 * null checks (also leading to array references), and new arrays
8663 * (leading to the actual length). This makes it more likely related
8664 * instructions become actually comparable.
8665 */
8666 inline HInstruction* HuntForDeclaration(HInstruction* instruction) {
8667 while (instruction->IsArrayLength() ||
8668 instruction->IsNullCheck() ||
8669 instruction->IsNewArray()) {
8670 instruction = instruction->IsNewArray()
8671 ? instruction->AsNewArray()->GetLength()
8672 : instruction->InputAt(0);
8673 }
8674 return instruction;
8675 }
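
// For illustration, given `len = HArrayLength(HNullCheck(a))`, HuntForDeclaration(len) walks
// through the ArrayLength and the NullCheck and returns `a`; for an HNewArray it returns the
// length that was used to allocate the array. This lets passes such as bounds check
// elimination compare values that ultimately refer to the same array.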
8676
8677 inline bool IsAddOrSub(const HInstruction* instruction) {
8678 return instruction->IsAdd() || instruction->IsSub();
8679 }
8680
8681 void RemoveEnvironmentUses(HInstruction* instruction);
8682 bool HasEnvironmentUsedByOthers(HInstruction* instruction);
8683 void ResetEnvironmentInputRecords(HInstruction* instruction);
8684
8685 // Detects an instruction that is >= 0. As long as the value is carried by
8686 // a single instruction, arithmetic wrap-around cannot occur.
8687 bool IsGEZero(HInstruction* instruction);
8688
8689 } // namespace art
8690
8691 #endif // ART_COMPILER_OPTIMIZING_NODES_H_
8692