/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "scheduler_arm64.h"

#include "code_generator_utils.h"
#include "mirror/array-inl.h"
#include "mirror/string.h"

namespace art HIDDEN {
namespace arm64 {

static constexpr uint32_t kArm64MemoryLoadLatency = 5;
static constexpr uint32_t kArm64MemoryStoreLatency = 3;

static constexpr uint32_t kArm64CallInternalLatency = 10;
static constexpr uint32_t kArm64CallLatency = 5;

// AArch64 instruction latency.
// We currently assume that all arm64 CPUs share the same instruction latency list.
static constexpr uint32_t kArm64IntegerOpLatency = 2;
static constexpr uint32_t kArm64FloatingPointOpLatency = 5;

static constexpr uint32_t kArm64DataProcWithShifterOpLatency = 3;
static constexpr uint32_t kArm64DivDoubleLatency = 30;
static constexpr uint32_t kArm64DivFloatLatency = 15;
static constexpr uint32_t kArm64DivIntegerLatency = 5;
static constexpr uint32_t kArm64LoadStringInternalLatency = 7;
static constexpr uint32_t kArm64MulFloatingPointLatency = 6;
static constexpr uint32_t kArm64MulIntegerLatency = 6;
static constexpr uint32_t kArm64TypeConversionFloatingPointIntegerLatency = 5;
static constexpr uint32_t kArm64BranchLatency = kArm64IntegerOpLatency;

static constexpr uint32_t kArm64SIMDFloatingPointOpLatency = 10;
static constexpr uint32_t kArm64SIMDIntegerOpLatency = 6;
static constexpr uint32_t kArm64SIMDMemoryLoadLatency = 10;
static constexpr uint32_t kArm64SIMDMemoryStoreLatency = 6;
static constexpr uint32_t kArm64SIMDMulFloatingPointLatency = 12;
static constexpr uint32_t kArm64SIMDMulIntegerLatency = 12;
static constexpr uint32_t kArm64SIMDReplicateOpLatency = 16;
static constexpr uint32_t kArm64SIMDDivDoubleLatency = 60;
static constexpr uint32_t kArm64SIMDDivFloatLatency = 30;
static constexpr uint32_t kArm64SIMDTypeConversionInt2FPLatency = 10;

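// Approximate latency model used by the arm64 instruction scheduler. Each Visit* method sets
// `last_visited_latency_` (roughly, the latency of the result-producing instruction) and, where
// relevant, `last_visited_internal_latency_` (roughly, extra work such as address computation or
// a runtime call generated before the result becomes available).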
class SchedulingLatencyVisitorARM64 final : public SchedulingLatencyVisitor {
 public:
  // Default visitor for instructions not handled specifically below.
  void VisitInstruction([[maybe_unused]] HInstruction*) override {
    last_visited_latency_ = kArm64IntegerOpLatency;
  }

// We add a second unused parameter to be able to use this macro like the others
// defined in `nodes.h`.
#define FOR_EACH_SCHEDULED_COMMON_INSTRUCTION(M)     \
  M(ArrayGet             , unused)                   \
  M(ArrayLength          , unused)                   \
  M(ArraySet             , unused)                   \
  M(BoundsCheck          , unused)                   \
  M(Div                  , unused)                   \
  M(InstanceFieldGet     , unused)                   \
  M(InstanceOf           , unused)                   \
  M(LoadString           , unused)                   \
  M(Mul                  , unused)                   \
  M(NewArray             , unused)                   \
  M(NewInstance          , unused)                   \
  M(Rem                  , unused)                   \
  M(StaticFieldGet       , unused)                   \
  M(SuspendCheck         , unused)                   \
  M(TypeConversion       , unused)                   \
  M(VecReplicateScalar   , unused)                   \
  M(VecExtractScalar     , unused)                   \
  M(VecReduce            , unused)                   \
  M(VecCnv               , unused)                   \
  M(VecNeg               , unused)                   \
  M(VecAbs               , unused)                   \
  M(VecNot               , unused)                   \
  M(VecAdd               , unused)                   \
  M(VecHalvingAdd        , unused)                   \
  M(VecSub               , unused)                   \
  M(VecMul               , unused)                   \
  M(VecDiv               , unused)                   \
  M(VecMin               , unused)                   \
  M(VecMax               , unused)                   \
  M(VecAnd               , unused)                   \
  M(VecAndNot            , unused)                   \
  M(VecOr                , unused)                   \
  M(VecXor               , unused)                   \
  M(VecShl               , unused)                   \
  M(VecShr               , unused)                   \
  M(VecUShr              , unused)                   \
  M(VecSetScalars        , unused)                   \
  M(VecMultiplyAccumulate, unused)                   \
  M(VecLoad              , unused)                   \
  M(VecStore             , unused)

#define FOR_EACH_SCHEDULED_ABSTRACT_INSTRUCTION(M)   \
  M(BinaryOperation      , unused)                   \
  M(Invoke               , unused)

#define FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(M) \
  M(BitwiseNegatedRight, unused)                 \
  M(MultiplyAccumulate, unused)                  \
  M(IntermediateAddress, unused)                 \
  M(IntermediateAddressIndex, unused)            \
  M(DataProcWithShifterOp, unused)

#define DECLARE_VISIT_INSTRUCTION(type, unused)  \
  void Visit##type(H##type* instruction) override;

  FOR_EACH_SCHEDULED_COMMON_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_SCHEDULED_ABSTRACT_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

 private:
  void HandleSimpleArithmeticSIMD(HVecOperation* instr);
  void HandleVecAddress(HVecMemoryOperation* instruction, size_t size);
};

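// Fallback for binary operations that do not have a dedicated visitor below: the cost only
// depends on whether the result type is floating-point.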
void SchedulingLatencyVisitorARM64::VisitBinaryOperation(HBinaryOperation* instr) {
  last_visited_latency_ = DataType::IsFloatingPointType(instr->GetResultType())
      ? kArm64FloatingPointOpLatency
      : kArm64IntegerOpLatency;
}

void SchedulingLatencyVisitorARM64::VisitBitwiseNegatedRight(
    [[maybe_unused]] HBitwiseNegatedRight*) {
  last_visited_latency_ = kArm64IntegerOpLatency;
}

void SchedulingLatencyVisitorARM64::VisitDataProcWithShifterOp(
    [[maybe_unused]] HDataProcWithShifterOp*) {
  last_visited_latency_ = kArm64DataProcWithShifterOpLatency;
}

void SchedulingLatencyVisitorARM64::VisitIntermediateAddress(
    [[maybe_unused]] HIntermediateAddress*) {
  // Although the code generated is a simple `add` instruction, we found through empirical results
  // that spacing it from its use in memory accesses was beneficial.
  last_visited_latency_ = kArm64IntegerOpLatency + 2;
}

void SchedulingLatencyVisitorARM64::VisitIntermediateAddressIndex(
    [[maybe_unused]] HIntermediateAddressIndex* instr) {
  // Although the code generated is a simple `add` instruction, we found through empirical results
  // that spacing it from its use in memory accesses was beneficial.
  last_visited_latency_ = kArm64DataProcWithShifterOpLatency + 2;
}

void SchedulingLatencyVisitorARM64::VisitMultiplyAccumulate([[maybe_unused]] HMultiplyAccumulate*) {
  last_visited_latency_ = kArm64MulIntegerLatency;
}

void SchedulingLatencyVisitorARM64::VisitArrayGet(HArrayGet* instruction) {
  if (!instruction->GetArray()->IsIntermediateAddress()) {
    // Take the intermediate address computation into account.
    last_visited_internal_latency_ = kArm64IntegerOpLatency;
  }
  last_visited_latency_ = kArm64MemoryLoadLatency;
}

void SchedulingLatencyVisitorARM64::VisitArrayLength([[maybe_unused]] HArrayLength*) {
  last_visited_latency_ = kArm64MemoryLoadLatency;
}

void SchedulingLatencyVisitorARM64::VisitArraySet([[maybe_unused]] HArraySet*) {
  last_visited_latency_ = kArm64MemoryStoreLatency;
}

void SchedulingLatencyVisitorARM64::VisitBoundsCheck([[maybe_unused]] HBoundsCheck*) {
  last_visited_internal_latency_ = kArm64IntegerOpLatency;
  // Users do not use any data results.
  last_visited_latency_ = 0;
}

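// Integer division by a constant is strength-reduced by the code generator (no code for a zero
// divisor, a single negation/move for +/-1, a short shift sequence for powers of two, and a
// multiply-based sequence otherwise), so the latencies below follow those code paths.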
void SchedulingLatencyVisitorARM64::VisitDiv(HDiv* instr) {
  DataType::Type type = instr->GetResultType();
  switch (type) {
    case DataType::Type::kFloat32:
      last_visited_latency_ = kArm64DivFloatLatency;
      break;
    case DataType::Type::kFloat64:
      last_visited_latency_ = kArm64DivDoubleLatency;
      break;
    default:
      // Follow the code path used by code generation.
      if (instr->GetRight()->IsConstant()) {
        int64_t imm = Int64FromConstant(instr->GetRight()->AsConstant());
        if (imm == 0) {
          last_visited_internal_latency_ = 0;
          last_visited_latency_ = 0;
        } else if (imm == 1 || imm == -1) {
          last_visited_internal_latency_ = 0;
          last_visited_latency_ = kArm64IntegerOpLatency;
        } else if (IsPowerOfTwo(AbsOrMin(imm))) {
          last_visited_internal_latency_ = 4 * kArm64IntegerOpLatency;
          last_visited_latency_ = kArm64IntegerOpLatency;
        } else {
          DCHECK(imm <= -2 || imm >= 2);
          last_visited_internal_latency_ = 4 * kArm64IntegerOpLatency;
          last_visited_latency_ = kArm64MulIntegerLatency;
        }
      } else {
        last_visited_latency_ = kArm64DivIntegerLatency;
      }
      break;
  }
}

void SchedulingLatencyVisitorARM64::VisitInstanceFieldGet([[maybe_unused]] HInstanceFieldGet*) {
  last_visited_latency_ = kArm64MemoryLoadLatency;
}

void SchedulingLatencyVisitorARM64::VisitInstanceOf([[maybe_unused]] HInstanceOf*) {
  last_visited_internal_latency_ = kArm64CallInternalLatency;
  last_visited_latency_ = kArm64IntegerOpLatency;
}

void SchedulingLatencyVisitorARM64::VisitInvoke([[maybe_unused]] HInvoke*) {
  last_visited_internal_latency_ = kArm64CallInternalLatency;
  last_visited_latency_ = kArm64CallLatency;
}

void SchedulingLatencyVisitorARM64::VisitLoadString([[maybe_unused]] HLoadString*) {
  last_visited_internal_latency_ = kArm64LoadStringInternalLatency;
  last_visited_latency_ = kArm64MemoryLoadLatency;
}

void SchedulingLatencyVisitorARM64::VisitMul(HMul* instr) {
  last_visited_latency_ = DataType::IsFloatingPointType(instr->GetResultType())
      ? kArm64MulFloatingPointLatency
      : kArm64MulIntegerLatency;
}

void SchedulingLatencyVisitorARM64::VisitNewArray([[maybe_unused]] HNewArray*) {
  last_visited_internal_latency_ = kArm64IntegerOpLatency + kArm64CallInternalLatency;
  last_visited_latency_ = kArm64CallLatency;
}

void SchedulingLatencyVisitorARM64::VisitNewInstance(HNewInstance* instruction) {
  if (instruction->IsStringAlloc()) {
    last_visited_internal_latency_ = 2 + kArm64MemoryLoadLatency + kArm64CallInternalLatency;
  } else {
    last_visited_internal_latency_ = kArm64CallInternalLatency;
  }
  last_visited_latency_ = kArm64CallLatency;
}

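// Floating-point remainder goes through a runtime call (fmod/fmodf), hence the call latencies.
// Integer remainder with a non-constant divisor is generated as a division followed by a
// multiply-subtract; the constant-divisor cases mirror VisitDiv above.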
void SchedulingLatencyVisitorARM64::VisitRem(HRem* instruction) {
  if (DataType::IsFloatingPointType(instruction->GetResultType())) {
    last_visited_internal_latency_ = kArm64CallInternalLatency;
    last_visited_latency_ = kArm64CallLatency;
  } else {
    // Follow the code path used by code generation.
    if (instruction->GetRight()->IsConstant()) {
      int64_t imm = Int64FromConstant(instruction->GetRight()->AsConstant());
      if (imm == 0) {
        last_visited_internal_latency_ = 0;
        last_visited_latency_ = 0;
      } else if (imm == 1 || imm == -1) {
        last_visited_internal_latency_ = 0;
        last_visited_latency_ = kArm64IntegerOpLatency;
      } else if (IsPowerOfTwo(AbsOrMin(imm))) {
        last_visited_internal_latency_ = 4 * kArm64IntegerOpLatency;
        last_visited_latency_ = kArm64IntegerOpLatency;
      } else {
        DCHECK(imm <= -2 || imm >= 2);
        last_visited_internal_latency_ = 4 * kArm64IntegerOpLatency;
        last_visited_latency_ = kArm64MulIntegerLatency;
      }
    } else {
      last_visited_internal_latency_ = kArm64DivIntegerLatency;
      last_visited_latency_ = kArm64MulIntegerLatency;
    }
  }
}

void SchedulingLatencyVisitorARM64::VisitStaticFieldGet([[maybe_unused]] HStaticFieldGet*) {
  last_visited_latency_ = kArm64MemoryLoadLatency;
}

void SchedulingLatencyVisitorARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
  HBasicBlock* block = instruction->GetBlock();
  DCHECK_IMPLIES(block->GetLoopInformation() == nullptr,
                 block->IsEntryBlock() && instruction->GetNext()->IsGoto());
  // Users do not use any data results.
  last_visited_latency_ = 0;
}

void SchedulingLatencyVisitorARM64::VisitTypeConversion(HTypeConversion* instr) {
  if (DataType::IsFloatingPointType(instr->GetResultType()) ||
      DataType::IsFloatingPointType(instr->GetInputType())) {
    last_visited_latency_ = kArm64TypeConversionFloatingPointIntegerLatency;
  } else {
    last_visited_latency_ = kArm64IntegerOpLatency;
  }
}

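// Shared helper for SIMD operations whose cost only depends on whether the packed type is
// floating-point.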
void SchedulingLatencyVisitorARM64::HandleSimpleArithmeticSIMD(HVecOperation* instr) {
  if (DataType::IsFloatingPointType(instr->GetPackedType())) {
    last_visited_latency_ = kArm64SIMDFloatingPointOpLatency;
  } else {
    last_visited_latency_ = kArm64SIMDIntegerOpLatency;
  }
}

void SchedulingLatencyVisitorARM64::VisitVecReplicateScalar(
    [[maybe_unused]] HVecReplicateScalar* instr) {
  last_visited_latency_ = kArm64SIMDReplicateOpLatency;
}

void SchedulingLatencyVisitorARM64::VisitVecExtractScalar(HVecExtractScalar* instr) {
  HandleSimpleArithmeticSIMD(instr);
}

void SchedulingLatencyVisitorARM64::VisitVecReduce(HVecReduce* instr) {
  HandleSimpleArithmeticSIMD(instr);
}

void SchedulingLatencyVisitorARM64::VisitVecCnv([[maybe_unused]] HVecCnv* instr) {
  last_visited_latency_ = kArm64SIMDTypeConversionInt2FPLatency;
}

void SchedulingLatencyVisitorARM64::VisitVecNeg(HVecNeg* instr) {
  HandleSimpleArithmeticSIMD(instr);
}

void SchedulingLatencyVisitorARM64::VisitVecAbs(HVecAbs* instr) {
  HandleSimpleArithmeticSIMD(instr);
}

void SchedulingLatencyVisitorARM64::VisitVecNot(HVecNot* instr) {
  if (instr->GetPackedType() == DataType::Type::kBool) {
    last_visited_internal_latency_ = kArm64SIMDIntegerOpLatency;
  }
  last_visited_latency_ = kArm64SIMDIntegerOpLatency;
}

void SchedulingLatencyVisitorARM64::VisitVecAdd(HVecAdd* instr) {
  HandleSimpleArithmeticSIMD(instr);
}

void SchedulingLatencyVisitorARM64::VisitVecHalvingAdd(HVecHalvingAdd* instr) {
  HandleSimpleArithmeticSIMD(instr);
}

void SchedulingLatencyVisitorARM64::VisitVecSub(HVecSub* instr) {
  HandleSimpleArithmeticSIMD(instr);
}

void SchedulingLatencyVisitorARM64::VisitVecMul(HVecMul* instr) {
  if (DataType::IsFloatingPointType(instr->GetPackedType())) {
    last_visited_latency_ = kArm64SIMDMulFloatingPointLatency;
  } else {
    last_visited_latency_ = kArm64SIMDMulIntegerLatency;
  }
}

void SchedulingLatencyVisitorARM64::VisitVecDiv(HVecDiv* instr) {
  if (instr->GetPackedType() == DataType::Type::kFloat32) {
    last_visited_latency_ = kArm64SIMDDivFloatLatency;
  } else {
    DCHECK(instr->GetPackedType() == DataType::Type::kFloat64);
    last_visited_latency_ = kArm64SIMDDivDoubleLatency;
  }
}

void SchedulingLatencyVisitorARM64::VisitVecMin(HVecMin* instr) {
  HandleSimpleArithmeticSIMD(instr);
}

void SchedulingLatencyVisitorARM64::VisitVecMax(HVecMax* instr) {
  HandleSimpleArithmeticSIMD(instr);
}

void SchedulingLatencyVisitorARM64::VisitVecAnd([[maybe_unused]] HVecAnd* instr) {
  last_visited_latency_ = kArm64SIMDIntegerOpLatency;
}

void SchedulingLatencyVisitorARM64::VisitVecAndNot([[maybe_unused]] HVecAndNot* instr) {
  last_visited_latency_ = kArm64SIMDIntegerOpLatency;
}

void SchedulingLatencyVisitorARM64::VisitVecOr([[maybe_unused]] HVecOr* instr) {
  last_visited_latency_ = kArm64SIMDIntegerOpLatency;
}

void SchedulingLatencyVisitorARM64::VisitVecXor([[maybe_unused]] HVecXor* instr) {
  last_visited_latency_ = kArm64SIMDIntegerOpLatency;
}

void SchedulingLatencyVisitorARM64::VisitVecShl(HVecShl* instr) {
  HandleSimpleArithmeticSIMD(instr);
}

void SchedulingLatencyVisitorARM64::VisitVecShr(HVecShr* instr) {
  HandleSimpleArithmeticSIMD(instr);
}

void SchedulingLatencyVisitorARM64::VisitVecUShr(HVecUShr* instr) {
  HandleSimpleArithmeticSIMD(instr);
}

void SchedulingLatencyVisitorARM64::VisitVecSetScalars(HVecSetScalars* instr) {
  HandleSimpleArithmeticSIMD(instr);
}

void SchedulingLatencyVisitorARM64::VisitVecMultiplyAccumulate(
    [[maybe_unused]] HVecMultiplyAccumulate* instr) {
  last_visited_latency_ = kArm64SIMDMulIntegerLatency;
}

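// A non-constant index requires an extra address computation (an add with a shifted register)
// before the SIMD memory access; a constant index is folded into the memory operand.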
void SchedulingLatencyVisitorARM64::HandleVecAddress(HVecMemoryOperation* instruction,
                                                     [[maybe_unused]] size_t size) {
  HInstruction* index = instruction->InputAt(1);
  if (!index->IsConstant()) {
    last_visited_internal_latency_ += kArm64DataProcWithShifterOpLatency;
  }
}

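// Loads for String.charAt on a potentially compressed string first load the count field and
// branch on the compression flag, hence the extra memory-load and branch internal latency
// (modelled for the uncompressed case).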
void SchedulingLatencyVisitorARM64::VisitVecLoad(HVecLoad* instr) {
  last_visited_internal_latency_ = 0;
  size_t size = DataType::Size(instr->GetPackedType());

  if (instr->GetPackedType() == DataType::Type::kUint16
      && mirror::kUseStringCompression
      && instr->IsStringCharAt()) {
    // Set latencies for the uncompressed case.
    last_visited_internal_latency_ += kArm64MemoryLoadLatency + kArm64BranchLatency;
    HandleVecAddress(instr, size);
    last_visited_latency_ = kArm64SIMDMemoryLoadLatency;
  } else {
    HandleVecAddress(instr, size);
    last_visited_latency_ = kArm64SIMDMemoryLoadLatency;
  }
}

void SchedulingLatencyVisitorARM64::VisitVecStore(HVecStore* instr) {
  last_visited_internal_latency_ = 0;
  size_t size = DataType::Size(instr->GetPackedType());
  HandleVecAddress(instr, size);
  last_visited_latency_ = kArm64SIMDMemoryStoreLatency;
}

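// Only instruction kinds listed in the scheduling macros above (plus the arm64-specific concrete
// instructions) are considered schedulable here; anything else falls back to the conservative
// default in HScheduler::IsSchedulable().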
bool HSchedulerARM64::IsSchedulable(const HInstruction* instruction) const {
  switch (instruction->GetKind()) {
#define SCHEDULABLE_CASE(type, unused)       \
    case HInstruction::InstructionKind::k##type:  \
      return true;
    FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(SCHEDULABLE_CASE)
    FOR_EACH_CONCRETE_INSTRUCTION_ARM64(SCHEDULABLE_CASE)
    FOR_EACH_SCHEDULED_COMMON_INSTRUCTION(SCHEDULABLE_CASE)
#undef SCHEDULABLE_CASE

    default:
      return HScheduler::IsSchedulable(instruction);
  }
}

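// Builds the scheduling graph for `block` using the arm64-specific latency visitor defined above.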
std::pair<SchedulingGraph, ScopedArenaVector<SchedulingNode*>>
HSchedulerARM64::BuildSchedulingGraph(
    HBasicBlock* block,
    ScopedArenaAllocator* allocator,
    const HeapLocationCollector* heap_location_collector) {
  SchedulingLatencyVisitorARM64 latency_visitor;
  return HScheduler::BuildSchedulingGraph(
      block, allocator, heap_location_collector, &latency_visitor);
}

}  // namespace arm64
}  // namespace art