//===-- ARMTargetTransformInfo.cpp - ARM specific TTI ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "ARMTargetTransformInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

#define DEBUG_TYPE "armtti"

int ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned Bits = Ty->getPrimitiveSizeInBits();
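  // Immediates wider than 64 bits, or on a type with no fixed primitive size,
  // are not modeled here; fall back to a conservative cost.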
  if (Bits == 0 || Imm.getActiveBits() >= 64)
    return 4;

  int64_t SImmVal = Imm.getSExtValue();
  uint64_t ZImmVal = Imm.getZExtValue();
  if (!ST->isThumb()) {
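    // A 16-bit immediate (movw), an ARM shifter-operand immediate, or the
    // complement of one (materializable with mvn) takes a single instruction.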
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getSOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  if (ST->isThumb2()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  // Thumb1.
  if (SImmVal >= 0 && SImmVal < 256)
    return 1;
  if ((~ZImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
    return 2;
  // Load from constantpool.
  return 3;
}

// Constants smaller than 256 fit in the immediate field of Thumb1
// instructions, so they get a zero cost; anything larger costs 1.
int ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
                                      const APInt &Imm, Type *Ty) {
  if (Imm.isNonNegative() && Imm.getLimitedValue() < 256)
    return 0;

  return 1;
}

int ARMTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  // Division by a constant can be turned into multiplication, but only if we
  // know it's a constant. So it's not so much that the immediate is cheap
  // (it's not), but that the alternative is worse.
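  // For example, 'udiv i32 %x, 10' is typically lowered to a multiply-high
  // by a magic constant followed by shifts, so the immediate never has to be
  // materialized in a register.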
  // FIXME: this is probably unneeded with GlobalISel.
  if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
       Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
      Idx == 1)
    return 0;

  return getIntImmCost(Imm, Ty);
}

int ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Single to/from double precision conversions.
  static const CostTblEntry NEONFltDblTbl[] = {
    // Vector fptrunc/fpext conversions.
    { ISD::FP_ROUND,  MVT::v2f64, 2 },
    { ISD::FP_EXTEND, MVT::v2f32, 2 },
    { ISD::FP_EXTEND, MVT::v4f32, 4 }
  };

  if (Src->isVectorTy() && ST->hasNEON() && (ISD == ISD::FP_ROUND ||
                                             ISD == ISD::FP_EXTEND)) {
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
    if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  // Some arithmetic, load and store operations have specific instructions
  // to cast up/down their types automatically at no extra cost.
  // TODO: Get these tables to know at least what the related operations are.
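  // (E.g. widening NEON arithmetic such as vmull/vaddl performs the extension
  // itself, which is why some of the extend entries below have zero cost.)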
  static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64, 0 },
    { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i32, 1 },

    // The number of vmovl instructions for the extension.
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },

    // Operations that we legalize using splitting.
    { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i32, 6 },
    { ISD::TRUNCATE,    MVT::v8i8, MVT::v8i32, 3 },

    // Vector float <-> i32 conversions.
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },

    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },

    { ISD::FP_TO_SINT,  MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT,  MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_UINT,  MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_SINT,  MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT,  MVT::v4i16, MVT::v4f32, 2 },

    // Vector double <-> i32 conversions.
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },

    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },

    { ISD::FP_TO_SINT,  MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT,  MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT,  MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_UINT,  MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f32, 8 },
    { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 8 }
  };

  if (SrcTy.isVector() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar float to integer conversions.
  static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = {
    { ISD::FP_TO_SINT, MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 10 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 10 }
  };
  if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar integer to float conversions.
  static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = {
    { ISD::SINT_TO_FP, MVT::f32, MVT::i1, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i1, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i1, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i1, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i8, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i8, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i8, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i8, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i16, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i16, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i16, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i16, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 10 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 10 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 10 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 10 }
  };

  if (SrcTy.isInteger() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl,
                                                   ISD, DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar integer conversion costs.
  static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
    // i16 -> i64 requires two dependent operations.
    { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },

    // Truncates on i64 are assumed to be free.
    { ISD::TRUNCATE,    MVT::i32, MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i16, MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i8,  MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i1,  MVT::i64, 0 }
  };

  if (SrcTy.isInteger()) {
    if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

int ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                   unsigned Index) {
  // Penalize inserting into a D-subregister. We end up with three times
  // lower estimated throughput on Swift.
  if (ST->hasSlowLoadDSubregister() && Opcode == Instruction::InsertElement &&
      ValTy->isVectorTy() && ValTy->getScalarSizeInBits() <= 32)
    return 3;

  if ((Opcode == Instruction::InsertElement ||
       Opcode == Instruction::ExtractElement)) {
    // Cross-class copies are expensive on many microarchitectures,
    // so assume they are expensive by default.
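    // (For instance, moving a lane between a NEON register and a core integer
    // register can incur a multi-cycle stall on some cores.)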
    if (ValTy->getVectorElementType()->isIntegerTy())
      return 3;

    // Even if it's not a cross-class copy, this likely leads to mixing
    // of NEON and VFP code and should therefore be penalized.
    if (ValTy->isVectorTy() &&
        ValTy->getScalarSizeInBits() <= 32)
      return std::max<int>(BaseT::getVectorInstrCost(Opcode, ValTy, Index), 2);
  }

  return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
}

int ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // On NEON a vector select gets lowered to vbsl.
  if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT) {
    // Lowering of some vector selects is currently far from perfect.
    static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = {
      { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
      { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
    };

    EVT SelCondTy = TLI->getValueType(DL, CondTy);
    EVT SelValTy = TLI->getValueType(DL, ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
                                                     SelCondTy.getSimpleVT(),
                                                     SelValTy.getSimpleVT()))
        return Entry->Cost;
    }

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
    return LT.first;
  }

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

int ARMTTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where
  // the computation can more often be merged into the index mode. The
  // resulting extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  // In many cases the address computation is not merged into the instruction
  // addressing mode.
  return 1;
}

int ARMTTIImpl::getFPOpCost(Type *Ty) {
  // Use logic similar to that in ARMISelLowering:
  // Any ARM CPU with VFP2 has floating point, but Thumb1 didn't have access
  // to VFP.

  if (ST->hasVFP2() && !ST->isThumb1Only()) {
    if (Ty->isFloatTy()) {
      return TargetTransformInfo::TCC_Basic;
    }

    if (Ty->isDoubleTy()) {
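      // A single-precision-only FPU (fp-only-sp) has no f64 arithmetic, so
      // double-precision operations get expanded, typically to library calls.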
      return ST->isFPOnlySP() ? TargetTransformInfo::TCC_Expensive :
                                TargetTransformInfo::TCC_Basic;
    }
  }

  return TargetTransformInfo::TCC_Expensive;
}

int ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // We only handle costs of reverse and alternate shuffles for now.
  if (Kind != TTI::SK_Reverse && Kind != TTI::SK_Alternate)
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);

  if (Kind == TTI::SK_Reverse) {
    static const CostTblEntry NEONShuffleTbl[] = {
        // Reverse shuffles cost one instruction if we are shuffling within a
        // double word (vrev) or two if we shuffle a quad word (vrev, vext).
        {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},

        {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

    if (const auto *Entry = CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE,
                                            LT.second))
      return LT.first * Entry->Cost;

    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }
  if (Kind == TTI::SK_Alternate) {
    static const CostTblEntry NEONAltShuffleTbl[] = {
        // Alt shuffle cost table for ARM. Cost is the number of instructions
        // required to create the shuffled vector.

        {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},

        {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},

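        // Wider integer alternate shuffles are effectively scalarized into
        // per-element extract/insert pairs, hence two instructions per lane.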
        {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},

        {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    if (const auto *Entry = CostTableLookup(NEONAltShuffleTbl,
                                            ISD::VECTOR_SHUFFLE, LT.second))
      return LT.first * Entry->Cost;
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }
  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

int ARMTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {

  int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  const unsigned FunctionCallDivCost = 20;
  const unsigned ReciprocalDivCost = 10;
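  // FunctionCallDivCost models divisions that expand to a library call per
  // element; ReciprocalDivCost models the small element types that are assumed
  // to be lowered via a cheaper reciprocal-estimate style sequence instead.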
  static const CostTblEntry CostTbl[] = {
    // Division.
    // These costs are somewhat arbitrary. Choose a cost of 20 to indicate that
    // vectorizing division (which adds a function call) is going to be very
    // expensive.
    // Double register types.
    { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v4i16,     ReciprocalDivCost},
    { ISD::UDIV, MVT::v4i16,     ReciprocalDivCost},
    { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
    { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v8i8,      ReciprocalDivCost},
    { ISD::UDIV, MVT::v8i8,      ReciprocalDivCost},
    { ISD::SREM, MVT::v8i8,  8 * FunctionCallDivCost},
    { ISD::UREM, MVT::v8i8,  8 * FunctionCallDivCost},
    // Quad register types.
    { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
    // Multiplication.
  };

  if (ST->hasNEON())
    if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second))
      return LT.first * Entry->Cost;

  int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                           Opd1PropInfo, Opd2PropInfo);

  // This is somewhat of a hack. The problem we are facing is that SROA
  // creates sequences of shift, and, and or instructions to construct values.
  // ISel recognizes these sequences and gives them zero cost, but the same is
  // not true for the vectorized code. Because we have support for v2i64 but
  // not i64, those sequences look particularly beneficial to vectorize.
  // To work around this we increase the cost of v2i64 operations to make them
  // seem less beneficial.
  if (LT.second == MVT::v2i64 &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue)
    Cost += 4;

  return Cost;
}

int ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace) {
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);

  if (Src->isVectorTy() && Alignment != 16 &&
      Src->getVectorElementType()->isDoubleTy()) {
    // Unaligned loads/stores are extremely inefficient.
    // We need 4 uops for vst1/vld1 vs. 1 uop for vldr/vstr.
    return LT.first * 4;
  }
  return LT.first;
}

int ARMTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace) {
  assert(Factor >= 2 && "Invalid interleave factor");
  assert(isa<VectorType>(VecTy) && "Expect a vector type");

  // vldN/vstN don't support vector types with i64/f64 elements.
  bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64;

  if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits) {
    unsigned NumElts = VecTy->getVectorNumElements();
    Type *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor);
    unsigned SubVecSize = DL.getTypeSizeInBits(SubVecTy);

    // vldN/vstN only support legal vector types whose size is 64 or 128 bits.
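    // Treating the whole group as a single vldN/vstN, model its cost as
    // Factor (roughly one uop per interleaved sub-vector).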
    if (NumElts % Factor == 0 && (SubVecSize == 64 || SubVecSize == 128))
      return Factor;
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
}