/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_NODES_VECTOR_H_
#define ART_COMPILER_OPTIMIZING_NODES_VECTOR_H_

// This #include should never be used by compilation, because this header file (nodes_vector.h)
// is included in the header file nodes.h itself. However, it gives editing tools better context.
#include "nodes.h"

namespace art {

// Memory alignment, represented as an offset relative to a base, where 0 <= offset < base,
// and base is a power of two. For example, the value Alignment(16, 0) means memory is
// perfectly aligned at a 16-byte boundary, whereas the value Alignment(16, 4) means
// memory is always exactly 4 bytes above such a boundary.
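// A few illustrative consequences of this definition (derived from IsAlignedAt() below):
// Alignment(16, 4).IsAlignedAt(4) is true, while Alignment(16, 4).IsAlignedAt(8) and
// Alignment(16, 4).IsAlignedAt(16) are false; Alignment(16, 0).IsAlignedAt(16) is true,
// but Alignment(16, 0).IsAlignedAt(32) reports false, since 32-byte alignment is not implied.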
class Alignment {
 public:
  Alignment(size_t base, size_t offset) : base_(base), offset_(offset) {
    DCHECK_LT(offset, base);
    DCHECK(IsPowerOfTwo(base));
  }

  // Returns true if memory is "at least" aligned at the given boundary.
  // Assumes requested base is power of two.
  bool IsAlignedAt(size_t base) const {
    DCHECK_NE(0u, base);
    DCHECK(IsPowerOfTwo(base));
    return ((offset_ | base_) & (base - 1u)) == 0;
  }

  std::string ToString() const {
    return "ALIGN(" + std::to_string(base_) + "," + std::to_string(offset_) + ")";
  }

 private:
  size_t base_;
  size_t offset_;
};

//
// Definitions of abstract vector operations in HIR.
//

// Abstraction of a vector operation, i.e., an operation that performs
// GetVectorLength() x GetPackedType() operations simultaneously.
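// For example (illustrative), an HVecAdd with packed type kPrimInt and vector length 4
// performs four 32-bit additions at once.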
60 class HVecOperation : public HVariableInputSizeInstruction {
61 public:
HVecOperation(ArenaAllocator * arena,Primitive::Type packed_type,SideEffects side_effects,size_t number_of_inputs,size_t vector_length,uint32_t dex_pc)62 HVecOperation(ArenaAllocator* arena,
63 Primitive::Type packed_type,
64 SideEffects side_effects,
65 size_t number_of_inputs,
66 size_t vector_length,
67 uint32_t dex_pc)
68 : HVariableInputSizeInstruction(side_effects,
69 dex_pc,
70 arena,
71 number_of_inputs,
72 kArenaAllocVectorNode),
73 vector_length_(vector_length) {
74 SetPackedField<TypeField>(packed_type);
75 DCHECK_LT(1u, vector_length);
76 }
77
78 // Returns the number of elements packed in a vector.
GetVectorLength()79 size_t GetVectorLength() const {
80 return vector_length_;
81 }
82
83 // Returns the number of bytes in a full vector.
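  // For instance (illustrative), 4 x kPrimInt and 8 x kPrimShort both occupy 16 bytes.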
  size_t GetVectorNumberOfBytes() const {
    return vector_length_ * Primitive::ComponentSize(GetPackedType());
  }

  // Returns the type of the vector operation: a SIMD operation looks like a FPU location.
  // TODO: we could introduce SIMD types in HIR.
  Primitive::Type GetType() const OVERRIDE {
    return Primitive::kPrimDouble;
  }

  // Returns the true component type packed in a vector.
  Primitive::Type GetPackedType() const {
    return GetPackedField<TypeField>();
  }

  DECLARE_ABSTRACT_INSTRUCTION(VecOperation);

 protected:
  // Additional packed bits.
  static constexpr size_t kFieldType = HInstruction::kNumberOfGenericPackedBits;
  static constexpr size_t kFieldTypeSize =
      MinimumBitsToStore(static_cast<size_t>(Primitive::kPrimLast));
  static constexpr size_t kNumberOfVectorOpPackedBits = kFieldType + kFieldTypeSize;
  static_assert(kNumberOfVectorOpPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
  using TypeField = BitField<Primitive::Type, kFieldType, kFieldTypeSize>;

 private:
  const size_t vector_length_;

  DISALLOW_COPY_AND_ASSIGN(HVecOperation);
};

// Abstraction of a unary vector operation.
class HVecUnaryOperation : public HVecOperation {
 public:
  HVecUnaryOperation(ArenaAllocator* arena,
                     HInstruction* input,
                     Primitive::Type packed_type,
                     size_t vector_length,
                     uint32_t dex_pc)
      : HVecOperation(arena,
                      packed_type,
                      SideEffects::None(),
                      /* number_of_inputs */ 1,
                      vector_length,
                      dex_pc) {
    SetRawInputAt(0, input);
  }

  HInstruction* GetInput() const { return InputAt(0); }

  DECLARE_ABSTRACT_INSTRUCTION(VecUnaryOperation);

 private:
  DISALLOW_COPY_AND_ASSIGN(HVecUnaryOperation);
};

// Abstraction of a binary vector operation.
class HVecBinaryOperation : public HVecOperation {
 public:
  HVecBinaryOperation(ArenaAllocator* arena,
                      HInstruction* left,
                      HInstruction* right,
                      Primitive::Type packed_type,
                      size_t vector_length,
                      uint32_t dex_pc)
      : HVecOperation(arena,
                      packed_type,
                      SideEffects::None(),
                      /* number_of_inputs */ 2,
                      vector_length,
                      dex_pc) {
    SetRawInputAt(0, left);
    SetRawInputAt(1, right);
  }

  HInstruction* GetLeft() const { return InputAt(0); }
  HInstruction* GetRight() const { return InputAt(1); }

  DECLARE_ABSTRACT_INSTRUCTION(VecBinaryOperation);

 private:
  DISALLOW_COPY_AND_ASSIGN(HVecBinaryOperation);
};

// Abstraction of a vector operation that references memory, with an alignment.
// The Android runtime guarantees at least "component size" alignment for array
// elements and, thus, vectors.
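// For instance (illustrative), elements of an int[] array are at least 4-byte aligned,
// so a kPrimInt memory operation starts out with the conservative Alignment(4, 0) below.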
class HVecMemoryOperation : public HVecOperation {
 public:
  HVecMemoryOperation(ArenaAllocator* arena,
                      Primitive::Type packed_type,
                      SideEffects side_effects,
                      size_t number_of_inputs,
                      size_t vector_length,
                      uint32_t dex_pc)
      : HVecOperation(arena, packed_type, side_effects, number_of_inputs, vector_length, dex_pc),
        alignment_(Primitive::ComponentSize(packed_type), 0) { }

  void SetAlignment(Alignment alignment) { alignment_ = alignment; }

  Alignment GetAlignment() const { return alignment_; }

  DECLARE_ABSTRACT_INSTRUCTION(VecMemoryOperation);

 private:
  Alignment alignment_;

  DISALLOW_COPY_AND_ASSIGN(HVecMemoryOperation);
};

// Packed type consistency checker (same vector length integral types may mix freely).
inline static bool HasConsistentPackedTypes(HInstruction* input, Primitive::Type type) {
  DCHECK(input->IsVecOperation());
  Primitive::Type input_type = input->AsVecOperation()->GetPackedType();
  switch (input_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
      return type == Primitive::kPrimBoolean ||
             type == Primitive::kPrimByte;
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
      return type == Primitive::kPrimChar ||
             type == Primitive::kPrimShort;
    default:
      return type == input_type;
  }
}
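
// For example (illustrative), a vector operation packing kPrimByte is consistent with a
// requested kPrimBoolean or kPrimByte, but not with kPrimInt; floating-point packed types
// only match themselves.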

//
// Definitions of concrete unary vector operations in HIR.
//

// Replicates the given scalar into a vector,
// viz. replicate(x) = [ x, .. , x ].
class HVecReplicateScalar FINAL : public HVecUnaryOperation {
 public:
  HVecReplicateScalar(ArenaAllocator* arena,
                      HInstruction* scalar,
                      Primitive::Type packed_type,
                      size_t vector_length,
                      uint32_t dex_pc = kNoDexPc)
      : HVecUnaryOperation(arena, scalar, packed_type, vector_length, dex_pc) {
    DCHECK(!scalar->IsVecOperation());
  }
  DECLARE_INSTRUCTION(VecReplicateScalar);
 private:
  DISALLOW_COPY_AND_ASSIGN(HVecReplicateScalar);
};

// Sum-reduces the given vector into a shorter vector (m < n) or scalar (m = 1),
// viz. sum-reduce[ x1, .. , xn ] = [ y1, .., ym ], where yi = sum_j x_j.
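// For example (illustrative), with m = 1: sum-reduce[ 1, 2, 3, 4 ] = [ 10 ].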
class HVecSumReduce FINAL : public HVecUnaryOperation {
 public:
  HVecSumReduce(ArenaAllocator* arena,
                HInstruction* input,
                Primitive::Type packed_type,
                size_t vector_length,
                uint32_t dex_pc = kNoDexPc)
      : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) {
    DCHECK(HasConsistentPackedTypes(input, packed_type));
  }

  // TODO: probably integral promotion
  Primitive::Type GetType() const OVERRIDE { return GetPackedType(); }

  DECLARE_INSTRUCTION(VecSumReduce);
 private:
  DISALLOW_COPY_AND_ASSIGN(HVecSumReduce);
};

// Converts every component in the vector,
// viz. cnv[ x1, .. , xn ] = [ cnv(x1), .. , cnv(xn) ].
class HVecCnv FINAL : public HVecUnaryOperation {
 public:
  HVecCnv(ArenaAllocator* arena,
          HInstruction* input,
          Primitive::Type packed_type,
          size_t vector_length,
          uint32_t dex_pc = kNoDexPc)
      : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) {
    DCHECK(input->IsVecOperation());
    DCHECK_NE(GetInputType(), GetResultType());  // actual convert
  }

  Primitive::Type GetInputType() const { return InputAt(0)->AsVecOperation()->GetPackedType(); }
  Primitive::Type GetResultType() const { return GetPackedType(); }

  DECLARE_INSTRUCTION(VecCnv);

 private:
  DISALLOW_COPY_AND_ASSIGN(HVecCnv);
};

// Negates every component in the vector,
// viz. neg[ x1, .. , xn ] = [ -x1, .. , -xn ].
class HVecNeg FINAL : public HVecUnaryOperation {
 public:
  HVecNeg(ArenaAllocator* arena,
          HInstruction* input,
          Primitive::Type packed_type,
          size_t vector_length,
          uint32_t dex_pc = kNoDexPc)
      : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) {
    DCHECK(HasConsistentPackedTypes(input, packed_type));
  }
  DECLARE_INSTRUCTION(VecNeg);
 private:
  DISALLOW_COPY_AND_ASSIGN(HVecNeg);
};

// Takes absolute value of every component in the vector,
// viz. abs[ x1, .. , xn ] = [ |x1|, .. , |xn| ].
class HVecAbs FINAL : public HVecUnaryOperation {
 public:
  HVecAbs(ArenaAllocator* arena,
          HInstruction* input,
          Primitive::Type packed_type,
          size_t vector_length,
          uint32_t dex_pc = kNoDexPc)
      : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) {
    DCHECK(HasConsistentPackedTypes(input, packed_type));
  }
  DECLARE_INSTRUCTION(VecAbs);
 private:
  DISALLOW_COPY_AND_ASSIGN(HVecAbs);
};

// Bitwise- or boolean-nots every component in the vector,
// viz. not[ x1, .. , xn ] = [ ~x1, .. , ~xn ], or
//      not[ x1, .. , xn ] = [ !x1, .. , !xn ] for boolean.
class HVecNot FINAL : public HVecUnaryOperation {
 public:
  HVecNot(ArenaAllocator* arena,
          HInstruction* input,
          Primitive::Type packed_type,
          size_t vector_length,
          uint32_t dex_pc = kNoDexPc)
      : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) {
    DCHECK(input->IsVecOperation());
  }
  DECLARE_INSTRUCTION(VecNot);
 private:
  DISALLOW_COPY_AND_ASSIGN(HVecNot);
};

//
// Definitions of concrete binary vector operations in HIR.
//

// Adds every component in the two vectors,
// viz. [ x1, .. , xn ] + [ y1, .. , yn ] = [ x1 + y1, .. , xn + yn ].
class HVecAdd FINAL : public HVecBinaryOperation {
 public:
  HVecAdd(ArenaAllocator* arena,
          HInstruction* left,
          HInstruction* right,
          Primitive::Type packed_type,
          size_t vector_length,
          uint32_t dex_pc = kNoDexPc)
      : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
    DCHECK(HasConsistentPackedTypes(left, packed_type));
    DCHECK(HasConsistentPackedTypes(right, packed_type));
  }
  DECLARE_INSTRUCTION(VecAdd);
 private:
  DISALLOW_COPY_AND_ASSIGN(HVecAdd);
};

// Performs halving add on every component in the two vectors, viz.
// rounded [ x1, .. , xn ] hradd [ y1, .. , yn ] = [ (x1 + y1 + 1) >> 1, .. , (xn + yn + 1) >> 1 ]
// or      [ x1, .. , xn ] hadd  [ y1, .. , yn ] = [ (x1 + y1) >> 1, .. , (xn + yn) >> 1 ]
// for signed operands x, y (sign extension) or unsigned operands x, y (zero extension).
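// For example (illustrative), with x1 = 5 and y1 = 6: hadd yields (5 + 6) >> 1 = 5,
// whereas the rounded hradd yields (5 + 6 + 1) >> 1 = 6.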
class HVecHalvingAdd FINAL : public HVecBinaryOperation {
 public:
  HVecHalvingAdd(ArenaAllocator* arena,
                 HInstruction* left,
                 HInstruction* right,
                 Primitive::Type packed_type,
                 size_t vector_length,
                 bool is_unsigned,
                 bool is_rounded,
                 uint32_t dex_pc = kNoDexPc)
      : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
    DCHECK(HasConsistentPackedTypes(left, packed_type));
    DCHECK(HasConsistentPackedTypes(right, packed_type));
    SetPackedFlag<kFieldHAddIsUnsigned>(is_unsigned);
    SetPackedFlag<kFieldHAddIsRounded>(is_rounded);
  }

  bool IsUnsigned() const { return GetPackedFlag<kFieldHAddIsUnsigned>(); }
  bool IsRounded() const { return GetPackedFlag<kFieldHAddIsRounded>(); }

  DECLARE_INSTRUCTION(VecHalvingAdd);

 private:
  // Additional packed bits.
  static constexpr size_t kFieldHAddIsUnsigned = HVecOperation::kNumberOfVectorOpPackedBits;
  static constexpr size_t kFieldHAddIsRounded = kFieldHAddIsUnsigned + 1;
  static constexpr size_t kNumberOfHAddPackedBits = kFieldHAddIsRounded + 1;
  static_assert(kNumberOfHAddPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");

  DISALLOW_COPY_AND_ASSIGN(HVecHalvingAdd);
};

// Subtracts every component in the two vectors,
// viz. [ x1, .. , xn ] - [ y1, .. , yn ] = [ x1 - y1, .. , xn - yn ].
class HVecSub FINAL : public HVecBinaryOperation {
 public:
  HVecSub(ArenaAllocator* arena,
          HInstruction* left,
          HInstruction* right,
          Primitive::Type packed_type,
          size_t vector_length,
          uint32_t dex_pc = kNoDexPc)
      : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
    DCHECK(HasConsistentPackedTypes(left, packed_type));
    DCHECK(HasConsistentPackedTypes(right, packed_type));
  }
  DECLARE_INSTRUCTION(VecSub);
 private:
  DISALLOW_COPY_AND_ASSIGN(HVecSub);
};

// Multiplies every component in the two vectors,
// viz. [ x1, .. , xn ] * [ y1, .. , yn ] = [ x1 * y1, .. , xn * yn ].
class HVecMul FINAL : public HVecBinaryOperation {
 public:
  HVecMul(ArenaAllocator* arena,
          HInstruction* left,
          HInstruction* right,
          Primitive::Type packed_type,
          size_t vector_length,
          uint32_t dex_pc = kNoDexPc)
      : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
    DCHECK(HasConsistentPackedTypes(left, packed_type));
    DCHECK(HasConsistentPackedTypes(right, packed_type));
  }
  DECLARE_INSTRUCTION(VecMul);
 private:
  DISALLOW_COPY_AND_ASSIGN(HVecMul);
};

// Divides every component in the two vectors,
// viz. [ x1, .. , xn ] / [ y1, .. , yn ] = [ x1 / y1, .. , xn / yn ].
class HVecDiv FINAL : public HVecBinaryOperation {
 public:
  HVecDiv(ArenaAllocator* arena,
          HInstruction* left,
          HInstruction* right,
          Primitive::Type packed_type,
          size_t vector_length,
          uint32_t dex_pc = kNoDexPc)
      : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
    DCHECK(HasConsistentPackedTypes(left, packed_type));
    DCHECK(HasConsistentPackedTypes(right, packed_type));
  }
  DECLARE_INSTRUCTION(VecDiv);
 private:
  DISALLOW_COPY_AND_ASSIGN(HVecDiv);
};

// Takes minimum of every component in the two vectors,
// viz. MIN( [ x1, .. , xn ] , [ y1, .. , yn ]) = [ min(x1, y1), .. , min(xn, yn) ].
class HVecMin FINAL : public HVecBinaryOperation {
 public:
  HVecMin(ArenaAllocator* arena,
          HInstruction* left,
          HInstruction* right,
          Primitive::Type packed_type,
          size_t vector_length,
          uint32_t dex_pc = kNoDexPc)
      : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
    DCHECK(HasConsistentPackedTypes(left, packed_type));
    DCHECK(HasConsistentPackedTypes(right, packed_type));
  }
  DECLARE_INSTRUCTION(VecMin);
 private:
  DISALLOW_COPY_AND_ASSIGN(HVecMin);
};

// Takes maximum of every component in the two vectors,
// viz. MAX( [ x1, .. , xn ] , [ y1, .. , yn ]) = [ max(x1, y1), .. , max(xn, yn) ].
class HVecMax FINAL : public HVecBinaryOperation {
 public:
  HVecMax(ArenaAllocator* arena,
          HInstruction* left,
          HInstruction* right,
          Primitive::Type packed_type,
          size_t vector_length,
          uint32_t dex_pc = kNoDexPc)
      : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
    DCHECK(HasConsistentPackedTypes(left, packed_type));
    DCHECK(HasConsistentPackedTypes(right, packed_type));
  }
  DECLARE_INSTRUCTION(VecMax);
 private:
  DISALLOW_COPY_AND_ASSIGN(HVecMax);
};

// Bitwise-ands every component in the two vectors,
// viz. [ x1, .. , xn ] & [ y1, .. , yn ] = [ x1 & y1, .. , xn & yn ].
class HVecAnd FINAL : public HVecBinaryOperation {
 public:
  HVecAnd(ArenaAllocator* arena,
          HInstruction* left,
          HInstruction* right,
          Primitive::Type packed_type,
          size_t vector_length,
          uint32_t dex_pc = kNoDexPc)
      : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
    DCHECK(left->IsVecOperation() && right->IsVecOperation());
  }
  DECLARE_INSTRUCTION(VecAnd);
 private:
  DISALLOW_COPY_AND_ASSIGN(HVecAnd);
};

// Bitwise-and-nots every component in the two vectors,
// viz. [ x1, .. , xn ] and-not [ y1, .. , yn ] = [ ~x1 & y1, .. , ~xn & yn ].
class HVecAndNot FINAL : public HVecBinaryOperation {
 public:
  HVecAndNot(ArenaAllocator* arena,
             HInstruction* left,
             HInstruction* right,
             Primitive::Type packed_type,
             size_t vector_length,
             uint32_t dex_pc = kNoDexPc)
      : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
    DCHECK(left->IsVecOperation() && right->IsVecOperation());
  }
  DECLARE_INSTRUCTION(VecAndNot);
 private:
  DISALLOW_COPY_AND_ASSIGN(HVecAndNot);
};

// Bitwise-ors every component in the two vectors,
// viz. [ x1, .. , xn ] | [ y1, .. , yn ] = [ x1 | y1, .. , xn | yn ].
class HVecOr FINAL : public HVecBinaryOperation {
 public:
  HVecOr(ArenaAllocator* arena,
         HInstruction* left,
         HInstruction* right,
         Primitive::Type packed_type,
         size_t vector_length,
         uint32_t dex_pc = kNoDexPc)
      : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
    DCHECK(left->IsVecOperation() && right->IsVecOperation());
  }
  DECLARE_INSTRUCTION(VecOr);
 private:
  DISALLOW_COPY_AND_ASSIGN(HVecOr);
};

// Bitwise-xors every component in the two vectors,
// viz. [ x1, .. , xn ] ^ [ y1, .. , yn ] = [ x1 ^ y1, .. , xn ^ yn ].
class HVecXor FINAL : public HVecBinaryOperation {
 public:
  HVecXor(ArenaAllocator* arena,
          HInstruction* left,
          HInstruction* right,
          Primitive::Type packed_type,
          size_t vector_length,
          uint32_t dex_pc = kNoDexPc)
      : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
    DCHECK(left->IsVecOperation() && right->IsVecOperation());
  }
  DECLARE_INSTRUCTION(VecXor);
 private:
  DISALLOW_COPY_AND_ASSIGN(HVecXor);
};

// Logically shifts every component in the vector left by the given distance,
// viz. [ x1, .. , xn ] << d = [ x1 << d, .. , xn << d ].
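// (The shift distance d is typically a scalar operand, not a vector of the packed type,
// which is why the shift constructors below only check the packed type of the left operand.)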
class HVecShl FINAL : public HVecBinaryOperation {
 public:
  HVecShl(ArenaAllocator* arena,
          HInstruction* left,
          HInstruction* right,
          Primitive::Type packed_type,
          size_t vector_length,
          uint32_t dex_pc = kNoDexPc)
      : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
    DCHECK(HasConsistentPackedTypes(left, packed_type));
  }
  DECLARE_INSTRUCTION(VecShl);
 private:
  DISALLOW_COPY_AND_ASSIGN(HVecShl);
};

// Arithmetically shifts every component in the vector right by the given distance,
// viz. [ x1, .. , xn ] >> d = [ x1 >> d, .. , xn >> d ].
class HVecShr FINAL : public HVecBinaryOperation {
 public:
  HVecShr(ArenaAllocator* arena,
          HInstruction* left,
          HInstruction* right,
          Primitive::Type packed_type,
          size_t vector_length,
          uint32_t dex_pc = kNoDexPc)
      : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
    DCHECK(HasConsistentPackedTypes(left, packed_type));
  }
  DECLARE_INSTRUCTION(VecShr);
 private:
  DISALLOW_COPY_AND_ASSIGN(HVecShr);
};

// Logically shifts every component in the vector right by the given distance,
// viz. [ x1, .. , xn ] >>> d = [ x1 >>> d, .. , xn >>> d ].
class HVecUShr FINAL : public HVecBinaryOperation {
 public:
  HVecUShr(ArenaAllocator* arena,
           HInstruction* left,
           HInstruction* right,
           Primitive::Type packed_type,
           size_t vector_length,
           uint32_t dex_pc = kNoDexPc)
      : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
    DCHECK(HasConsistentPackedTypes(left, packed_type));
  }
  DECLARE_INSTRUCTION(VecUShr);
 private:
  DISALLOW_COPY_AND_ASSIGN(HVecUShr);
};

//
// Definitions of concrete miscellaneous vector operations in HIR.
//

// Assigns the given scalar elements to a vector,
// viz. set( array(x1, .., xn) ) = [ x1, .. , xn ].
class HVecSetScalars FINAL : public HVecOperation {
 public:
  HVecSetScalars(ArenaAllocator* arena,
                 HInstruction** scalars,  // array
                 Primitive::Type packed_type,
                 size_t vector_length,
                 uint32_t dex_pc = kNoDexPc)
      : HVecOperation(arena,
                      packed_type,
                      SideEffects::None(),
                      /* number_of_inputs */ vector_length,
                      vector_length,
                      dex_pc) {
    for (size_t i = 0; i < vector_length; i++) {
      DCHECK(!scalars[i]->IsVecOperation());
      SetRawInputAt(i, scalars[i]);  // each scalar occupies its own input slot
    }
  }
  DECLARE_INSTRUCTION(VecSetScalars);
 private:
  DISALLOW_COPY_AND_ASSIGN(HVecSetScalars);
};

// Multiplies every component in the two vectors, adds the result vector to the accumulator vector,
// viz. [ acc1, .., accn ] + [ x1, .. , xn ] * [ y1, .. , yn ] =
//      [ acc1 + x1 * y1, .. , accn + xn * yn ].
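// For the kSub (MSUB) variant, the product is subtracted from the accumulator instead,
// viz. [ acc1 - x1 * y1, .. , accn - xn * yn ].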
class HVecMultiplyAccumulate FINAL : public HVecOperation {
 public:
  HVecMultiplyAccumulate(ArenaAllocator* arena,
                         InstructionKind op,
                         HInstruction* accumulator,
                         HInstruction* mul_left,
                         HInstruction* mul_right,
                         Primitive::Type packed_type,
                         size_t vector_length,
                         uint32_t dex_pc = kNoDexPc)
      : HVecOperation(arena,
                      packed_type,
                      SideEffects::None(),
                      /* number_of_inputs */ 3,
                      vector_length,
                      dex_pc),
        op_kind_(op) {
    DCHECK(op == InstructionKind::kAdd || op == InstructionKind::kSub);
    DCHECK(HasConsistentPackedTypes(accumulator, packed_type));
    DCHECK(HasConsistentPackedTypes(mul_left, packed_type));
    DCHECK(HasConsistentPackedTypes(mul_right, packed_type));
    SetRawInputAt(kInputAccumulatorIndex, accumulator);
    SetRawInputAt(kInputMulLeftIndex, mul_left);
    SetRawInputAt(kInputMulRightIndex, mul_right);
  }

  static constexpr int kInputAccumulatorIndex = 0;
  static constexpr int kInputMulLeftIndex = 1;
  static constexpr int kInputMulRightIndex = 2;

  bool CanBeMoved() const OVERRIDE { return true; }

  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
    return op_kind_ == other->AsVecMultiplyAccumulate()->op_kind_;
  }

  InstructionKind GetOpKind() const { return op_kind_; }

  DECLARE_INSTRUCTION(VecMultiplyAccumulate);

 private:
  // Indicates if this is a MADD or MSUB.
  const InstructionKind op_kind_;

  DISALLOW_COPY_AND_ASSIGN(HVecMultiplyAccumulate);
};

// Loads a vector from memory, viz. load(mem, 1)
// yields the vector [ mem(1), .. , mem(n) ].
class HVecLoad FINAL : public HVecMemoryOperation {
 public:
  HVecLoad(ArenaAllocator* arena,
           HInstruction* base,
           HInstruction* index,
           Primitive::Type packed_type,
           size_t vector_length,
           bool is_string_char_at,
           uint32_t dex_pc = kNoDexPc)
      : HVecMemoryOperation(arena,
                            packed_type,
                            SideEffects::ArrayReadOfType(packed_type),
                            /* number_of_inputs */ 2,
                            vector_length,
                            dex_pc) {
    SetRawInputAt(0, base);
    SetRawInputAt(1, index);
    SetPackedFlag<kFieldIsStringCharAt>(is_string_char_at);
  }
  DECLARE_INSTRUCTION(VecLoad);

  bool IsStringCharAt() const { return GetPackedFlag<kFieldIsStringCharAt>(); }

 private:
  // Additional packed bits.
  static constexpr size_t kFieldIsStringCharAt = HVecOperation::kNumberOfVectorOpPackedBits;
  static constexpr size_t kNumberOfVecLoadPackedBits = kFieldIsStringCharAt + 1;
  static_assert(kNumberOfVecLoadPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");

  DISALLOW_COPY_AND_ASSIGN(HVecLoad);
};

// Stores a vector to memory, viz. store(mem, 1, [ x1, .. , xn ])
// sets mem(1) = x1, .. , mem(n) = xn.
class HVecStore FINAL : public HVecMemoryOperation {
 public:
  HVecStore(ArenaAllocator* arena,
            HInstruction* base,
            HInstruction* index,
            HInstruction* value,
            Primitive::Type packed_type,
            size_t vector_length,
            uint32_t dex_pc = kNoDexPc)
      : HVecMemoryOperation(arena,
                            packed_type,
                            SideEffects::ArrayWriteOfType(packed_type),
                            /* number_of_inputs */ 3,
                            vector_length,
                            dex_pc) {
    DCHECK(HasConsistentPackedTypes(value, packed_type));
    SetRawInputAt(0, base);
    SetRawInputAt(1, index);
    SetRawInputAt(2, value);
  }
  DECLARE_INSTRUCTION(VecStore);
 private:
  DISALLOW_COPY_AND_ASSIGN(HVecStore);
};

}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_NODES_VECTOR_H_