1 //===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This contains code to emit Builtin calls as LLVM code.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "CGCXXABI.h"
14 #include "CGObjCRuntime.h"
15 #include "CGOpenCLRuntime.h"
16 #include "CGRecordLayout.h"
17 #include "CodeGenFunction.h"
18 #include "CodeGenModule.h"
19 #include "ConstantEmitter.h"
20 #include "PatternInit.h"
21 #include "TargetInfo.h"
22 #include "clang/AST/ASTContext.h"
23 #include "clang/AST/Attr.h"
24 #include "clang/AST/Decl.h"
25 #include "clang/AST/OSLog.h"
26 #include "clang/Basic/TargetBuiltins.h"
27 #include "clang/Basic/TargetInfo.h"
28 #include "clang/CodeGen/CGFunctionInfo.h"
29 #include "llvm/ADT/SmallPtrSet.h"
30 #include "llvm/ADT/StringExtras.h"
31 #include "llvm/Analysis/ValueTracking.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/InlineAsm.h"
34 #include "llvm/IR/Intrinsics.h"
35 #include "llvm/IR/IntrinsicsAArch64.h"
36 #include "llvm/IR/IntrinsicsAMDGPU.h"
37 #include "llvm/IR/IntrinsicsARM.h"
38 #include "llvm/IR/IntrinsicsBPF.h"
39 #include "llvm/IR/IntrinsicsHexagon.h"
40 #include "llvm/IR/IntrinsicsNVPTX.h"
41 #include "llvm/IR/IntrinsicsPowerPC.h"
42 #include "llvm/IR/IntrinsicsR600.h"
43 #include "llvm/IR/IntrinsicsS390.h"
44 #include "llvm/IR/IntrinsicsWebAssembly.h"
45 #include "llvm/IR/IntrinsicsX86.h"
46 #include "llvm/IR/MDBuilder.h"
47 #include "llvm/IR/MatrixBuilder.h"
48 #include "llvm/Support/ConvertUTF.h"
49 #include "llvm/Support/ScopedPrinter.h"
50 #include "llvm/Support/X86TargetParser.h"
51 #include <sstream>
52
53 using namespace clang;
54 using namespace CodeGen;
55 using namespace llvm;
56
57 static
58 int64_t clamp(int64_t Value, int64_t Low, int64_t High) {
59 return std::min(High, std::max(Low, Value));
60 }
61
62 static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
63 Align AlignmentInBytes) {
64 ConstantInt *Byte;
65 switch (CGF.getLangOpts().getTrivialAutoVarInit()) {
66 case LangOptions::TrivialAutoVarInitKind::Uninitialized:
67 // Nothing to initialize.
68 return;
69 case LangOptions::TrivialAutoVarInitKind::Zero:
70 Byte = CGF.Builder.getInt8(0x00);
71 break;
72 case LangOptions::TrivialAutoVarInitKind::Pattern: {
73 llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext());
74 Byte = llvm::dyn_cast<llvm::ConstantInt>(
75 initializationPatternFor(CGF.CGM, Int8));
76 break;
77 }
78 }
79 if (CGF.CGM.stopAutoInit())
80 return;
81 auto *I = CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
82 I->addAnnotationMetadata("auto-init");
83 }
84
85 /// getBuiltinLibFunction - Given a builtin id for a function like
86 /// "__builtin_fabsf", return a Function* for "fabsf".
87 llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
88 unsigned BuiltinID) {
89 assert(Context.BuiltinInfo.isLibFunction(BuiltinID));
90
91 // Get the name, skip over the __builtin_ prefix (if necessary).
92 StringRef Name;
93 GlobalDecl D(FD);
94
95 // If the builtin has been declared explicitly with an assembler label,
96 // use the mangled name. This differs from the plain label on platforms
97 // that prefix labels.
98 if (FD->hasAttr<AsmLabelAttr>())
99 Name = getMangledName(D);
100 else
101 Name = Context.BuiltinInfo.getName(BuiltinID) + 10;
102
103 llvm::FunctionType *Ty =
104 cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
105
106 return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
107 }
108
109 /// Emit the conversions required to turn the given value into an
110 /// integer of the given size.
111 static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
112 QualType T, llvm::IntegerType *IntType) {
113 V = CGF.EmitToMemory(V, T);
114
115 if (V->getType()->isPointerTy())
116 return CGF.Builder.CreatePtrToInt(V, IntType);
117
118 assert(V->getType() == IntType);
119 return V;
120 }
121
122 static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
123 QualType T, llvm::Type *ResultType) {
124 V = CGF.EmitFromMemory(V, T);
125
126 if (ResultType->isPointerTy())
127 return CGF.Builder.CreateIntToPtr(V, ResultType);
128
129 assert(V->getType() == ResultType);
130 return V;
131 }
132
133 /// Utility to insert an atomic instruction based on Intrinsic::ID
134 /// and the expression node.
135 static Value *MakeBinaryAtomicValue(
136 CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
137 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
138 QualType T = E->getType();
139 assert(E->getArg(0)->getType()->isPointerType());
140 assert(CGF.getContext().hasSameUnqualifiedType(T,
141 E->getArg(0)->getType()->getPointeeType()));
142 assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
143
144 llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
145 unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
146
147 llvm::IntegerType *IntType =
148 llvm::IntegerType::get(CGF.getLLVMContext(),
149 CGF.getContext().getTypeSize(T));
150 llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
151
152 llvm::Value *Args[2];
153 Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
154 Args[1] = CGF.EmitScalarExpr(E->getArg(1));
155 llvm::Type *ValueType = Args[1]->getType();
156 Args[1] = EmitToInt(CGF, Args[1], T, IntType);
157
158 llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
159 Kind, Args[0], Args[1], Ordering);
160 return EmitFromInt(CGF, Result, T, ValueType);
161 }
162
163 static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) {
164 Value *Val = CGF.EmitScalarExpr(E->getArg(0));
165 Value *Address = CGF.EmitScalarExpr(E->getArg(1));
166
167 // Convert the type of the pointer to a pointer to the stored type.
168 Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
169 Value *BC = CGF.Builder.CreateBitCast(
170 Address, llvm::PointerType::getUnqual(Val->getType()), "cast");
171 LValue LV = CGF.MakeNaturalAlignAddrLValue(BC, E->getArg(0)->getType());
172 LV.setNontemporal(true);
173 CGF.EmitStoreOfScalar(Val, LV, false);
174 return nullptr;
175 }
176
177 static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) {
178 Value *Address = CGF.EmitScalarExpr(E->getArg(0));
179
180 LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType());
181 LV.setNontemporal(true);
182 return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
183 }
184
185 static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
186 llvm::AtomicRMWInst::BinOp Kind,
187 const CallExpr *E) {
188 return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
189 }
190
191 /// Utility to insert an atomic instruction based on Intrinsic::ID and
192 /// the expression node, where the return value is the result of the
193 /// operation.
194 static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
195 llvm::AtomicRMWInst::BinOp Kind,
196 const CallExpr *E,
197 Instruction::BinaryOps Op,
198 bool Invert = false) {
199 QualType T = E->getType();
200 assert(E->getArg(0)->getType()->isPointerType());
201 assert(CGF.getContext().hasSameUnqualifiedType(T,
202 E->getArg(0)->getType()->getPointeeType()));
203 assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
204
205 llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
206 unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
207
208 llvm::IntegerType *IntType =
209 llvm::IntegerType::get(CGF.getLLVMContext(),
210 CGF.getContext().getTypeSize(T));
211 llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
212
213 llvm::Value *Args[2];
214 Args[1] = CGF.EmitScalarExpr(E->getArg(1));
215 llvm::Type *ValueType = Args[1]->getType();
216 Args[1] = EmitToInt(CGF, Args[1], T, IntType);
217 Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
218
219 llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
220 Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent);
221 Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
222 if (Invert)
223 Result =
224 CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
225 llvm::ConstantInt::getAllOnesValue(IntType));
226 Result = EmitFromInt(CGF, Result, T, ValueType);
227 return RValue::get(Result);
228 }
229
230 /// Utility to insert an atomic cmpxchg instruction.
231 ///
232 /// @param CGF The current codegen function.
233 /// @param E Builtin call expression to convert to cmpxchg.
234 /// arg0 - address to operate on
235 /// arg1 - value to compare with
236 /// arg2 - new value
237 /// @param ReturnBool Specifies whether to return success flag of
238 /// cmpxchg result or the old value.
239 ///
240 /// @returns result of cmpxchg, according to ReturnBool
241 ///
242 /// Note: In order to lower Microsoft's _InterlockedCompareExchange* intrinsics,
243 /// invoke the function EmitAtomicCmpXchgForMSIntrin.
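///
/// As an illustrative sketch only (assuming a 32-bit int operand), a source
/// call such as
///   bool ok = __sync_bool_compare_and_swap(&x, expected, desired);
/// is expected to lower to IR roughly of the form
///   %pair = cmpxchg i32* %x, i32 %expected, i32 %desired seq_cst seq_cst
///   %ok   = extractvalue { i32, i1 } %pair, 1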
244 static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
245 bool ReturnBool) {
246 QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
247 llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
248 unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
249
250 llvm::IntegerType *IntType = llvm::IntegerType::get(
251 CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
252 llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
253
254 Value *Args[3];
255 Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
256 Args[1] = CGF.EmitScalarExpr(E->getArg(1));
257 llvm::Type *ValueType = Args[1]->getType();
258 Args[1] = EmitToInt(CGF, Args[1], T, IntType);
259 Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);
260
261 Value *Pair = CGF.Builder.CreateAtomicCmpXchg(
262 Args[0], Args[1], Args[2], llvm::AtomicOrdering::SequentiallyConsistent,
263 llvm::AtomicOrdering::SequentiallyConsistent);
264 if (ReturnBool)
265 // Extract boolean success flag and zext it to int.
266 return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
267 CGF.ConvertType(E->getType()));
268 else
269 // Extract old value and emit it using the same type as compare value.
270 return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
271 ValueType);
272 }
273
274 /// This function should be invoked to emit atomic cmpxchg for Microsoft's
275 /// _InterlockedCompareExchange* intrinsics which have the following signature:
276 /// T _InterlockedCompareExchange(T volatile *Destination,
277 /// T Exchange,
278 /// T Comparand);
279 ///
280 /// Whereas the llvm 'cmpxchg' instruction has the following syntax:
281 /// cmpxchg *Destination, Comparand, Exchange.
282 /// So we need to swap Comparand and Exchange when invoking
283 /// CreateAtomicCmpXchg. That is the reason we could not use the above utility
284 /// function MakeAtomicCmpXchgValue since it expects the arguments to be
285 /// already swapped.
286
287 static
288 Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
289 AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
290 assert(E->getArg(0)->getType()->isPointerType());
291 assert(CGF.getContext().hasSameUnqualifiedType(
292 E->getType(), E->getArg(0)->getType()->getPointeeType()));
293 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
294 E->getArg(1)->getType()));
295 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
296 E->getArg(2)->getType()));
297
298 auto *Destination = CGF.EmitScalarExpr(E->getArg(0));
299 auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));
300 auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));
301
302 // For Release ordering, the failure ordering should be Monotonic.
303 auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
304 AtomicOrdering::Monotonic :
305 SuccessOrdering;
306
307 // The atomic instruction is marked volatile for consistency with MSVC. This
308 // blocks the few atomics optimizations that LLVM has. If we want to optimize
309 // _Interlocked* operations in the future, we will have to remove the volatile
310 // marker.
311 auto *Result = CGF.Builder.CreateAtomicCmpXchg(
312 Destination, Comparand, Exchange,
313 SuccessOrdering, FailureOrdering);
314 Result->setVolatile(true);
315 return CGF.Builder.CreateExtractValue(Result, 0);
316 }
317
318 // 64-bit Microsoft platforms support 128-bit cmpxchg operations. They are
319 // prototyped like this:
320 //
321 // unsigned char _InterlockedCompareExchange128...(
322 // __int64 volatile * _Destination,
323 // __int64 _ExchangeHigh,
324 // __int64 _ExchangeLow,
325 // __int64 * _ComparandResult);
326 static Value *EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF,
327 const CallExpr *E,
328 AtomicOrdering SuccessOrdering) {
329 assert(E->getNumArgs() == 4);
330 llvm::Value *Destination = CGF.EmitScalarExpr(E->getArg(0));
331 llvm::Value *ExchangeHigh = CGF.EmitScalarExpr(E->getArg(1));
332 llvm::Value *ExchangeLow = CGF.EmitScalarExpr(E->getArg(2));
333 llvm::Value *ComparandPtr = CGF.EmitScalarExpr(E->getArg(3));
334
335 assert(Destination->getType()->isPointerTy());
336 assert(!ExchangeHigh->getType()->isPointerTy());
337 assert(!ExchangeLow->getType()->isPointerTy());
338 assert(ComparandPtr->getType()->isPointerTy());
339
340 // For Release ordering, the failure ordering should be Monotonic.
341 auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release
342 ? AtomicOrdering::Monotonic
343 : SuccessOrdering;
344
345 // Convert to i128 pointers and values.
346 llvm::Type *Int128Ty = llvm::IntegerType::get(CGF.getLLVMContext(), 128);
347 llvm::Type *Int128PtrTy = Int128Ty->getPointerTo();
348 Destination = CGF.Builder.CreateBitCast(Destination, Int128PtrTy);
349 Address ComparandResult(CGF.Builder.CreateBitCast(ComparandPtr, Int128PtrTy),
350 CGF.getContext().toCharUnitsFromBits(128));
351
352 // (((i128)hi) << 64) | ((i128)lo)
353 ExchangeHigh = CGF.Builder.CreateZExt(ExchangeHigh, Int128Ty);
354 ExchangeLow = CGF.Builder.CreateZExt(ExchangeLow, Int128Ty);
355 ExchangeHigh =
356 CGF.Builder.CreateShl(ExchangeHigh, llvm::ConstantInt::get(Int128Ty, 64));
357 llvm::Value *Exchange = CGF.Builder.CreateOr(ExchangeHigh, ExchangeLow);
358
359 // Load the comparand for the instruction.
360 llvm::Value *Comparand = CGF.Builder.CreateLoad(ComparandResult);
361
362 auto *CXI = CGF.Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
363 SuccessOrdering, FailureOrdering);
364
365 // The atomic instruction is marked volatile for consistency with MSVC. This
366 // blocks the few atomics optimizations that LLVM has. If we want to optimize
367 // _Interlocked* operations in the future, we will have to remove the volatile
368 // marker.
369 CXI->setVolatile(true);
370
371 // Store the result as an outparameter.
372 CGF.Builder.CreateStore(CGF.Builder.CreateExtractValue(CXI, 0),
373 ComparandResult);
374
375 // Get the success boolean and zero extend it to i8.
376 Value *Success = CGF.Builder.CreateExtractValue(CXI, 1);
377 return CGF.Builder.CreateZExt(Success, CGF.Int8Ty);
378 }
379
380 static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
381 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
382 assert(E->getArg(0)->getType()->isPointerType());
383
384 auto *IntTy = CGF.ConvertType(E->getType());
385 auto *Result = CGF.Builder.CreateAtomicRMW(
386 AtomicRMWInst::Add,
387 CGF.EmitScalarExpr(E->getArg(0)),
388 ConstantInt::get(IntTy, 1),
389 Ordering);
390 return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
391 }
392
393 static Value *EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E,
394 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
395 assert(E->getArg(0)->getType()->isPointerType());
396
397 auto *IntTy = CGF.ConvertType(E->getType());
398 auto *Result = CGF.Builder.CreateAtomicRMW(
399 AtomicRMWInst::Sub,
400 CGF.EmitScalarExpr(E->getArg(0)),
401 ConstantInt::get(IntTy, 1),
402 Ordering);
403 return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
404 }
405
406 // Build a plain volatile load.
407 static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {
408 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
409 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
410 CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
411 llvm::Type *ITy =
412 llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
413 Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
414 llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(Ptr, LoadSize);
415 Load->setVolatile(true);
416 return Load;
417 }
418
419 // Build a plain volatile store.
420 static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
421 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
422 Value *Value = CGF.EmitScalarExpr(E->getArg(1));
423 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
424 CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
425 llvm::Type *ITy =
426 llvm::IntegerType::get(CGF.getLLVMContext(), StoreSize.getQuantity() * 8);
427 Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
428 llvm::StoreInst *Store =
429 CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
430 Store->setVolatile(true);
431 return Store;
432 }
433
434 // Emit a simple mangled intrinsic that has 1 argument and a return type
435 // matching the argument type. Depending on mode, this may be a constrained
436 // floating-point intrinsic.
437 static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
438 const CallExpr *E, unsigned IntrinsicID,
439 unsigned ConstrainedIntrinsicID) {
440 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
441
442 if (CGF.Builder.getIsFPConstrained()) {
443 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
444 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
445 return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
446 } else {
447 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
448 return CGF.Builder.CreateCall(F, Src0);
449 }
450 }
451
452 // Emit an intrinsic that has 2 operands of the same type as its result.
453 // Depending on mode, this may be a constrained floating-point intrinsic.
454 static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
455 const CallExpr *E, unsigned IntrinsicID,
456 unsigned ConstrainedIntrinsicID) {
457 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
458 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
459
460 if (CGF.Builder.getIsFPConstrained()) {
461 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
462 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
463 return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
464 } else {
465 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
466 return CGF.Builder.CreateCall(F, { Src0, Src1 });
467 }
468 }
469
470 // Emit an intrinsic that has 3 operands of the same type as its result.
471 // Depending on mode, this may be a constrained floating-point intrinsic.
472 static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
473 const CallExpr *E, unsigned IntrinsicID,
474 unsigned ConstrainedIntrinsicID) {
475 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
476 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
477 llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
478
479 if (CGF.Builder.getIsFPConstrained()) {
480 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
481 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
482 return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
483 } else {
484 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
485 return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
486 }
487 }
488
489 // Emit an intrinsic where all operands are of the same type as the result.
490 // Depending on mode, this may be a constrained floating-point intrinsic.
491 static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
492 unsigned IntrinsicID,
493 unsigned ConstrainedIntrinsicID,
494 llvm::Type *Ty,
495 ArrayRef<Value *> Args) {
496 Function *F;
497 if (CGF.Builder.getIsFPConstrained())
498 F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Ty);
499 else
500 F = CGF.CGM.getIntrinsic(IntrinsicID, Ty);
501
502 if (CGF.Builder.getIsFPConstrained())
503 return CGF.Builder.CreateConstrainedFPCall(F, Args);
504 else
505 return CGF.Builder.CreateCall(F, Args);
506 }
507
508 // Emit a simple mangled intrinsic that has 1 argument and a return type
509 // matching the argument type.
510 static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
511 const CallExpr *E,
512 unsigned IntrinsicID) {
513 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
514
515 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
516 return CGF.Builder.CreateCall(F, Src0);
517 }
518
519 // Emit an intrinsic that has 2 operands of the same type as its result.
520 static Value *emitBinaryBuiltin(CodeGenFunction &CGF,
521 const CallExpr *E,
522 unsigned IntrinsicID) {
523 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
524 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
525
526 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
527 return CGF.Builder.CreateCall(F, { Src0, Src1 });
528 }
529
530 // Emit an intrinsic that has 3 operands of the same type as its result.
531 static Value *emitTernaryBuiltin(CodeGenFunction &CGF,
532 const CallExpr *E,
533 unsigned IntrinsicID) {
534 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
535 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
536 llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
537
538 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
539 return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
540 }
541
542 // Emit an intrinsic that has 1 float or double operand, and 1 integer.
543 static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
544 const CallExpr *E,
545 unsigned IntrinsicID) {
546 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
547 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
548
549 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
550 return CGF.Builder.CreateCall(F, {Src0, Src1});
551 }
552
553 // Emit an intrinsic that has overloaded integer result and fp operand.
554 static Value *
555 emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E,
556 unsigned IntrinsicID,
557 unsigned ConstrainedIntrinsicID) {
558 llvm::Type *ResultType = CGF.ConvertType(E->getType());
559 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
560
561 if (CGF.Builder.getIsFPConstrained()) {
562 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
563 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
564 {ResultType, Src0->getType()});
565 return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
566 } else {
567 Function *F =
568 CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()});
569 return CGF.Builder.CreateCall(F, Src0);
570 }
571 }
572
573 /// EmitFAbs - Emit a call to @llvm.fabs().
574 static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
575 Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
576 llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
577 Call->setDoesNotAccessMemory();
578 return Call;
579 }
580
581 /// Emit the computation of the sign bit for a floating point value. Returns
582 /// the i1 sign bit value.
583 static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
584 LLVMContext &C = CGF.CGM.getLLVMContext();
585
586 llvm::Type *Ty = V->getType();
587 int Width = Ty->getPrimitiveSizeInBits();
588 llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
589 V = CGF.Builder.CreateBitCast(V, IntTy);
590 if (Ty->isPPC_FP128Ty()) {
591 // We want the sign bit of the higher-order double. The bitcast we just
592 // did works as if the double-double was stored to memory and then
593 // read as an i128. The "store" will put the higher-order double in the
594 // lower address in both little- and big-Endian modes, but the "load"
595 // will treat those bits as a different part of the i128: the low bits in
596 // little-Endian, the high bits in big-Endian. Therefore, on big-Endian
597 // we need to shift the high bits down to the low before truncating.
598 Width >>= 1;
599 if (CGF.getTarget().isBigEndian()) {
600 Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
601 V = CGF.Builder.CreateLShr(V, ShiftCst);
602 }
603 // We are truncating the value in order to extract the higher-order
604 // double, which we will be using to extract the sign from.
605 IntTy = llvm::IntegerType::get(C, Width);
606 V = CGF.Builder.CreateTrunc(V, IntTy);
607 }
608 Value *Zero = llvm::Constant::getNullValue(IntTy);
609 return CGF.Builder.CreateICmpSLT(V, Zero);
610 }
611
612 static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
613 const CallExpr *E, llvm::Constant *calleeValue) {
614 CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
615 return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot());
616 }
617
618 /// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
619 /// depending on IntrinsicID.
620 ///
621 /// \arg CGF The current codegen function.
622 /// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
623 /// \arg X The first argument to the llvm.*.with.overflow.*.
624 /// \arg Y The second argument to the llvm.*.with.overflow.*.
625 /// \arg Carry The carry returned by the llvm.*.with.overflow.*.
626 /// \returns The result (i.e. sum/product) returned by the intrinsic.
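///
/// As an illustrative sketch (assuming 32-bit operands and
/// IntrinsicID == llvm.sadd.with.overflow), the emitted IR looks roughly like:
///   %tmp   = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
///   %carry = extractvalue { i32, i1 } %tmp, 1
///   %sum   = extractvalue { i32, i1 } %tmp, 0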
627 static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
628 const llvm::Intrinsic::ID IntrinsicID,
629 llvm::Value *X, llvm::Value *Y,
630 llvm::Value *&Carry) {
631 // Make sure we have integers of the same width.
632 assert(X->getType() == Y->getType() &&
633 "Arguments must be the same type. (Did you forget to make sure both "
634 "arguments have the same integer width?)");
635
636 Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
637 llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
638 Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
639 return CGF.Builder.CreateExtractValue(Tmp, 0);
640 }
641
642 static Value *emitRangedBuiltin(CodeGenFunction &CGF,
643 unsigned IntrinsicID,
644 int low, int high) {
645 llvm::MDBuilder MDHelper(CGF.getLLVMContext());
646 llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high));
647 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {});
648 llvm::Instruction *Call = CGF.Builder.CreateCall(F);
649 Call->setMetadata(llvm::LLVMContext::MD_range, RNode);
650 return Call;
651 }
652
653 namespace {
654 struct WidthAndSignedness {
655 unsigned Width;
656 bool Signed;
657 };
658 }
659
660 static WidthAndSignedness
661 getIntegerWidthAndSignedness(const clang::ASTContext &context,
662 const clang::QualType Type) {
663 assert(Type->isIntegerType() && "Given type is not an integer.");
664 unsigned Width = Type->isBooleanType() ? 1
665 : Type->isExtIntType() ? context.getIntWidth(Type)
666 : context.getTypeInfo(Type).Width;
667 bool Signed = Type->isSignedIntegerType();
668 return {Width, Signed};
669 }
670
671 // Given one or more integer types, this function produces an integer type that
672 // encompasses them: any value in one of the given types could be expressed in
673 // the encompassing type.
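// A worked example (illustrative only): for {unsigned 32-bit, signed 16-bit}
// the result must be signed, and the unsigned 32-bit member then needs one
// extra bit, so the encompassing type is a signed 33-bit integer.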
674 static struct WidthAndSignedness
675 EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
676 assert(Types.size() > 0 && "Empty list of types.");
677
678 // If any of the given types is signed, we must return a signed type.
679 bool Signed = false;
680 for (const auto &Type : Types) {
681 Signed |= Type.Signed;
682 }
683
684 // The encompassing type must have a width greater than or equal to the width
685 // of the specified types. Additionally, if the encompassing type is signed,
686 // its width must be strictly greater than the width of any unsigned types
687 // given.
688 unsigned Width = 0;
689 for (const auto &Type : Types) {
690 unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
691 if (Width < MinWidth) {
692 Width = MinWidth;
693 }
694 }
695
696 return {Width, Signed};
697 }
698
699 Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
700 llvm::Type *DestType = Int8PtrTy;
701 if (ArgValue->getType() != DestType)
702 ArgValue =
703 Builder.CreateBitCast(ArgValue, DestType, ArgValue->getName().data());
704
705 Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
706 return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue);
707 }
708
709 /// Checks if using the result of __builtin_object_size(p, @p From) in place of
710 /// __builtin_object_size(p, @p To) is correct
711 static bool areBOSTypesCompatible(int From, int To) {
712 // Note: Our __builtin_object_size implementation currently treats Type=0 and
713 // Type=2 identically. Encoding this implementation detail here may make
714 // improving __builtin_object_size difficult in the future, so it's omitted.
715 return From == To || (From == 0 && To == 1) || (From == 3 && To == 2);
716 }
717
718 static llvm::Value *
719 getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
720 return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
721 }
722
723 llvm::Value *
724 CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
725 llvm::IntegerType *ResType,
726 llvm::Value *EmittedE,
727 bool IsDynamic) {
728 uint64_t ObjectSize;
729 if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
730 return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
731 return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
732 }
733
734 /// Returns a Value corresponding to the size of the given expression.
735 /// This Value may be either of the following:
736 /// - A llvm::Argument (if E is a param with the pass_object_size attribute on
737 /// it)
738 /// - A call to the @llvm.objectsize intrinsic
739 ///
740 /// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
741 /// and we wouldn't otherwise try to reference a pass_object_size parameter,
742 /// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
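///
/// As an illustrative sketch (assuming a 64-bit size_t and an i8* pointer
/// operand), the emitted intrinsic call looks roughly like:
///   %size = call i64 @llvm.objectsize.i64.p0i8(i8* %ptr, i1 %min,
///                                              i1 true, i1 %dynamic)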
743 llvm::Value *
744 CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
745 llvm::IntegerType *ResType,
746 llvm::Value *EmittedE, bool IsDynamic) {
747 // We need to reference an argument if the pointer is a parameter with the
748 // pass_object_size attribute.
749 if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
750 auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
751 auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
752 if (Param != nullptr && PS != nullptr &&
753 areBOSTypesCompatible(PS->getType(), Type)) {
754 auto Iter = SizeArguments.find(Param);
755 assert(Iter != SizeArguments.end());
756
757 const ImplicitParamDecl *D = Iter->second;
758 auto DIter = LocalDeclMap.find(D);
759 assert(DIter != LocalDeclMap.end());
760
761 return EmitLoadOfScalar(DIter->second, /*Volatile=*/false,
762 getContext().getSizeType(), E->getBeginLoc());
763 }
764 }
765
766 // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
767 // evaluate E for side-effects. In either case, we shouldn't lower to
768 // @llvm.objectsize.
769 if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
770 return getDefaultBuiltinObjectSizeResult(Type, ResType);
771
772 Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
773 assert(Ptr->getType()->isPointerTy() &&
774 "Non-pointer passed to __builtin_object_size?");
775
776 Function *F =
777 CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});
778
779 // LLVM only supports 0 and 2, so make sure that we pass that along as a boolean.
780 Value *Min = Builder.getInt1((Type & 2) != 0);
781 // For GCC compatibility, __builtin_object_size treats NULL as unknown size.
782 Value *NullIsUnknown = Builder.getTrue();
783 Value *Dynamic = Builder.getInt1(IsDynamic);
784 return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic});
785 }
786
787 namespace {
788 /// A struct to generically describe a bit test intrinsic.
789 struct BitTest {
790 enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
791 enum InterlockingKind : uint8_t {
792 Unlocked,
793 Sequential,
794 Acquire,
795 Release,
796 NoFence
797 };
798
799 ActionKind Action;
800 InterlockingKind Interlocking;
801 bool Is64Bit;
802
803 static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
804 };
805 } // namespace
806
807 BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
808 switch (BuiltinID) {
809 // Main portable variants.
810 case Builtin::BI_bittest:
811 return {TestOnly, Unlocked, false};
812 case Builtin::BI_bittestandcomplement:
813 return {Complement, Unlocked, false};
814 case Builtin::BI_bittestandreset:
815 return {Reset, Unlocked, false};
816 case Builtin::BI_bittestandset:
817 return {Set, Unlocked, false};
818 case Builtin::BI_interlockedbittestandreset:
819 return {Reset, Sequential, false};
820 case Builtin::BI_interlockedbittestandset:
821 return {Set, Sequential, false};
822
823 // X86-specific 64-bit variants.
824 case Builtin::BI_bittest64:
825 return {TestOnly, Unlocked, true};
826 case Builtin::BI_bittestandcomplement64:
827 return {Complement, Unlocked, true};
828 case Builtin::BI_bittestandreset64:
829 return {Reset, Unlocked, true};
830 case Builtin::BI_bittestandset64:
831 return {Set, Unlocked, true};
832 case Builtin::BI_interlockedbittestandreset64:
833 return {Reset, Sequential, true};
834 case Builtin::BI_interlockedbittestandset64:
835 return {Set, Sequential, true};
836
837 // ARM/AArch64-specific ordering variants.
838 case Builtin::BI_interlockedbittestandset_acq:
839 return {Set, Acquire, false};
840 case Builtin::BI_interlockedbittestandset_rel:
841 return {Set, Release, false};
842 case Builtin::BI_interlockedbittestandset_nf:
843 return {Set, NoFence, false};
844 case Builtin::BI_interlockedbittestandreset_acq:
845 return {Reset, Acquire, false};
846 case Builtin::BI_interlockedbittestandreset_rel:
847 return {Reset, Release, false};
848 case Builtin::BI_interlockedbittestandreset_nf:
849 return {Reset, NoFence, false};
850 }
851 llvm_unreachable("expected only bittest intrinsics");
852 }
853
854 static char bitActionToX86BTCode(BitTest::ActionKind A) {
855 switch (A) {
856 case BitTest::TestOnly: return '\0';
857 case BitTest::Complement: return 'c';
858 case BitTest::Reset: return 'r';
859 case BitTest::Set: return 's';
860 }
861 llvm_unreachable("invalid action");
862 }
863
864 static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
865 BitTest BT,
866 const CallExpr *E, Value *BitBase,
867 Value *BitPos) {
868 char Action = bitActionToX86BTCode(BT.Action);
869 char SizeSuffix = BT.Is64Bit ? 'q' : 'l';
870
871 // Build the assembly.
872 SmallString<64> Asm;
873 raw_svector_ostream AsmOS(Asm);
874 if (BT.Interlocking != BitTest::Unlocked)
875 AsmOS << "lock ";
876 AsmOS << "bt";
877 if (Action)
878 AsmOS << Action;
879 AsmOS << SizeSuffix << " $2, ($1)";
880
881 // Build the constraints. FIXME: We should support immediates when possible.
882 std::string Constraints = "={@ccc},r,r,~{cc},~{memory}";
883 std::string MachineClobbers = CGF.getTarget().getClobbers();
884 if (!MachineClobbers.empty()) {
885 Constraints += ',';
886 Constraints += MachineClobbers;
887 }
888 llvm::IntegerType *IntType = llvm::IntegerType::get(
889 CGF.getLLVMContext(),
890 CGF.getContext().getTypeSize(E->getArg(1)->getType()));
891 llvm::Type *IntPtrType = IntType->getPointerTo();
892 llvm::FunctionType *FTy =
893 llvm::FunctionType::get(CGF.Int8Ty, {IntPtrType, IntType}, false);
894
895 llvm::InlineAsm *IA =
896 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
897 return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
898 }
899
900 static llvm::AtomicOrdering
901 getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
902 switch (I) {
903 case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic;
904 case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
905 case BitTest::Acquire: return llvm::AtomicOrdering::Acquire;
906 case BitTest::Release: return llvm::AtomicOrdering::Release;
907 case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic;
908 }
909 llvm_unreachable("invalid interlocking");
910 }
911
912 /// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
913 /// bits and a bit position and read and optionally modify the bit at that
914 /// position. The position index can be arbitrarily large, i.e. it can be larger
915 /// than 31 or 63, so we need an indexed load in the general case.
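///
/// For example (illustrative, generic non-x86 path): _bittestandset(Base, 40)
/// loads the byte at index 40 >> 3 == 5, forms the mask 1 << (40 & 0x7) == 1,
/// ORs it into that byte, and returns the original value of that bit.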
916 static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
917 unsigned BuiltinID,
918 const CallExpr *E) {
919 Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
920 Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));
921
922 BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);
923
924 // X86 has special BT, BTC, BTR, and BTS instructions that handle the array
925 // indexing operation internally. Use them if possible.
926 if (CGF.getTarget().getTriple().isX86())
927 return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);
928
929 // Otherwise, use generic code to load one byte and test the bit. Use all but
930 // the bottom three bits as the array index, and the bottom three bits to form
931 // a mask.
932 // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0;
933 Value *ByteIndex = CGF.Builder.CreateAShr(
934 BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
935 Value *BitBaseI8 = CGF.Builder.CreatePointerCast(BitBase, CGF.Int8PtrTy);
936 Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBaseI8,
937 ByteIndex, "bittest.byteaddr"),
938 CharUnits::One());
939 Value *PosLow =
940 CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
941 llvm::ConstantInt::get(CGF.Int8Ty, 0x7));
942
943 // The updating instructions will need a mask.
944 Value *Mask = nullptr;
945 if (BT.Action != BitTest::TestOnly) {
946 Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
947 "bittest.mask");
948 }
949
950 // Check the action and ordering of the interlocked intrinsics.
951 llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);
952
953 Value *OldByte = nullptr;
954 if (Ordering != llvm::AtomicOrdering::NotAtomic) {
955 // Emit a combined atomicrmw load/store operation for the interlocked
956 // intrinsics.
957 llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
958 if (BT.Action == BitTest::Reset) {
959 Mask = CGF.Builder.CreateNot(Mask);
960 RMWOp = llvm::AtomicRMWInst::And;
961 }
962 OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr.getPointer(), Mask,
963 Ordering);
964 } else {
965 // Emit a plain load for the non-interlocked intrinsics.
966 OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
967 Value *NewByte = nullptr;
968 switch (BT.Action) {
969 case BitTest::TestOnly:
970 // Don't store anything.
971 break;
972 case BitTest::Complement:
973 NewByte = CGF.Builder.CreateXor(OldByte, Mask);
974 break;
975 case BitTest::Reset:
976 NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
977 break;
978 case BitTest::Set:
979 NewByte = CGF.Builder.CreateOr(OldByte, Mask);
980 break;
981 }
982 if (NewByte)
983 CGF.Builder.CreateStore(NewByte, ByteAddr);
984 }
985
986 // However we loaded the old byte, either by plain load or atomicrmw, shift
987 // the bit into the low position and mask it to 0 or 1.
988 Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
989 return CGF.Builder.CreateAnd(
990 ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
991 }
992
993 namespace {
994 enum class MSVCSetJmpKind {
995 _setjmpex,
996 _setjmp3,
997 _setjmp
998 };
999 }
1000
1001 /// MSVC handles setjmp a bit differently on different platforms. On every
1002 /// architecture except 32-bit x86, the frame address is passed. On x86, extra
1003 /// parameters can be passed as variadic arguments, but we always pass none.
1004 static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
1005 const CallExpr *E) {
1006 llvm::Value *Arg1 = nullptr;
1007 llvm::Type *Arg1Ty = nullptr;
1008 StringRef Name;
1009 bool IsVarArg = false;
1010 if (SJKind == MSVCSetJmpKind::_setjmp3) {
1011 Name = "_setjmp3";
1012 Arg1Ty = CGF.Int32Ty;
1013 Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
1014 IsVarArg = true;
1015 } else {
1016 Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
1017 Arg1Ty = CGF.Int8PtrTy;
1018 if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
1019 Arg1 = CGF.Builder.CreateCall(
1020 CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy));
1021 } else
1022 Arg1 = CGF.Builder.CreateCall(
1023 CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy),
1024 llvm::ConstantInt::get(CGF.Int32Ty, 0));
1025 }
1026
1027 // Mark the call site and declaration with ReturnsTwice.
1028 llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
1029 llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
1030 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
1031 llvm::Attribute::ReturnsTwice);
1032 llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction(
1033 llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
1034 ReturnsTwiceAttr, /*Local=*/true);
1035
1036 llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
1037 CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
1038 llvm::Value *Args[] = {Buf, Arg1};
1039 llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
1040 CB->setAttributes(ReturnsTwiceAttr);
1041 return RValue::get(CB);
1042 }
1043
1044 // Many of the MSVC builtins are on x64, ARM and AArch64; to avoid repeating code,
1045 // we handle them here.
1046 enum class CodeGenFunction::MSVCIntrin {
1047 _BitScanForward,
1048 _BitScanReverse,
1049 _InterlockedAnd,
1050 _InterlockedDecrement,
1051 _InterlockedExchange,
1052 _InterlockedExchangeAdd,
1053 _InterlockedExchangeSub,
1054 _InterlockedIncrement,
1055 _InterlockedOr,
1056 _InterlockedXor,
1057 _InterlockedExchangeAdd_acq,
1058 _InterlockedExchangeAdd_rel,
1059 _InterlockedExchangeAdd_nf,
1060 _InterlockedExchange_acq,
1061 _InterlockedExchange_rel,
1062 _InterlockedExchange_nf,
1063 _InterlockedCompareExchange_acq,
1064 _InterlockedCompareExchange_rel,
1065 _InterlockedCompareExchange_nf,
1066 _InterlockedCompareExchange128,
1067 _InterlockedCompareExchange128_acq,
1068 _InterlockedCompareExchange128_rel,
1069 _InterlockedCompareExchange128_nf,
1070 _InterlockedOr_acq,
1071 _InterlockedOr_rel,
1072 _InterlockedOr_nf,
1073 _InterlockedXor_acq,
1074 _InterlockedXor_rel,
1075 _InterlockedXor_nf,
1076 _InterlockedAnd_acq,
1077 _InterlockedAnd_rel,
1078 _InterlockedAnd_nf,
1079 _InterlockedIncrement_acq,
1080 _InterlockedIncrement_rel,
1081 _InterlockedIncrement_nf,
1082 _InterlockedDecrement_acq,
1083 _InterlockedDecrement_rel,
1084 _InterlockedDecrement_nf,
1085 __fastfail,
1086 };
1087
1088 static Optional<CodeGenFunction::MSVCIntrin>
1089 translateArmToMsvcIntrin(unsigned BuiltinID) {
1090 using MSVCIntrin = CodeGenFunction::MSVCIntrin;
1091 switch (BuiltinID) {
1092 default:
1093 return None;
1094 case ARM::BI_BitScanForward:
1095 case ARM::BI_BitScanForward64:
1096 return MSVCIntrin::_BitScanForward;
1097 case ARM::BI_BitScanReverse:
1098 case ARM::BI_BitScanReverse64:
1099 return MSVCIntrin::_BitScanReverse;
1100 case ARM::BI_InterlockedAnd64:
1101 return MSVCIntrin::_InterlockedAnd;
1102 case ARM::BI_InterlockedExchange64:
1103 return MSVCIntrin::_InterlockedExchange;
1104 case ARM::BI_InterlockedExchangeAdd64:
1105 return MSVCIntrin::_InterlockedExchangeAdd;
1106 case ARM::BI_InterlockedExchangeSub64:
1107 return MSVCIntrin::_InterlockedExchangeSub;
1108 case ARM::BI_InterlockedOr64:
1109 return MSVCIntrin::_InterlockedOr;
1110 case ARM::BI_InterlockedXor64:
1111 return MSVCIntrin::_InterlockedXor;
1112 case ARM::BI_InterlockedDecrement64:
1113 return MSVCIntrin::_InterlockedDecrement;
1114 case ARM::BI_InterlockedIncrement64:
1115 return MSVCIntrin::_InterlockedIncrement;
1116 case ARM::BI_InterlockedExchangeAdd8_acq:
1117 case ARM::BI_InterlockedExchangeAdd16_acq:
1118 case ARM::BI_InterlockedExchangeAdd_acq:
1119 case ARM::BI_InterlockedExchangeAdd64_acq:
1120 return MSVCIntrin::_InterlockedExchangeAdd_acq;
1121 case ARM::BI_InterlockedExchangeAdd8_rel:
1122 case ARM::BI_InterlockedExchangeAdd16_rel:
1123 case ARM::BI_InterlockedExchangeAdd_rel:
1124 case ARM::BI_InterlockedExchangeAdd64_rel:
1125 return MSVCIntrin::_InterlockedExchangeAdd_rel;
1126 case ARM::BI_InterlockedExchangeAdd8_nf:
1127 case ARM::BI_InterlockedExchangeAdd16_nf:
1128 case ARM::BI_InterlockedExchangeAdd_nf:
1129 case ARM::BI_InterlockedExchangeAdd64_nf:
1130 return MSVCIntrin::_InterlockedExchangeAdd_nf;
1131 case ARM::BI_InterlockedExchange8_acq:
1132 case ARM::BI_InterlockedExchange16_acq:
1133 case ARM::BI_InterlockedExchange_acq:
1134 case ARM::BI_InterlockedExchange64_acq:
1135 return MSVCIntrin::_InterlockedExchange_acq;
1136 case ARM::BI_InterlockedExchange8_rel:
1137 case ARM::BI_InterlockedExchange16_rel:
1138 case ARM::BI_InterlockedExchange_rel:
1139 case ARM::BI_InterlockedExchange64_rel:
1140 return MSVCIntrin::_InterlockedExchange_rel;
1141 case ARM::BI_InterlockedExchange8_nf:
1142 case ARM::BI_InterlockedExchange16_nf:
1143 case ARM::BI_InterlockedExchange_nf:
1144 case ARM::BI_InterlockedExchange64_nf:
1145 return MSVCIntrin::_InterlockedExchange_nf;
1146 case ARM::BI_InterlockedCompareExchange8_acq:
1147 case ARM::BI_InterlockedCompareExchange16_acq:
1148 case ARM::BI_InterlockedCompareExchange_acq:
1149 case ARM::BI_InterlockedCompareExchange64_acq:
1150 return MSVCIntrin::_InterlockedCompareExchange_acq;
1151 case ARM::BI_InterlockedCompareExchange8_rel:
1152 case ARM::BI_InterlockedCompareExchange16_rel:
1153 case ARM::BI_InterlockedCompareExchange_rel:
1154 case ARM::BI_InterlockedCompareExchange64_rel:
1155 return MSVCIntrin::_InterlockedCompareExchange_rel;
1156 case ARM::BI_InterlockedCompareExchange8_nf:
1157 case ARM::BI_InterlockedCompareExchange16_nf:
1158 case ARM::BI_InterlockedCompareExchange_nf:
1159 case ARM::BI_InterlockedCompareExchange64_nf:
1160 return MSVCIntrin::_InterlockedCompareExchange_nf;
1161 case ARM::BI_InterlockedOr8_acq:
1162 case ARM::BI_InterlockedOr16_acq:
1163 case ARM::BI_InterlockedOr_acq:
1164 case ARM::BI_InterlockedOr64_acq:
1165 return MSVCIntrin::_InterlockedOr_acq;
1166 case ARM::BI_InterlockedOr8_rel:
1167 case ARM::BI_InterlockedOr16_rel:
1168 case ARM::BI_InterlockedOr_rel:
1169 case ARM::BI_InterlockedOr64_rel:
1170 return MSVCIntrin::_InterlockedOr_rel;
1171 case ARM::BI_InterlockedOr8_nf:
1172 case ARM::BI_InterlockedOr16_nf:
1173 case ARM::BI_InterlockedOr_nf:
1174 case ARM::BI_InterlockedOr64_nf:
1175 return MSVCIntrin::_InterlockedOr_nf;
1176 case ARM::BI_InterlockedXor8_acq:
1177 case ARM::BI_InterlockedXor16_acq:
1178 case ARM::BI_InterlockedXor_acq:
1179 case ARM::BI_InterlockedXor64_acq:
1180 return MSVCIntrin::_InterlockedXor_acq;
1181 case ARM::BI_InterlockedXor8_rel:
1182 case ARM::BI_InterlockedXor16_rel:
1183 case ARM::BI_InterlockedXor_rel:
1184 case ARM::BI_InterlockedXor64_rel:
1185 return MSVCIntrin::_InterlockedXor_rel;
1186 case ARM::BI_InterlockedXor8_nf:
1187 case ARM::BI_InterlockedXor16_nf:
1188 case ARM::BI_InterlockedXor_nf:
1189 case ARM::BI_InterlockedXor64_nf:
1190 return MSVCIntrin::_InterlockedXor_nf;
1191 case ARM::BI_InterlockedAnd8_acq:
1192 case ARM::BI_InterlockedAnd16_acq:
1193 case ARM::BI_InterlockedAnd_acq:
1194 case ARM::BI_InterlockedAnd64_acq:
1195 return MSVCIntrin::_InterlockedAnd_acq;
1196 case ARM::BI_InterlockedAnd8_rel:
1197 case ARM::BI_InterlockedAnd16_rel:
1198 case ARM::BI_InterlockedAnd_rel:
1199 case ARM::BI_InterlockedAnd64_rel:
1200 return MSVCIntrin::_InterlockedAnd_rel;
1201 case ARM::BI_InterlockedAnd8_nf:
1202 case ARM::BI_InterlockedAnd16_nf:
1203 case ARM::BI_InterlockedAnd_nf:
1204 case ARM::BI_InterlockedAnd64_nf:
1205 return MSVCIntrin::_InterlockedAnd_nf;
1206 case ARM::BI_InterlockedIncrement16_acq:
1207 case ARM::BI_InterlockedIncrement_acq:
1208 case ARM::BI_InterlockedIncrement64_acq:
1209 return MSVCIntrin::_InterlockedIncrement_acq;
1210 case ARM::BI_InterlockedIncrement16_rel:
1211 case ARM::BI_InterlockedIncrement_rel:
1212 case ARM::BI_InterlockedIncrement64_rel:
1213 return MSVCIntrin::_InterlockedIncrement_rel;
1214 case ARM::BI_InterlockedIncrement16_nf:
1215 case ARM::BI_InterlockedIncrement_nf:
1216 case ARM::BI_InterlockedIncrement64_nf:
1217 return MSVCIntrin::_InterlockedIncrement_nf;
1218 case ARM::BI_InterlockedDecrement16_acq:
1219 case ARM::BI_InterlockedDecrement_acq:
1220 case ARM::BI_InterlockedDecrement64_acq:
1221 return MSVCIntrin::_InterlockedDecrement_acq;
1222 case ARM::BI_InterlockedDecrement16_rel:
1223 case ARM::BI_InterlockedDecrement_rel:
1224 case ARM::BI_InterlockedDecrement64_rel:
1225 return MSVCIntrin::_InterlockedDecrement_rel;
1226 case ARM::BI_InterlockedDecrement16_nf:
1227 case ARM::BI_InterlockedDecrement_nf:
1228 case ARM::BI_InterlockedDecrement64_nf:
1229 return MSVCIntrin::_InterlockedDecrement_nf;
1230 }
1231 llvm_unreachable("must return from switch");
1232 }
1233
1234 static Optional<CodeGenFunction::MSVCIntrin>
1235 translateAarch64ToMsvcIntrin(unsigned BuiltinID) {
1236 using MSVCIntrin = CodeGenFunction::MSVCIntrin;
1237 switch (BuiltinID) {
1238 default:
1239 return None;
1240 case AArch64::BI_BitScanForward:
1241 case AArch64::BI_BitScanForward64:
1242 return MSVCIntrin::_BitScanForward;
1243 case AArch64::BI_BitScanReverse:
1244 case AArch64::BI_BitScanReverse64:
1245 return MSVCIntrin::_BitScanReverse;
1246 case AArch64::BI_InterlockedAnd64:
1247 return MSVCIntrin::_InterlockedAnd;
1248 case AArch64::BI_InterlockedExchange64:
1249 return MSVCIntrin::_InterlockedExchange;
1250 case AArch64::BI_InterlockedExchangeAdd64:
1251 return MSVCIntrin::_InterlockedExchangeAdd;
1252 case AArch64::BI_InterlockedExchangeSub64:
1253 return MSVCIntrin::_InterlockedExchangeSub;
1254 case AArch64::BI_InterlockedOr64:
1255 return MSVCIntrin::_InterlockedOr;
1256 case AArch64::BI_InterlockedXor64:
1257 return MSVCIntrin::_InterlockedXor;
1258 case AArch64::BI_InterlockedDecrement64:
1259 return MSVCIntrin::_InterlockedDecrement;
1260 case AArch64::BI_InterlockedIncrement64:
1261 return MSVCIntrin::_InterlockedIncrement;
1262 case AArch64::BI_InterlockedExchangeAdd8_acq:
1263 case AArch64::BI_InterlockedExchangeAdd16_acq:
1264 case AArch64::BI_InterlockedExchangeAdd_acq:
1265 case AArch64::BI_InterlockedExchangeAdd64_acq:
1266 return MSVCIntrin::_InterlockedExchangeAdd_acq;
1267 case AArch64::BI_InterlockedExchangeAdd8_rel:
1268 case AArch64::BI_InterlockedExchangeAdd16_rel:
1269 case AArch64::BI_InterlockedExchangeAdd_rel:
1270 case AArch64::BI_InterlockedExchangeAdd64_rel:
1271 return MSVCIntrin::_InterlockedExchangeAdd_rel;
1272 case AArch64::BI_InterlockedExchangeAdd8_nf:
1273 case AArch64::BI_InterlockedExchangeAdd16_nf:
1274 case AArch64::BI_InterlockedExchangeAdd_nf:
1275 case AArch64::BI_InterlockedExchangeAdd64_nf:
1276 return MSVCIntrin::_InterlockedExchangeAdd_nf;
1277 case AArch64::BI_InterlockedExchange8_acq:
1278 case AArch64::BI_InterlockedExchange16_acq:
1279 case AArch64::BI_InterlockedExchange_acq:
1280 case AArch64::BI_InterlockedExchange64_acq:
1281 return MSVCIntrin::_InterlockedExchange_acq;
1282 case AArch64::BI_InterlockedExchange8_rel:
1283 case AArch64::BI_InterlockedExchange16_rel:
1284 case AArch64::BI_InterlockedExchange_rel:
1285 case AArch64::BI_InterlockedExchange64_rel:
1286 return MSVCIntrin::_InterlockedExchange_rel;
1287 case AArch64::BI_InterlockedExchange8_nf:
1288 case AArch64::BI_InterlockedExchange16_nf:
1289 case AArch64::BI_InterlockedExchange_nf:
1290 case AArch64::BI_InterlockedExchange64_nf:
1291 return MSVCIntrin::_InterlockedExchange_nf;
1292 case AArch64::BI_InterlockedCompareExchange8_acq:
1293 case AArch64::BI_InterlockedCompareExchange16_acq:
1294 case AArch64::BI_InterlockedCompareExchange_acq:
1295 case AArch64::BI_InterlockedCompareExchange64_acq:
1296 return MSVCIntrin::_InterlockedCompareExchange_acq;
1297 case AArch64::BI_InterlockedCompareExchange8_rel:
1298 case AArch64::BI_InterlockedCompareExchange16_rel:
1299 case AArch64::BI_InterlockedCompareExchange_rel:
1300 case AArch64::BI_InterlockedCompareExchange64_rel:
1301 return MSVCIntrin::_InterlockedCompareExchange_rel;
1302 case AArch64::BI_InterlockedCompareExchange8_nf:
1303 case AArch64::BI_InterlockedCompareExchange16_nf:
1304 case AArch64::BI_InterlockedCompareExchange_nf:
1305 case AArch64::BI_InterlockedCompareExchange64_nf:
1306 return MSVCIntrin::_InterlockedCompareExchange_nf;
1307 case AArch64::BI_InterlockedCompareExchange128:
1308 return MSVCIntrin::_InterlockedCompareExchange128;
1309 case AArch64::BI_InterlockedCompareExchange128_acq:
1310 return MSVCIntrin::_InterlockedCompareExchange128_acq;
1311 case AArch64::BI_InterlockedCompareExchange128_nf:
1312 return MSVCIntrin::_InterlockedCompareExchange128_nf;
1313 case AArch64::BI_InterlockedCompareExchange128_rel:
1314 return MSVCIntrin::_InterlockedCompareExchange128_rel;
1315 case AArch64::BI_InterlockedOr8_acq:
1316 case AArch64::BI_InterlockedOr16_acq:
1317 case AArch64::BI_InterlockedOr_acq:
1318 case AArch64::BI_InterlockedOr64_acq:
1319 return MSVCIntrin::_InterlockedOr_acq;
1320 case AArch64::BI_InterlockedOr8_rel:
1321 case AArch64::BI_InterlockedOr16_rel:
1322 case AArch64::BI_InterlockedOr_rel:
1323 case AArch64::BI_InterlockedOr64_rel:
1324 return MSVCIntrin::_InterlockedOr_rel;
1325 case AArch64::BI_InterlockedOr8_nf:
1326 case AArch64::BI_InterlockedOr16_nf:
1327 case AArch64::BI_InterlockedOr_nf:
1328 case AArch64::BI_InterlockedOr64_nf:
1329 return MSVCIntrin::_InterlockedOr_nf;
1330 case AArch64::BI_InterlockedXor8_acq:
1331 case AArch64::BI_InterlockedXor16_acq:
1332 case AArch64::BI_InterlockedXor_acq:
1333 case AArch64::BI_InterlockedXor64_acq:
1334 return MSVCIntrin::_InterlockedXor_acq;
1335 case AArch64::BI_InterlockedXor8_rel:
1336 case AArch64::BI_InterlockedXor16_rel:
1337 case AArch64::BI_InterlockedXor_rel:
1338 case AArch64::BI_InterlockedXor64_rel:
1339 return MSVCIntrin::_InterlockedXor_rel;
1340 case AArch64::BI_InterlockedXor8_nf:
1341 case AArch64::BI_InterlockedXor16_nf:
1342 case AArch64::BI_InterlockedXor_nf:
1343 case AArch64::BI_InterlockedXor64_nf:
1344 return MSVCIntrin::_InterlockedXor_nf;
1345 case AArch64::BI_InterlockedAnd8_acq:
1346 case AArch64::BI_InterlockedAnd16_acq:
1347 case AArch64::BI_InterlockedAnd_acq:
1348 case AArch64::BI_InterlockedAnd64_acq:
1349 return MSVCIntrin::_InterlockedAnd_acq;
1350 case AArch64::BI_InterlockedAnd8_rel:
1351 case AArch64::BI_InterlockedAnd16_rel:
1352 case AArch64::BI_InterlockedAnd_rel:
1353 case AArch64::BI_InterlockedAnd64_rel:
1354 return MSVCIntrin::_InterlockedAnd_rel;
1355 case AArch64::BI_InterlockedAnd8_nf:
1356 case AArch64::BI_InterlockedAnd16_nf:
1357 case AArch64::BI_InterlockedAnd_nf:
1358 case AArch64::BI_InterlockedAnd64_nf:
1359 return MSVCIntrin::_InterlockedAnd_nf;
1360 case AArch64::BI_InterlockedIncrement16_acq:
1361 case AArch64::BI_InterlockedIncrement_acq:
1362 case AArch64::BI_InterlockedIncrement64_acq:
1363 return MSVCIntrin::_InterlockedIncrement_acq;
1364 case AArch64::BI_InterlockedIncrement16_rel:
1365 case AArch64::BI_InterlockedIncrement_rel:
1366 case AArch64::BI_InterlockedIncrement64_rel:
1367 return MSVCIntrin::_InterlockedIncrement_rel;
1368 case AArch64::BI_InterlockedIncrement16_nf:
1369 case AArch64::BI_InterlockedIncrement_nf:
1370 case AArch64::BI_InterlockedIncrement64_nf:
1371 return MSVCIntrin::_InterlockedIncrement_nf;
1372 case AArch64::BI_InterlockedDecrement16_acq:
1373 case AArch64::BI_InterlockedDecrement_acq:
1374 case AArch64::BI_InterlockedDecrement64_acq:
1375 return MSVCIntrin::_InterlockedDecrement_acq;
1376 case AArch64::BI_InterlockedDecrement16_rel:
1377 case AArch64::BI_InterlockedDecrement_rel:
1378 case AArch64::BI_InterlockedDecrement64_rel:
1379 return MSVCIntrin::_InterlockedDecrement_rel;
1380 case AArch64::BI_InterlockedDecrement16_nf:
1381 case AArch64::BI_InterlockedDecrement_nf:
1382 case AArch64::BI_InterlockedDecrement64_nf:
1383 return MSVCIntrin::_InterlockedDecrement_nf;
1384 }
1385 llvm_unreachable("must return from switch");
1386 }
1387
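// The x86 side only needs to translate the 64-bit variants (plus the 128-bit
// compare-exchange); the remaining widths already use the generic MSVC
// builtin IDs and are handled directly.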
1388 static Optional<CodeGenFunction::MSVCIntrin>
1389 translateX86ToMsvcIntrin(unsigned BuiltinID) {
1390 using MSVCIntrin = CodeGenFunction::MSVCIntrin;
1391 switch (BuiltinID) {
1392 default:
1393 return None;
1394 case clang::X86::BI_BitScanForward:
1395 case clang::X86::BI_BitScanForward64:
1396 return MSVCIntrin::_BitScanForward;
1397 case clang::X86::BI_BitScanReverse:
1398 case clang::X86::BI_BitScanReverse64:
1399 return MSVCIntrin::_BitScanReverse;
1400 case clang::X86::BI_InterlockedAnd64:
1401 return MSVCIntrin::_InterlockedAnd;
1402 case clang::X86::BI_InterlockedCompareExchange128:
1403 return MSVCIntrin::_InterlockedCompareExchange128;
1404 case clang::X86::BI_InterlockedExchange64:
1405 return MSVCIntrin::_InterlockedExchange;
1406 case clang::X86::BI_InterlockedExchangeAdd64:
1407 return MSVCIntrin::_InterlockedExchangeAdd;
1408 case clang::X86::BI_InterlockedExchangeSub64:
1409 return MSVCIntrin::_InterlockedExchangeSub;
1410 case clang::X86::BI_InterlockedOr64:
1411 return MSVCIntrin::_InterlockedOr;
1412 case clang::X86::BI_InterlockedXor64:
1413 return MSVCIntrin::_InterlockedXor;
1414 case clang::X86::BI_InterlockedDecrement64:
1415 return MSVCIntrin::_InterlockedDecrement;
1416 case clang::X86::BI_InterlockedIncrement64:
1417 return MSVCIntrin::_InterlockedIncrement;
1418 }
1419 llvm_unreachable("must return from switch");
1420 }
1421
1422 // Emit an MSVC intrinsic. Assumes that arguments have *not* been evaluated.
1423 Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
1424 const CallExpr *E) {
1425 switch (BuiltinID) {
1426 case MSVCIntrin::_BitScanForward:
1427 case MSVCIntrin::_BitScanReverse: {
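// _BitScanForward/_BitScanReverse return 0 and leave *Index untouched when
// the mask is zero; otherwise they store the index of the lowest/highest set
// bit and return 1. Emit this as an explicit zero test around a cttz/ctlz
// call (with the zero-is-undef flag set, since the zero case is branched
// around), merging the 0/1 return value with a phi.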
1428 Address IndexAddress(EmitPointerWithAlignment(E->getArg(0)));
1429 Value *ArgValue = EmitScalarExpr(E->getArg(1));
1430
1431 llvm::Type *ArgType = ArgValue->getType();
1432 llvm::Type *IndexType =
1433 IndexAddress.getPointer()->getType()->getPointerElementType();
1434 llvm::Type *ResultType = ConvertType(E->getType());
1435
1436 Value *ArgZero = llvm::Constant::getNullValue(ArgType);
1437 Value *ResZero = llvm::Constant::getNullValue(ResultType);
1438 Value *ResOne = llvm::ConstantInt::get(ResultType, 1);
1439
1440 BasicBlock *Begin = Builder.GetInsertBlock();
1441 BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
1442 Builder.SetInsertPoint(End);
1443 PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");
1444
1445 Builder.SetInsertPoint(Begin);
1446 Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
1447 BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
1448 Builder.CreateCondBr(IsZero, End, NotZero);
1449 Result->addIncoming(ResZero, Begin);
1450
1451 Builder.SetInsertPoint(NotZero);
1452
1453 if (BuiltinID == MSVCIntrin::_BitScanForward) {
1454 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
1455 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1456 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1457 Builder.CreateStore(ZeroCount, IndexAddress, false);
1458 } else {
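// _BitScanReverse wants the index of the highest set bit counted from bit 0,
// i.e. (BitWidth - 1) - ctlz(Arg).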
1459 unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
1460 Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);
1461
1462 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
1463 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1464 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1465 Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
1466 Builder.CreateStore(Index, IndexAddress, false);
1467 }
1468 Builder.CreateBr(End);
1469 Result->addIncoming(ResOne, NotZero);
1470
1471 Builder.SetInsertPoint(End);
1472 return Result;
1473 }
1474 case MSVCIntrin::_InterlockedAnd:
1475 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
1476 case MSVCIntrin::_InterlockedExchange:
1477 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
1478 case MSVCIntrin::_InterlockedExchangeAdd:
1479 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
1480 case MSVCIntrin::_InterlockedExchangeSub:
1481 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
1482 case MSVCIntrin::_InterlockedOr:
1483 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
1484 case MSVCIntrin::_InterlockedXor:
1485 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
1486 case MSVCIntrin::_InterlockedExchangeAdd_acq:
1487 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1488 AtomicOrdering::Acquire);
1489 case MSVCIntrin::_InterlockedExchangeAdd_rel:
1490 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1491 AtomicOrdering::Release);
1492 case MSVCIntrin::_InterlockedExchangeAdd_nf:
1493 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1494 AtomicOrdering::Monotonic);
1495 case MSVCIntrin::_InterlockedExchange_acq:
1496 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1497 AtomicOrdering::Acquire);
1498 case MSVCIntrin::_InterlockedExchange_rel:
1499 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1500 AtomicOrdering::Release);
1501 case MSVCIntrin::_InterlockedExchange_nf:
1502 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1503 AtomicOrdering::Monotonic);
1504 case MSVCIntrin::_InterlockedCompareExchange_acq:
1505 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
1506 case MSVCIntrin::_InterlockedCompareExchange_rel:
1507 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
1508 case MSVCIntrin::_InterlockedCompareExchange_nf:
1509 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1510 case MSVCIntrin::_InterlockedCompareExchange128:
1511 return EmitAtomicCmpXchg128ForMSIntrin(
1512 *this, E, AtomicOrdering::SequentiallyConsistent);
1513 case MSVCIntrin::_InterlockedCompareExchange128_acq:
1514 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Acquire);
1515 case MSVCIntrin::_InterlockedCompareExchange128_rel:
1516 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Release);
1517 case MSVCIntrin::_InterlockedCompareExchange128_nf:
1518 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1519 case MSVCIntrin::_InterlockedOr_acq:
1520 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1521 AtomicOrdering::Acquire);
1522 case MSVCIntrin::_InterlockedOr_rel:
1523 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1524 AtomicOrdering::Release);
1525 case MSVCIntrin::_InterlockedOr_nf:
1526 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1527 AtomicOrdering::Monotonic);
1528 case MSVCIntrin::_InterlockedXor_acq:
1529 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1530 AtomicOrdering::Acquire);
1531 case MSVCIntrin::_InterlockedXor_rel:
1532 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1533 AtomicOrdering::Release);
1534 case MSVCIntrin::_InterlockedXor_nf:
1535 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1536 AtomicOrdering::Monotonic);
1537 case MSVCIntrin::_InterlockedAnd_acq:
1538 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1539 AtomicOrdering::Acquire);
1540 case MSVCIntrin::_InterlockedAnd_rel:
1541 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1542 AtomicOrdering::Release);
1543 case MSVCIntrin::_InterlockedAnd_nf:
1544 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1545 AtomicOrdering::Monotonic);
1546 case MSVCIntrin::_InterlockedIncrement_acq:
1547 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
1548 case MSVCIntrin::_InterlockedIncrement_rel:
1549 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
1550 case MSVCIntrin::_InterlockedIncrement_nf:
1551 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
1552 case MSVCIntrin::_InterlockedDecrement_acq:
1553 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
1554 case MSVCIntrin::_InterlockedDecrement_rel:
1555 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
1556 case MSVCIntrin::_InterlockedDecrement_nf:
1557 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);
1558
1559 case MSVCIntrin::_InterlockedDecrement:
1560 return EmitAtomicDecrementValue(*this, E);
1561 case MSVCIntrin::_InterlockedIncrement:
1562 return EmitAtomicIncrementValue(*this, E);
1563
1564 case MSVCIntrin::__fastfail: {
1565 // Request immediate process termination from the kernel. The instruction
1566 // sequences to do this are documented on MSDN:
1567 // https://msdn.microsoft.com/en-us/library/dn774154.aspx
1568 llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
1569 StringRef Asm, Constraints;
1570 switch (ISA) {
1571 default:
1572 ErrorUnsupported(E, "__fastfail call for this architecture");
1573 break;
1574 case llvm::Triple::x86:
1575 case llvm::Triple::x86_64:
1576 Asm = "int $$0x29";
1577 Constraints = "{cx}";
1578 break;
1579 case llvm::Triple::thumb:
1580 Asm = "udf #251";
1581 Constraints = "{r0}";
1582 break;
1583 case llvm::Triple::aarch64:
1584 Asm = "brk #0xF003";
1585 Constraints = "{w0}";
1586 }
1587 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
1588 llvm::InlineAsm *IA =
1589 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
1590 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
1591 getLLVMContext(), llvm::AttributeList::FunctionIndex,
1592 llvm::Attribute::NoReturn);
1593 llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
1594 CI->setAttributes(NoReturnAttr);
1595 return CI;
1596 }
1597 }
1598 llvm_unreachable("Incorrect MSVC intrinsic!");
1599 }
1600
1601 namespace {
1602 // ARC cleanup for __builtin_os_log_format
1603 struct CallObjCArcUse final : EHScopeStack::Cleanup {
1604 CallObjCArcUse(llvm::Value *object) : object(object) {}
1605 llvm::Value *object;
1606
1607 void Emit(CodeGenFunction &CGF, Flags flags) override {
1608 CGF.EmitARCIntrinsicUse(object);
1609 }
1610 };
1611 }
1612
1613 Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
1614 BuiltinCheckKind Kind) {
1615 assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero)
1616 && "Unsupported builtin check kind");
1617
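// Under -fsanitize=builtin, diagnose a zero argument at runtime when the
// target treats clz/ctz of zero as undefined; callers feed the same
// isCLZForZeroUndef() query into the intrinsic's zero-is-undef flag.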
1618 Value *ArgValue = EmitScalarExpr(E);
1619 if (!SanOpts.has(SanitizerKind::Builtin) || !getTarget().isCLZForZeroUndef())
1620 return ArgValue;
1621
1622 SanitizerScope SanScope(this);
1623 Value *Cond = Builder.CreateICmpNE(
1624 ArgValue, llvm::Constant::getNullValue(ArgValue->getType()));
1625 EmitCheck(std::make_pair(Cond, SanitizerKind::Builtin),
1626 SanitizerHandler::InvalidBuiltin,
1627 {EmitCheckSourceLocation(E->getExprLoc()),
1628 llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
1629 None);
1630 return ArgValue;
1631 }
1632
1633 /// Get the argument type for arguments to os_log_helper.
1634 static CanQualType getOSLogArgType(ASTContext &C, int Size) {
1635 QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
1636 return C.getCanonicalType(UnsignedTy);
1637 }
1638
1639 llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
1640 const analyze_os_log::OSLogBufferLayout &Layout,
1641 CharUnits BufferAlignment) {
1642 ASTContext &Ctx = getContext();
1643
1644 llvm::SmallString<64> Name;
1645 {
1646 raw_svector_ostream OS(Name);
1647 OS << "__os_log_helper";
1648 OS << "_" << BufferAlignment.getQuantity();
1649 OS << "_" << int(Layout.getSummaryByte());
1650 OS << "_" << int(Layout.getNumArgsByte());
1651 for (const auto &Item : Layout.Items)
1652 OS << "_" << int(Item.getSizeByte()) << "_"
1653 << int(Item.getDescriptorByte());
1654 }
1655
1656 if (llvm::Function *F = CGM.getModule().getFunction(Name))
1657 return F;
1658
1659 llvm::SmallVector<QualType, 4> ArgTys;
1660 FunctionArgList Args;
1661 Args.push_back(ImplicitParamDecl::Create(
1662 Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
1663 ImplicitParamDecl::Other));
1664 ArgTys.emplace_back(Ctx.VoidPtrTy);
1665
1666 for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
1667 char Size = Layout.Items[I].getSizeByte();
1668 if (!Size)
1669 continue;
1670
1671 QualType ArgTy = getOSLogArgType(Ctx, Size);
1672 Args.push_back(ImplicitParamDecl::Create(
1673 Ctx, nullptr, SourceLocation(),
1674 &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
1675 ImplicitParamDecl::Other));
1676 ArgTys.emplace_back(ArgTy);
1677 }
1678
1679 QualType ReturnTy = Ctx.VoidTy;
1680 QualType FunctionTy = Ctx.getFunctionType(ReturnTy, ArgTys, {});
1681
1682 // The helper function has linkonce_odr linkage to enable the linker to merge
1683 // identical functions. To ensure the merging always happens, 'noinline' is
1684 // attached to the function when compiling with -Oz.
1685 const CGFunctionInfo &FI =
1686 CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
1687 llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
1688 llvm::Function *Fn = llvm::Function::Create(
1689 FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
1690 Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
1691 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn);
1692 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
1693 Fn->setDoesNotThrow();
1694
1695 // Attach 'noinline' at -Oz.
1696 if (CGM.getCodeGenOpts().OptimizeSize == 2)
1697 Fn->addFnAttr(llvm::Attribute::NoInline);
1698
1699 auto NL = ApplyDebugLocation::CreateEmpty(*this);
1700 IdentifierInfo *II = &Ctx.Idents.get(Name);
1701 FunctionDecl *FD = FunctionDecl::Create(
1702 Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
1703 FunctionTy, nullptr, SC_PrivateExtern, false, false);
1704 // Avoid generating debug location info for the function.
1705 FD->setImplicit();
1706
1707 StartFunction(FD, ReturnTy, Fn, FI, Args);
1708
1709 // Create a scope with an artificial location for the body of this function.
1710 auto AL = ApplyDebugLocation::CreateArtificial(*this);
1711
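// Serialize the buffer in the layout computed by computeOSLogBufferLayout:
// a summary byte and an argument-count byte, followed by a descriptor byte,
// a size byte, and the argument data for each item.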
1712 CharUnits Offset;
1713 Address BufAddr(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"),
1714 BufferAlignment);
1715 Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
1716 Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
1717 Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
1718 Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));
1719
1720 unsigned I = 1;
1721 for (const auto &Item : Layout.Items) {
1722 Builder.CreateStore(
1723 Builder.getInt8(Item.getDescriptorByte()),
1724 Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
1725 Builder.CreateStore(
1726 Builder.getInt8(Item.getSizeByte()),
1727 Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));
1728
1729 CharUnits Size = Item.size();
1730 if (!Size.getQuantity())
1731 continue;
1732
1733 Address Arg = GetAddrOfLocalVar(Args[I]);
1734 Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
1735 Addr = Builder.CreateBitCast(Addr, Arg.getPointer()->getType(),
1736 "argDataCast");
1737 Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
1738 Offset += Size;
1739 ++I;
1740 }
1741
1742 FinishFunction();
1743
1744 return Fn;
1745 }
1746
1747 RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
1748 assert(E.getNumArgs() >= 2 &&
1749 "__builtin_os_log_format takes at least 2 arguments");
1750 ASTContext &Ctx = getContext();
1751 analyze_os_log::OSLogBufferLayout Layout;
1752 analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout);
1753 Address BufAddr = EmitPointerWithAlignment(E.getArg(0));
1754 llvm::SmallVector<llvm::Value *, 4> RetainableOperands;
1755
1756 // Ignore argument 1, the format string. It is not currently used.
1757 CallArgList Args;
1758 Args.add(RValue::get(BufAddr.getPointer()), Ctx.VoidPtrTy);
1759
1760 for (const auto &Item : Layout.Items) {
1761 int Size = Item.getSizeByte();
1762 if (!Size)
1763 continue;
1764
1765 llvm::Value *ArgVal;
1766
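// A mask item carries a short identifier string; pack its bytes
// little-endian into a 64-bit integer constant.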
1767 if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
1768 uint64_t Val = 0;
1769 for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
1770 Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
1771 ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
1772 } else if (const Expr *TheExpr = Item.getExpr()) {
1773 ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);
1774
1775 // If a temporary object that requires destruction after the full
1776 // expression is passed, push a lifetime-extended cleanup to extend its
1777 // lifetime to the end of the enclosing block scope.
1778 auto LifetimeExtendObject = [&](const Expr *E) {
1779 E = E->IgnoreParenCasts();
1780 // Extend lifetimes of objects returned by function calls and message
1781 // sends.
1782
1783 // FIXME: We should do this in other cases in which temporaries are
1784 // created including arguments of non-ARC types (e.g., C++
1785 // temporaries).
1786 if (isa<CallExpr>(E) || isa<ObjCMessageExpr>(E))
1787 return true;
1788 return false;
1789 };
1790
1791 if (TheExpr->getType()->isObjCRetainableType() &&
1792 getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) {
1793 assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
1794 "Only scalar can be a ObjC retainable type");
1795 if (!isa<Constant>(ArgVal)) {
1796 CleanupKind Cleanup = getARCCleanupKind();
1797 QualType Ty = TheExpr->getType();
1798 Address Alloca = Address::invalid();
1799 Address Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
1800 ArgVal = EmitARCRetain(Ty, ArgVal);
1801 Builder.CreateStore(ArgVal, Addr);
1802 pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty,
1803 CodeGenFunction::destroyARCStrongPrecise,
1804 Cleanup & EHCleanup);
1805
1806 // Push a clang.arc.use call to ensure the ARC optimizer knows that the
1807 // argument has to be kept alive.
1808 if (CGM.getCodeGenOpts().OptimizationLevel != 0)
1809 pushCleanupAfterFullExpr<CallObjCArcUse>(Cleanup, ArgVal);
1810 }
1811 }
1812 } else {
1813 ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
1814 }
1815
1816 unsigned ArgValSize =
1817 CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
1818 llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
1819 ArgValSize);
1820 ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
1821 CanQualType ArgTy = getOSLogArgType(Ctx, Size);
1822 // If ArgVal has type x86_fp80, zero-extend ArgVal.
1823 ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy));
1824 Args.add(RValue::get(ArgVal), ArgTy);
1825 }
1826
1827 const CGFunctionInfo &FI =
1828 CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args);
1829 llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
1830 Layout, BufAddr.getAlignment());
1831 EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args);
1832 return RValue::get(BufAddr.getPointer());
1833 }
1834
1835 /// Determine if a binop is a checked mixed-sign multiply we can specialize.
1836 static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
1837 WidthAndSignedness Op1Info,
1838 WidthAndSignedness Op2Info,
1839 WidthAndSignedness ResultInfo) {
1840 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
1841 std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width &&
1842 Op1Info.Signed != Op2Info.Signed;
1843 }
1844
1845 /// Emit a checked mixed-sign multiply. This is a cheaper specialization of
1846 /// the generic checked-binop irgen.
1847 static RValue
1848 EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
1849 WidthAndSignedness Op1Info, const clang::Expr *Op2,
1850 WidthAndSignedness Op2Info,
1851 const clang::Expr *ResultArg, QualType ResultQTy,
1852 WidthAndSignedness ResultInfo) {
1853 assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
1854 Op2Info, ResultInfo) &&
1855 "Not a mixed-sign multipliction we can specialize");
1856
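// Strategy: compute |Signed| * Unsigned with umul.with.overflow, negate the
// product if the signed operand was negative, and then check that the result
// fits in the (possibly narrower) result type.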
1857 // Emit the signed and unsigned operands.
1858 const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2;
1859 const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1;
1860 llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp);
1861 llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp);
1862 unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width;
1863 unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width;
1864
1865 // One of the operands may be smaller than the other. If so, [s|z]ext it.
1866 if (SignedOpWidth < UnsignedOpWidth)
1867 Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext");
1868 if (UnsignedOpWidth < SignedOpWidth)
1869 Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext");
1870
1871 llvm::Type *OpTy = Signed->getType();
1872 llvm::Value *Zero = llvm::Constant::getNullValue(OpTy);
1873 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
1874 llvm::Type *ResTy = ResultPtr.getElementType();
1875 unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width);
1876
1877 // Take the absolute value of the signed operand.
1878 llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero);
1879 llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed);
1880 llvm::Value *AbsSigned =
1881 CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed);
1882
1883 // Perform a checked unsigned multiplication.
1884 llvm::Value *UnsignedOverflow;
1885 llvm::Value *UnsignedResult =
1886 EmitOverflowIntrinsic(CGF, llvm::Intrinsic::umul_with_overflow, AbsSigned,
1887 Unsigned, UnsignedOverflow);
1888
1889 llvm::Value *Overflow, *Result;
1890 if (ResultInfo.Signed) {
1891 // Signed overflow occurs if the result is greater than INT_MAX or less
1892 // than INT_MIN, i.e. when |Result| > (INT_MAX + IsNegative).
1893 auto IntMax =
1894 llvm::APInt::getSignedMaxValue(ResultInfo.Width).zextOrSelf(OpWidth);
1895 llvm::Value *MaxResult =
1896 CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
1897 CGF.Builder.CreateZExt(IsNegative, OpTy));
1898 llvm::Value *SignedOverflow =
1899 CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult);
1900 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow);
1901
1902 // Prepare the signed result (possibly by negating it).
1903 llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult);
1904 llvm::Value *SignedResult =
1905 CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult);
1906 Result = CGF.Builder.CreateTrunc(SignedResult, ResTy);
1907 } else {
1908 // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX.
1909 llvm::Value *Underflow = CGF.Builder.CreateAnd(
1910 IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
1911 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
1912 if (ResultInfo.Width < OpWidth) {
1913 auto IntMax =
1914 llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
1915 llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
1916 UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
1917 Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
1918 }
1919
1920 // Negate the product if it would be negative in infinite precision.
1921 Result = CGF.Builder.CreateSelect(
1922 IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);
1923
1924 Result = CGF.Builder.CreateTrunc(Result, ResTy);
1925 }
1926 assert(Overflow && Result && "Missing overflow or result");
1927
1928 bool isVolatile =
1929 ResultArg->getType()->getPointeeType().isVolatileQualified();
1930 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
1931 isVolatile);
1932 return RValue::get(Overflow);
1933 }
1934
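// Helper for __builtin_dump_struct: recursively print each field of RType
// through the printf-style callee Func, one line per field, and accumulate
// the callee's return values.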
1935 static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType,
1936 Value *&RecordPtr, CharUnits Align,
1937 llvm::FunctionCallee Func, int Lvl) {
1938 ASTContext &Context = CGF.getContext();
1939 RecordDecl *RD = RType->castAs<RecordType>()->getDecl()->getDefinition();
1940 std::string Pad = std::string(Lvl * 4, ' ');
1941
1942 Value *GString =
1943 CGF.Builder.CreateGlobalStringPtr(RType.getAsString() + " {\n");
1944 Value *Res = CGF.Builder.CreateCall(Func, {GString});
1945
1946 static llvm::DenseMap<QualType, const char *> Types;
1947 if (Types.empty()) {
1948 Types[Context.CharTy] = "%c";
1949 Types[Context.BoolTy] = "%d";
1950 Types[Context.SignedCharTy] = "%hhd";
1951 Types[Context.UnsignedCharTy] = "%hhu";
1952 Types[Context.IntTy] = "%d";
1953 Types[Context.UnsignedIntTy] = "%u";
1954 Types[Context.LongTy] = "%ld";
1955 Types[Context.UnsignedLongTy] = "%lu";
1956 Types[Context.LongLongTy] = "%lld";
1957 Types[Context.UnsignedLongLongTy] = "%llu";
1958 Types[Context.ShortTy] = "%hd";
1959 Types[Context.UnsignedShortTy] = "%hu";
1960 Types[Context.VoidPtrTy] = "%p";
1961 Types[Context.FloatTy] = "%f";
1962 Types[Context.DoubleTy] = "%f";
1963 Types[Context.LongDoubleTy] = "%Lf";
1964 Types[Context.getPointerType(Context.CharTy)] = "%s";
1965 Types[Context.getPointerType(Context.getConstType(Context.CharTy))] = "%s";
1966 }
1967
1968 for (const auto *FD : RD->fields()) {
1969 Value *FieldPtr = RecordPtr;
1970 if (RD->isUnion())
1971 FieldPtr = CGF.Builder.CreatePointerCast(
1972 FieldPtr, CGF.ConvertType(Context.getPointerType(FD->getType())));
1973 else
1974 FieldPtr = CGF.Builder.CreateStructGEP(CGF.ConvertType(RType), FieldPtr,
1975 FD->getFieldIndex());
1976
1977 GString = CGF.Builder.CreateGlobalStringPtr(
1978 llvm::Twine(Pad)
1979 .concat(FD->getType().getAsString())
1980 .concat(llvm::Twine(' '))
1981 .concat(FD->getNameAsString())
1982 .concat(" : ")
1983 .str());
1984 Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
1985 Res = CGF.Builder.CreateAdd(Res, TmpRes);
1986
1987 QualType CanonicalType =
1988 FD->getType().getUnqualifiedType().getCanonicalType();
1989
1990 // If the field is itself a record type, dump it recursively.
1991 if (CanonicalType->isRecordType()) {
1992 TmpRes = dumpRecord(CGF, CanonicalType, FieldPtr, Align, Func, Lvl + 1);
1993 Res = CGF.Builder.CreateAdd(TmpRes, Res);
1994 continue;
1995 }
1996
1997 // We try to determine the best format to print the current field
1998 llvm::Twine Format = Types.find(CanonicalType) == Types.end()
1999 ? Types[Context.VoidPtrTy]
2000 : Types[CanonicalType];
2001
2002 Address FieldAddress = Address(FieldPtr, Align);
2003 FieldPtr = CGF.Builder.CreateLoad(FieldAddress);
2004
2005 // FIXME: Need to handle bitfields here.
2006 GString = CGF.Builder.CreateGlobalStringPtr(
2007 Format.concat(llvm::Twine('\n')).str());
2008 TmpRes = CGF.Builder.CreateCall(Func, {GString, FieldPtr});
2009 Res = CGF.Builder.CreateAdd(Res, TmpRes);
2010 }
2011
2012 GString = CGF.Builder.CreateGlobalStringPtr(Pad + "}\n");
2013 Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
2014 Res = CGF.Builder.CreateAdd(Res, TmpRes);
2015 return Res;
2016 }
2017
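// Recursive worker for TypeRequiresBuiltinLaunder: strip array types, then
// report true if the record is a dynamic class or any field (transitively)
// is. Seen breaks cycles for recursive record types.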
2018 static bool
2019 TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty,
2020 llvm::SmallPtrSetImpl<const Decl *> &Seen) {
2021 if (const auto *Arr = Ctx.getAsArrayType(Ty))
2022 Ty = Ctx.getBaseElementType(Arr);
2023
2024 const auto *Record = Ty->getAsCXXRecordDecl();
2025 if (!Record)
2026 return false;
2027
2028 // We've already checked this type, or are in the process of checking it.
2029 if (!Seen.insert(Record).second)
2030 return false;
2031
2032 assert(Record->hasDefinition() &&
2033 "Incomplete types should already be diagnosed");
2034
2035 if (Record->isDynamicClass())
2036 return true;
2037
2038 for (FieldDecl *F : Record->fields()) {
2039 if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen))
2040 return true;
2041 }
2042 return false;
2043 }
2044
2045 /// Determine if the specified type requires laundering by checking if it is a
2046 /// dynamic class type or contains a subobject which is a dynamic class type.
2047 static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) {
2048 if (!CGM.getCodeGenOpts().StrictVTablePointers)
2049 return false;
2050 llvm::SmallPtrSet<const Decl *, 16> Seen;
2051 return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen);
2052 }
2053
2054 RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
2055 llvm::Value *Src = EmitScalarExpr(E->getArg(0));
2056 llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));
2057
2058 // The builtin's shift arg may have a different type than the source arg and
2059 // result, but the LLVM intrinsic uses the same type for all values.
2060 llvm::Type *Ty = Src->getType();
2061 ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);
2062
2063 // Rotate is a special case of an LLVM funnel shift: the first two args are the same.
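// For example, __builtin_rotateleft32(x, n) becomes
//   call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %n)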
2064 unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
2065 Function *F = CGM.getIntrinsic(IID, Ty);
2066 return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt }));
2067 }
2068
2069 RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
2070 const CallExpr *E,
2071 ReturnValueSlot ReturnValue) {
2072 const FunctionDecl *FD = GD.getDecl()->getAsFunction();
2073 // See if we can constant fold this builtin. If so, don't emit it at all.
2074 Expr::EvalResult Result;
2075 if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
2076 !Result.hasSideEffects()) {
2077 if (Result.Val.isInt())
2078 return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
2079 Result.Val.getInt()));
2080 if (Result.Val.isFloat())
2081 return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
2082 Result.Val.getFloat()));
2083 }
2084
2085 // If the builtin has been declared explicitly with an assembler label,
2086 // disable the specialized emitting below. Ideally we should communicate the
2087 // rename in IR, or at least avoid generating the intrinsic calls that are
2088 // likely to get lowered to the renamed library functions.
2089 const unsigned BuiltinIDIfNoAsmLabel =
2090 FD->hasAttr<AsmLabelAttr>() ? 0 : BuiltinID;
2091
2092 // There are LLVM math intrinsics/instructions corresponding to math library
2093 // functions, except that the LLVM op will never set errno while the math library
2094 // might. Also, math builtins have the same semantics as their math library
2095 // twins. Thus, we can transform math library and builtin calls to their
2096 // LLVM counterparts if the call is marked 'const' (known to never set errno).
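// For example, a 'const' call to ceil(x) is emitted as @llvm.ceil.f64 (or its
// experimental constrained counterpart when a strict floating-point
// environment is in effect) instead of a libm call.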
2097 if (FD->hasAttr<ConstAttr>()) {
2098 switch (BuiltinIDIfNoAsmLabel) {
2099 case Builtin::BIceil:
2100 case Builtin::BIceilf:
2101 case Builtin::BIceill:
2102 case Builtin::BI__builtin_ceil:
2103 case Builtin::BI__builtin_ceilf:
2104 case Builtin::BI__builtin_ceilf16:
2105 case Builtin::BI__builtin_ceill:
2106 case Builtin::BI__builtin_ceilf128:
2107 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2108 Intrinsic::ceil,
2109 Intrinsic::experimental_constrained_ceil));
2110
2111 case Builtin::BIcopysign:
2112 case Builtin::BIcopysignf:
2113 case Builtin::BIcopysignl:
2114 case Builtin::BI__builtin_copysign:
2115 case Builtin::BI__builtin_copysignf:
2116 case Builtin::BI__builtin_copysignf16:
2117 case Builtin::BI__builtin_copysignl:
2118 case Builtin::BI__builtin_copysignf128:
2119 return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::copysign));
2120
2121 case Builtin::BIcos:
2122 case Builtin::BIcosf:
2123 case Builtin::BIcosl:
2124 case Builtin::BI__builtin_cos:
2125 case Builtin::BI__builtin_cosf:
2126 case Builtin::BI__builtin_cosf16:
2127 case Builtin::BI__builtin_cosl:
2128 case Builtin::BI__builtin_cosf128:
2129 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2130 Intrinsic::cos,
2131 Intrinsic::experimental_constrained_cos));
2132
2133 case Builtin::BIexp:
2134 case Builtin::BIexpf:
2135 case Builtin::BIexpl:
2136 case Builtin::BI__builtin_exp:
2137 case Builtin::BI__builtin_expf:
2138 case Builtin::BI__builtin_expf16:
2139 case Builtin::BI__builtin_expl:
2140 case Builtin::BI__builtin_expf128:
2141 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2142 Intrinsic::exp,
2143 Intrinsic::experimental_constrained_exp));
2144
2145 case Builtin::BIexp2:
2146 case Builtin::BIexp2f:
2147 case Builtin::BIexp2l:
2148 case Builtin::BI__builtin_exp2:
2149 case Builtin::BI__builtin_exp2f:
2150 case Builtin::BI__builtin_exp2f16:
2151 case Builtin::BI__builtin_exp2l:
2152 case Builtin::BI__builtin_exp2f128:
2153 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2154 Intrinsic::exp2,
2155 Intrinsic::experimental_constrained_exp2));
2156
2157 case Builtin::BIfabs:
2158 case Builtin::BIfabsf:
2159 case Builtin::BIfabsl:
2160 case Builtin::BI__builtin_fabs:
2161 case Builtin::BI__builtin_fabsf:
2162 case Builtin::BI__builtin_fabsf16:
2163 case Builtin::BI__builtin_fabsl:
2164 case Builtin::BI__builtin_fabsf128:
2165 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::fabs));
2166
2167 case Builtin::BIfloor:
2168 case Builtin::BIfloorf:
2169 case Builtin::BIfloorl:
2170 case Builtin::BI__builtin_floor:
2171 case Builtin::BI__builtin_floorf:
2172 case Builtin::BI__builtin_floorf16:
2173 case Builtin::BI__builtin_floorl:
2174 case Builtin::BI__builtin_floorf128:
2175 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2176 Intrinsic::floor,
2177 Intrinsic::experimental_constrained_floor));
2178
2179 case Builtin::BIfma:
2180 case Builtin::BIfmaf:
2181 case Builtin::BIfmal:
2182 case Builtin::BI__builtin_fma:
2183 case Builtin::BI__builtin_fmaf:
2184 case Builtin::BI__builtin_fmaf16:
2185 case Builtin::BI__builtin_fmal:
2186 case Builtin::BI__builtin_fmaf128:
2187 return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E,
2188 Intrinsic::fma,
2189 Intrinsic::experimental_constrained_fma));
2190
2191 case Builtin::BIfmax:
2192 case Builtin::BIfmaxf:
2193 case Builtin::BIfmaxl:
2194 case Builtin::BI__builtin_fmax:
2195 case Builtin::BI__builtin_fmaxf:
2196 case Builtin::BI__builtin_fmaxf16:
2197 case Builtin::BI__builtin_fmaxl:
2198 case Builtin::BI__builtin_fmaxf128:
2199 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2200 Intrinsic::maxnum,
2201 Intrinsic::experimental_constrained_maxnum));
2202
2203 case Builtin::BIfmin:
2204 case Builtin::BIfminf:
2205 case Builtin::BIfminl:
2206 case Builtin::BI__builtin_fmin:
2207 case Builtin::BI__builtin_fminf:
2208 case Builtin::BI__builtin_fminf16:
2209 case Builtin::BI__builtin_fminl:
2210 case Builtin::BI__builtin_fminf128:
2211 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2212 Intrinsic::minnum,
2213 Intrinsic::experimental_constrained_minnum));
2214
2215 // fmod() is a special-case. It maps to the frem instruction rather than an
2216 // LLVM intrinsic.
2217 case Builtin::BIfmod:
2218 case Builtin::BIfmodf:
2219 case Builtin::BIfmodl:
2220 case Builtin::BI__builtin_fmod:
2221 case Builtin::BI__builtin_fmodf:
2222 case Builtin::BI__builtin_fmodf16:
2223 case Builtin::BI__builtin_fmodl:
2224 case Builtin::BI__builtin_fmodf128: {
2225 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2226 Value *Arg1 = EmitScalarExpr(E->getArg(0));
2227 Value *Arg2 = EmitScalarExpr(E->getArg(1));
2228 return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
2229 }
2230
2231 case Builtin::BIlog:
2232 case Builtin::BIlogf:
2233 case Builtin::BIlogl:
2234 case Builtin::BI__builtin_log:
2235 case Builtin::BI__builtin_logf:
2236 case Builtin::BI__builtin_logf16:
2237 case Builtin::BI__builtin_logl:
2238 case Builtin::BI__builtin_logf128:
2239 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2240 Intrinsic::log,
2241 Intrinsic::experimental_constrained_log));
2242
2243 case Builtin::BIlog10:
2244 case Builtin::BIlog10f:
2245 case Builtin::BIlog10l:
2246 case Builtin::BI__builtin_log10:
2247 case Builtin::BI__builtin_log10f:
2248 case Builtin::BI__builtin_log10f16:
2249 case Builtin::BI__builtin_log10l:
2250 case Builtin::BI__builtin_log10f128:
2251 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2252 Intrinsic::log10,
2253 Intrinsic::experimental_constrained_log10));
2254
2255 case Builtin::BIlog2:
2256 case Builtin::BIlog2f:
2257 case Builtin::BIlog2l:
2258 case Builtin::BI__builtin_log2:
2259 case Builtin::BI__builtin_log2f:
2260 case Builtin::BI__builtin_log2f16:
2261 case Builtin::BI__builtin_log2l:
2262 case Builtin::BI__builtin_log2f128:
2263 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2264 Intrinsic::log2,
2265 Intrinsic::experimental_constrained_log2));
2266
2267 case Builtin::BInearbyint:
2268 case Builtin::BInearbyintf:
2269 case Builtin::BInearbyintl:
2270 case Builtin::BI__builtin_nearbyint:
2271 case Builtin::BI__builtin_nearbyintf:
2272 case Builtin::BI__builtin_nearbyintl:
2273 case Builtin::BI__builtin_nearbyintf128:
2274 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2275 Intrinsic::nearbyint,
2276 Intrinsic::experimental_constrained_nearbyint));
2277
2278 case Builtin::BIpow:
2279 case Builtin::BIpowf:
2280 case Builtin::BIpowl:
2281 case Builtin::BI__builtin_pow:
2282 case Builtin::BI__builtin_powf:
2283 case Builtin::BI__builtin_powf16:
2284 case Builtin::BI__builtin_powl:
2285 case Builtin::BI__builtin_powf128:
2286 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2287 Intrinsic::pow,
2288 Intrinsic::experimental_constrained_pow));
2289
2290 case Builtin::BIrint:
2291 case Builtin::BIrintf:
2292 case Builtin::BIrintl:
2293 case Builtin::BI__builtin_rint:
2294 case Builtin::BI__builtin_rintf:
2295 case Builtin::BI__builtin_rintf16:
2296 case Builtin::BI__builtin_rintl:
2297 case Builtin::BI__builtin_rintf128:
2298 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2299 Intrinsic::rint,
2300 Intrinsic::experimental_constrained_rint));
2301
2302 case Builtin::BIround:
2303 case Builtin::BIroundf:
2304 case Builtin::BIroundl:
2305 case Builtin::BI__builtin_round:
2306 case Builtin::BI__builtin_roundf:
2307 case Builtin::BI__builtin_roundf16:
2308 case Builtin::BI__builtin_roundl:
2309 case Builtin::BI__builtin_roundf128:
2310 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2311 Intrinsic::round,
2312 Intrinsic::experimental_constrained_round));
2313
2314 case Builtin::BIsin:
2315 case Builtin::BIsinf:
2316 case Builtin::BIsinl:
2317 case Builtin::BI__builtin_sin:
2318 case Builtin::BI__builtin_sinf:
2319 case Builtin::BI__builtin_sinf16:
2320 case Builtin::BI__builtin_sinl:
2321 case Builtin::BI__builtin_sinf128:
2322 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2323 Intrinsic::sin,
2324 Intrinsic::experimental_constrained_sin));
2325
2326 case Builtin::BIsqrt:
2327 case Builtin::BIsqrtf:
2328 case Builtin::BIsqrtl:
2329 case Builtin::BI__builtin_sqrt:
2330 case Builtin::BI__builtin_sqrtf:
2331 case Builtin::BI__builtin_sqrtf16:
2332 case Builtin::BI__builtin_sqrtl:
2333 case Builtin::BI__builtin_sqrtf128:
2334 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2335 Intrinsic::sqrt,
2336 Intrinsic::experimental_constrained_sqrt));
2337
2338 case Builtin::BItrunc:
2339 case Builtin::BItruncf:
2340 case Builtin::BItruncl:
2341 case Builtin::BI__builtin_trunc:
2342 case Builtin::BI__builtin_truncf:
2343 case Builtin::BI__builtin_truncf16:
2344 case Builtin::BI__builtin_truncl:
2345 case Builtin::BI__builtin_truncf128:
2346 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2347 Intrinsic::trunc,
2348 Intrinsic::experimental_constrained_trunc));
2349
2350 case Builtin::BIlround:
2351 case Builtin::BIlroundf:
2352 case Builtin::BIlroundl:
2353 case Builtin::BI__builtin_lround:
2354 case Builtin::BI__builtin_lroundf:
2355 case Builtin::BI__builtin_lroundl:
2356 case Builtin::BI__builtin_lroundf128:
2357 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
2358 *this, E, Intrinsic::lround,
2359 Intrinsic::experimental_constrained_lround));
2360
2361 case Builtin::BIllround:
2362 case Builtin::BIllroundf:
2363 case Builtin::BIllroundl:
2364 case Builtin::BI__builtin_llround:
2365 case Builtin::BI__builtin_llroundf:
2366 case Builtin::BI__builtin_llroundl:
2367 case Builtin::BI__builtin_llroundf128:
2368 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
2369 *this, E, Intrinsic::llround,
2370 Intrinsic::experimental_constrained_llround));
2371
2372 case Builtin::BIlrint:
2373 case Builtin::BIlrintf:
2374 case Builtin::BIlrintl:
2375 case Builtin::BI__builtin_lrint:
2376 case Builtin::BI__builtin_lrintf:
2377 case Builtin::BI__builtin_lrintl:
2378 case Builtin::BI__builtin_lrintf128:
2379 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
2380 *this, E, Intrinsic::lrint,
2381 Intrinsic::experimental_constrained_lrint));
2382
2383 case Builtin::BIllrint:
2384 case Builtin::BIllrintf:
2385 case Builtin::BIllrintl:
2386 case Builtin::BI__builtin_llrint:
2387 case Builtin::BI__builtin_llrintf:
2388 case Builtin::BI__builtin_llrintl:
2389 case Builtin::BI__builtin_llrintf128:
2390 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
2391 *this, E, Intrinsic::llrint,
2392 Intrinsic::experimental_constrained_llrint));
2393
2394 default:
2395 break;
2396 }
2397 }
2398
2399 switch (BuiltinIDIfNoAsmLabel) {
2400 default: break;
2401 case Builtin::BI__builtin___CFStringMakeConstantString:
2402 case Builtin::BI__builtin___NSStringMakeConstantString:
2403 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
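// va_start and va_end share one emission path: the boolean selects between
// the llvm.va_start and llvm.va_end intrinsics, and MSVC's __va_start
// evaluates its first argument as an ordinary pointer rather than taking the
// address of a va_list lvalue.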
2404 case Builtin::BI__builtin_stdarg_start:
2405 case Builtin::BI__builtin_va_start:
2406 case Builtin::BI__va_start:
2407 case Builtin::BI__builtin_va_end:
2408 return RValue::get(
2409 EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
2410 ? EmitScalarExpr(E->getArg(0))
2411 : EmitVAListRef(E->getArg(0)).getPointer(),
2412 BuiltinID != Builtin::BI__builtin_va_end));
2413 case Builtin::BI__builtin_va_copy: {
2414 Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer();
2415 Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer();
2416
2417 llvm::Type *Type = Int8PtrTy;
2418
2419 DstPtr = Builder.CreateBitCast(DstPtr, Type);
2420 SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
2421 return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy),
2422 {DstPtr, SrcPtr}));
2423 }
2424 case Builtin::BI__builtin_abs:
2425 case Builtin::BI__builtin_labs:
2426 case Builtin::BI__builtin_llabs: {
2427 // X < 0 ? -X : X
2428 // The negation has 'nsw' because abs of INT_MIN is undefined.
2429 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2430 Value *NegOp = Builder.CreateNSWNeg(ArgValue, "neg");
2431 Constant *Zero = llvm::Constant::getNullValue(ArgValue->getType());
2432 Value *CmpResult = Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
2433 Value *Result = Builder.CreateSelect(CmpResult, NegOp, ArgValue, "abs");
2434 return RValue::get(Result);
2435 }
2436 case Builtin::BI__builtin_complex: {
2437 Value *Real = EmitScalarExpr(E->getArg(0));
2438 Value *Imag = EmitScalarExpr(E->getArg(1));
2439 return RValue::getComplex({Real, Imag});
2440 }
2441 case Builtin::BI__builtin_conj:
2442 case Builtin::BI__builtin_conjf:
2443 case Builtin::BI__builtin_conjl:
2444 case Builtin::BIconj:
2445 case Builtin::BIconjf:
2446 case Builtin::BIconjl: {
2447 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2448 Value *Real = ComplexVal.first;
2449 Value *Imag = ComplexVal.second;
2450 Imag = Builder.CreateFNeg(Imag, "neg");
2451 return RValue::getComplex(std::make_pair(Real, Imag));
2452 }
2453 case Builtin::BI__builtin_creal:
2454 case Builtin::BI__builtin_crealf:
2455 case Builtin::BI__builtin_creall:
2456 case Builtin::BIcreal:
2457 case Builtin::BIcrealf:
2458 case Builtin::BIcreall: {
2459 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2460 return RValue::get(ComplexVal.first);
2461 }
2462
2463 case Builtin::BI__builtin_dump_struct: {
2464 llvm::Type *LLVMIntTy = getTypes().ConvertType(getContext().IntTy);
2465 llvm::FunctionType *LLVMFuncType = llvm::FunctionType::get(
2466 LLVMIntTy, {llvm::Type::getInt8PtrTy(getLLVMContext())}, true);
2467
2468 Value *Func = EmitScalarExpr(E->getArg(1)->IgnoreImpCasts());
2469 CharUnits Arg0Align = EmitPointerWithAlignment(E->getArg(0)).getAlignment();
2470
2471 const Expr *Arg0 = E->getArg(0)->IgnoreImpCasts();
2472 QualType Arg0Type = Arg0->getType()->getPointeeType();
2473
2474 Value *RecordPtr = EmitScalarExpr(Arg0);
2475 Value *Res = dumpRecord(*this, Arg0Type, RecordPtr, Arg0Align,
2476 {LLVMFuncType, Func}, 0);
2477 return RValue::get(Res);
2478 }
2479
2480 case Builtin::BI__builtin_preserve_access_index: {
2481 // Only enable the preserved access index region when debug info is
2482 // available, since debug info is needed to preserve the user-level
2483 // access pattern.
2484 if (!getDebugInfo()) {
2485 CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g");
2486 return RValue::get(EmitScalarExpr(E->getArg(0)));
2487 }
2488
2489 // Nested builtin_preserve_access_index() not supported
2490 if (IsInPreservedAIRegion) {
2491 CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported");
2492 return RValue::get(EmitScalarExpr(E->getArg(0)));
2493 }
2494
2495 IsInPreservedAIRegion = true;
2496 Value *Res = EmitScalarExpr(E->getArg(0));
2497 IsInPreservedAIRegion = false;
2498 return RValue::get(Res);
2499 }
2500
2501 case Builtin::BI__builtin_cimag:
2502 case Builtin::BI__builtin_cimagf:
2503 case Builtin::BI__builtin_cimagl:
2504 case Builtin::BIcimag:
2505 case Builtin::BIcimagf:
2506 case Builtin::BIcimagl: {
2507 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2508 return RValue::get(ComplexVal.second);
2509 }
2510
2511 case Builtin::BI__builtin_clrsb:
2512 case Builtin::BI__builtin_clrsbl:
2513 case Builtin::BI__builtin_clrsbll: {
2514 // clrsb(x) -> clz(x < 0 ? ~x : x) - 1
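// The number of redundant sign bits is one less than the number of leading
// bits equal to the sign bit, so complement negative values first and then
// count leading zeros.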
2515 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2516
2517 llvm::Type *ArgType = ArgValue->getType();
2518 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2519
2520 llvm::Type *ResultType = ConvertType(E->getType());
2521 Value *Zero = llvm::Constant::getNullValue(ArgType);
2522 Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg");
2523 Value *Inverse = Builder.CreateNot(ArgValue, "not");
2524 Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue);
2525 Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
2526 Value *Result = Builder.CreateSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
2527 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2528 "cast");
2529 return RValue::get(Result);
2530 }
2531 case Builtin::BI__builtin_ctzs:
2532 case Builtin::BI__builtin_ctz:
2533 case Builtin::BI__builtin_ctzl:
2534 case Builtin::BI__builtin_ctzll: {
2535 Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero);
2536
2537 llvm::Type *ArgType = ArgValue->getType();
2538 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
2539
2540 llvm::Type *ResultType = ConvertType(E->getType());
2541 Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
2542 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
2543 if (Result->getType() != ResultType)
2544 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2545 "cast");
2546 return RValue::get(Result);
2547 }
2548 case Builtin::BI__builtin_clzs:
2549 case Builtin::BI__builtin_clz:
2550 case Builtin::BI__builtin_clzl:
2551 case Builtin::BI__builtin_clzll: {
2552 Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero);
2553
2554 llvm::Type *ArgType = ArgValue->getType();
2555 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2556
2557 llvm::Type *ResultType = ConvertType(E->getType());
2558 Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
2559 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
2560 if (Result->getType() != ResultType)
2561 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2562 "cast");
2563 return RValue::get(Result);
2564 }
2565 case Builtin::BI__builtin_ffs:
2566 case Builtin::BI__builtin_ffsl:
2567 case Builtin::BI__builtin_ffsll: {
2568 // ffs(x) -> x ? cttz(x) + 1 : 0
2569 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2570
2571 llvm::Type *ArgType = ArgValue->getType();
2572 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
2573
2574 llvm::Type *ResultType = ConvertType(E->getType());
2575 Value *Tmp =
2576 Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
2577 llvm::ConstantInt::get(ArgType, 1));
2578 Value *Zero = llvm::Constant::getNullValue(ArgType);
2579 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
2580 Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
2581 if (Result->getType() != ResultType)
2582 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2583 "cast");
2584 return RValue::get(Result);
2585 }
2586 case Builtin::BI__builtin_parity:
2587 case Builtin::BI__builtin_parityl:
2588 case Builtin::BI__builtin_parityll: {
2589 // parity(x) -> ctpop(x) & 1
2590 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2591
2592 llvm::Type *ArgType = ArgValue->getType();
2593 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
2594
2595 llvm::Type *ResultType = ConvertType(E->getType());
2596 Value *Tmp = Builder.CreateCall(F, ArgValue);
2597 Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
2598 if (Result->getType() != ResultType)
2599 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2600 "cast");
2601 return RValue::get(Result);
2602 }
2603 case Builtin::BI__lzcnt16:
2604 case Builtin::BI__lzcnt:
2605 case Builtin::BI__lzcnt64: {
2606 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2607
2608 llvm::Type *ArgType = ArgValue->getType();
2609 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2610
2611 llvm::Type *ResultType = ConvertType(E->getType());
2612 Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
2613 if (Result->getType() != ResultType)
2614 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2615 "cast");
2616 return RValue::get(Result);
2617 }
2618 case Builtin::BI__popcnt16:
2619 case Builtin::BI__popcnt:
2620 case Builtin::BI__popcnt64:
2621 case Builtin::BI__builtin_popcount:
2622 case Builtin::BI__builtin_popcountl:
2623 case Builtin::BI__builtin_popcountll: {
2624 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2625
2626 llvm::Type *ArgType = ArgValue->getType();
2627 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
2628
2629 llvm::Type *ResultType = ConvertType(E->getType());
2630 Value *Result = Builder.CreateCall(F, ArgValue);
2631 if (Result->getType() != ResultType)
2632 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2633 "cast");
2634 return RValue::get(Result);
2635 }
2636 case Builtin::BI__builtin_unpredictable: {
2637 // Always return the argument of __builtin_unpredictable. LLVM does not
2638 // handle this builtin. Metadata for this builtin should be added directly
2639 // to instructions such as branches or switches that use it.
2640 return RValue::get(EmitScalarExpr(E->getArg(0)));
2641 }
2642 case Builtin::BI__builtin_expect: {
2643 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2644 llvm::Type *ArgType = ArgValue->getType();
2645
2646 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
2647 // Don't generate llvm.expect on -O0 as the backend won't use it for
2648 // anything.
2649 // Note, we still IRGen ExpectedValue because it could have side-effects.
2650 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
2651 return RValue::get(ArgValue);
2652
2653 Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
2654 Value *Result =
2655 Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
2656 return RValue::get(Result);
2657 }
2658 case Builtin::BI__builtin_expect_with_probability: {
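// Like __builtin_expect, but the third argument is a constant probability
// (evaluated here and converted to double) that the expectation holds,
// forwarded to llvm.expect.with.probability.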
2659 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2660 llvm::Type *ArgType = ArgValue->getType();
2661
2662 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
2663 llvm::APFloat Probability(0.0);
2664 const Expr *ProbArg = E->getArg(2);
2665 bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext());
2666 assert(EvalSucceed && "probability should be able to evaluate as float");
2667 (void)EvalSucceed;
2668 bool LoseInfo = false;
2669 Probability.convert(llvm::APFloat::IEEEdouble(),
2670 llvm::RoundingMode::Dynamic, &LoseInfo);
2671 llvm::Type *Ty = ConvertType(ProbArg->getType());
2672 Constant *Confidence = ConstantFP::get(Ty, Probability);
2673     // Don't generate llvm.expect.with.probability at -O0, as the backend
2674     // won't use it for anything.
2675     // Note that we still IRGen ExpectedValue because it could have side-effects.
2676 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
2677 return RValue::get(ArgValue);
2678
2679 Function *FnExpect =
2680 CGM.getIntrinsic(Intrinsic::expect_with_probability, ArgType);
2681 Value *Result = Builder.CreateCall(
2682 FnExpect, {ArgValue, ExpectedValue, Confidence}, "expval");
2683 return RValue::get(Result);
2684 }
2685 case Builtin::BI__builtin_assume_aligned: {
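    // Usage sketch: void *q = __builtin_assume_aligned(p, 64); promises the
    // optimizer that p is 64-byte aligned. With the optional third argument,
    // __builtin_assume_aligned(p, 64, off) promises that (char *)p - off is
    // 64-byte aligned instead.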
2686 const Expr *Ptr = E->getArg(0);
2687 Value *PtrValue = EmitScalarExpr(Ptr);
2688 Value *OffsetValue =
2689 (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
2690
2691 Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
2692 ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
2693 if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
2694 AlignmentCI = ConstantInt::get(AlignmentCI->getType(),
2695 llvm::Value::MaximumAlignment);
2696
2697 emitAlignmentAssumption(PtrValue, Ptr,
2698 /*The expr loc is sufficient.*/ SourceLocation(),
2699 AlignmentCI, OffsetValue);
2700 return RValue::get(PtrValue);
2701 }
2702 case Builtin::BI__assume:
2703 case Builtin::BI__builtin_assume: {
2704 if (E->getArg(0)->HasSideEffects(getContext()))
2705 return RValue::get(nullptr);
2706
2707 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2708 Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
2709 return RValue::get(Builder.CreateCall(FnAssume, ArgValue));
2710 }
2711 case Builtin::BI__builtin_bswap16:
2712 case Builtin::BI__builtin_bswap32:
2713 case Builtin::BI__builtin_bswap64: {
2714 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap));
2715 }
2716 case Builtin::BI__builtin_bitreverse8:
2717 case Builtin::BI__builtin_bitreverse16:
2718 case Builtin::BI__builtin_bitreverse32:
2719 case Builtin::BI__builtin_bitreverse64: {
2720 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse));
2721 }
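  // Rotates are lowered to the funnel-shift intrinsics with both data operands
  // equal (see emitRotate): rotl(x, n) -> llvm.fshl(x, x, n) and
  // rotr(x, n) -> llvm.fshr(x, x, n).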
2722 case Builtin::BI__builtin_rotateleft8:
2723 case Builtin::BI__builtin_rotateleft16:
2724 case Builtin::BI__builtin_rotateleft32:
2725 case Builtin::BI__builtin_rotateleft64:
2726 case Builtin::BI_rotl8: // Microsoft variants of rotate left
2727 case Builtin::BI_rotl16:
2728 case Builtin::BI_rotl:
2729 case Builtin::BI_lrotl:
2730 case Builtin::BI_rotl64:
2731 return emitRotate(E, false);
2732
2733 case Builtin::BI__builtin_rotateright8:
2734 case Builtin::BI__builtin_rotateright16:
2735 case Builtin::BI__builtin_rotateright32:
2736 case Builtin::BI__builtin_rotateright64:
2737 case Builtin::BI_rotr8: // Microsoft variants of rotate right
2738 case Builtin::BI_rotr16:
2739 case Builtin::BI_rotr:
2740 case Builtin::BI_lrotr:
2741 case Builtin::BI_rotr64:
2742 return emitRotate(E, true);
2743
2744 case Builtin::BI__builtin_constant_p: {
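    // __builtin_constant_p(x) evaluates to 1 if x folds to a constant (possibly
    // only after optimization) and to 0 otherwise. We emit llvm.is.constant so
    // the final answer can be deferred to the optimizer.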
2745 llvm::Type *ResultType = ConvertType(E->getType());
2746
2747 const Expr *Arg = E->getArg(0);
2748 QualType ArgType = Arg->getType();
2749 // FIXME: The allowance for Obj-C pointers and block pointers is historical
2750 // and likely a mistake.
2751 if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() &&
2752 !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType())
2753 // Per the GCC documentation, only numeric constants are recognized after
2754 // inlining.
2755 return RValue::get(ConstantInt::get(ResultType, 0));
2756
2757 if (Arg->HasSideEffects(getContext()))
2758 // The argument is unevaluated, so be conservative if it might have
2759 // side-effects.
2760 return RValue::get(ConstantInt::get(ResultType, 0));
2761
2762 Value *ArgValue = EmitScalarExpr(Arg);
2763 if (ArgType->isObjCObjectPointerType()) {
2764 // Convert Objective-C objects to id because we cannot distinguish between
2765 // LLVM types for Obj-C classes as they are opaque.
2766 ArgType = CGM.getContext().getObjCIdType();
2767 ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType));
2768 }
2769 Function *F =
2770 CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
2771 Value *Result = Builder.CreateCall(F, ArgValue);
2772 if (Result->getType() != ResultType)
2773 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
2774 return RValue::get(Result);
2775 }
2776 case Builtin::BI__builtin_dynamic_object_size:
2777 case Builtin::BI__builtin_object_size: {
2778 unsigned Type =
2779 E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
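    // The 'Type' argument encodes the GCC semantics: bit 0 selects the closest
    // surrounding subobject rather than the whole object, and bit 1 requests
    // the minimum remaining size (returning 0 when unknown) instead of the
    // maximum (returning (size_t)-1 when unknown).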
2780 auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));
2781
2782     // We pass this builtin on to the optimizer so that it can figure out the
2783 // object size in more complex cases.
2784 bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size;
2785 return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
2786 /*EmittedE=*/nullptr, IsDynamic));
2787 }
2788 case Builtin::BI__builtin_prefetch: {
2789 Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
2790     // FIXME: Technically these constants should be of type 'int', yes?
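    // The defaults below match GCC: rw = 0 (prefetch for read) and
    // locality = 3 (high temporal locality). The trailing constant 1 selects
    // the data cache for llvm.prefetch.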
2791 RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
2792 llvm::ConstantInt::get(Int32Ty, 0);
2793 Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
2794 llvm::ConstantInt::get(Int32Ty, 3);
2795 Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
2796 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
2797 return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data}));
2798 }
2799 case Builtin::BI__builtin_readcyclecounter: {
2800 Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
2801 return RValue::get(Builder.CreateCall(F));
2802 }
2803 case Builtin::BI__builtin___clear_cache: {
2804 Value *Begin = EmitScalarExpr(E->getArg(0));
2805 Value *End = EmitScalarExpr(E->getArg(1));
2806 Function *F = CGM.getIntrinsic(Intrinsic::clear_cache);
2807 return RValue::get(Builder.CreateCall(F, {Begin, End}));
2808 }
2809 case Builtin::BI__builtin_trap:
2810 return RValue::get(EmitTrapCall(Intrinsic::trap));
2811 case Builtin::BI__debugbreak:
2812 return RValue::get(EmitTrapCall(Intrinsic::debugtrap));
2813 case Builtin::BI__builtin_unreachable: {
2814 EmitUnreachable(E->getExprLoc());
2815
2816 // We do need to preserve an insertion point.
2817 EmitBlock(createBasicBlock("unreachable.cont"));
2818
2819 return RValue::get(nullptr);
2820 }
2821
2822 case Builtin::BI__builtin_powi:
2823 case Builtin::BI__builtin_powif:
2824 case Builtin::BI__builtin_powil:
2825 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
2826 *this, E, Intrinsic::powi, Intrinsic::experimental_constrained_powi));
2827
2828 case Builtin::BI__builtin_isgreater:
2829 case Builtin::BI__builtin_isgreaterequal:
2830 case Builtin::BI__builtin_isless:
2831 case Builtin::BI__builtin_islessequal:
2832 case Builtin::BI__builtin_islessgreater:
2833 case Builtin::BI__builtin_isunordered: {
2834     // Ordered comparisons: we know the arguments to these are matching scalar
2835     // floating-point values.
2836 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2837 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
2838 Value *LHS = EmitScalarExpr(E->getArg(0));
2839 Value *RHS = EmitScalarExpr(E->getArg(1));
2840
2841 switch (BuiltinID) {
2842 default: llvm_unreachable("Unknown ordered comparison");
2843 case Builtin::BI__builtin_isgreater:
2844 LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
2845 break;
2846 case Builtin::BI__builtin_isgreaterequal:
2847 LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
2848 break;
2849 case Builtin::BI__builtin_isless:
2850 LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
2851 break;
2852 case Builtin::BI__builtin_islessequal:
2853 LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
2854 break;
2855 case Builtin::BI__builtin_islessgreater:
2856 LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
2857 break;
2858 case Builtin::BI__builtin_isunordered:
2859 LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
2860 break;
2861 }
2862 // ZExt bool to int type.
2863 return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
2864 }
2865 case Builtin::BI__builtin_isnan: {
2866 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2867 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
2868 Value *V = EmitScalarExpr(E->getArg(0));
2869 V = Builder.CreateFCmpUNO(V, V, "cmp");
2870 return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
2871 }
2872
2873 case Builtin::BI__builtin_matrix_transpose: {
2874 const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
2875 Value *MatValue = EmitScalarExpr(E->getArg(0));
2876 MatrixBuilder<CGBuilderTy> MB(Builder);
2877 Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(),
2878 MatrixTy->getNumColumns());
2879 return RValue::get(Result);
2880 }
2881
2882 case Builtin::BI__builtin_matrix_column_major_load: {
2883 MatrixBuilder<CGBuilderTy> MB(Builder);
2884 // Emit everything that isn't dependent on the first parameter type
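    // Arg 0 is the base pointer and arg 3 the column stride (in elements); the
    // row/column counts come from the result's matrix type, and the load is
    // volatile if arg 0's pointee type is volatile-qualified.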
2885 Value *Stride = EmitScalarExpr(E->getArg(3));
2886 const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>();
2887 auto *PtrTy = E->getArg(0)->getType()->getAs<PointerType>();
2888 assert(PtrTy && "arg0 must be of pointer type");
2889 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
2890
2891 Address Src = EmitPointerWithAlignment(E->getArg(0));
2892 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(0)->getType(),
2893 E->getArg(0)->getExprLoc(), FD, 0);
2894 Value *Result = MB.CreateColumnMajorLoad(
2895 Src.getPointer(), Align(Src.getAlignment().getQuantity()), Stride,
2896 IsVolatile, ResultTy->getNumRows(), ResultTy->getNumColumns(),
2897 "matrix");
2898 return RValue::get(Result);
2899 }
2900
2901 case Builtin::BI__builtin_matrix_column_major_store: {
2902 MatrixBuilder<CGBuilderTy> MB(Builder);
2903 Value *Matrix = EmitScalarExpr(E->getArg(0));
2904 Address Dst = EmitPointerWithAlignment(E->getArg(1));
2905 Value *Stride = EmitScalarExpr(E->getArg(2));
2906
2907 const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
2908 auto *PtrTy = E->getArg(1)->getType()->getAs<PointerType>();
2909 assert(PtrTy && "arg1 must be of pointer type");
2910 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
2911
2912 EmitNonNullArgCheck(RValue::get(Dst.getPointer()), E->getArg(1)->getType(),
2913 E->getArg(1)->getExprLoc(), FD, 0);
2914 Value *Result = MB.CreateColumnMajorStore(
2915 Matrix, Dst.getPointer(), Align(Dst.getAlignment().getQuantity()),
2916 Stride, IsVolatile, MatrixTy->getNumRows(), MatrixTy->getNumColumns());
2917 return RValue::get(Result);
2918 }
2919
2920 case Builtin::BIfinite:
2921 case Builtin::BI__finite:
2922 case Builtin::BIfinitef:
2923 case Builtin::BI__finitef:
2924 case Builtin::BIfinitel:
2925 case Builtin::BI__finitel:
2926 case Builtin::BI__builtin_isinf:
2927 case Builtin::BI__builtin_isfinite: {
2928 // isinf(x) --> fabs(x) == infinity
2929 // isfinite(x) --> fabs(x) != infinity
2930 // x != NaN via the ordered compare in either case.
2931 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2932 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
2933 Value *V = EmitScalarExpr(E->getArg(0));
2934 Value *Fabs = EmitFAbs(*this, V);
2935 Constant *Infinity = ConstantFP::getInfinity(V->getType());
2936 CmpInst::Predicate Pred = (BuiltinID == Builtin::BI__builtin_isinf)
2937 ? CmpInst::FCMP_OEQ
2938 : CmpInst::FCMP_ONE;
2939 Value *FCmp = Builder.CreateFCmp(Pred, Fabs, Infinity, "cmpinf");
2940 return RValue::get(Builder.CreateZExt(FCmp, ConvertType(E->getType())));
2941 }
2942
2943 case Builtin::BI__builtin_isinf_sign: {
2944 // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
2945 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2946 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
2947 Value *Arg = EmitScalarExpr(E->getArg(0));
2948 Value *AbsArg = EmitFAbs(*this, Arg);
2949 Value *IsInf = Builder.CreateFCmpOEQ(
2950 AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
2951 Value *IsNeg = EmitSignBit(*this, Arg);
2952
2953 llvm::Type *IntTy = ConvertType(E->getType());
2954 Value *Zero = Constant::getNullValue(IntTy);
2955 Value *One = ConstantInt::get(IntTy, 1);
2956 Value *NegativeOne = ConstantInt::get(IntTy, -1);
2957 Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
2958 Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
2959 return RValue::get(Result);
2960 }
2961
2962 case Builtin::BI__builtin_isnormal: {
2963 // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
2964 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2965 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
2966 Value *V = EmitScalarExpr(E->getArg(0));
2967 Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
2968
2969 Value *Abs = EmitFAbs(*this, V);
2970 Value *IsLessThanInf =
2971 Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
2972 APFloat Smallest = APFloat::getSmallestNormalized(
2973 getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
2974 Value *IsNormal =
2975 Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
2976 "isnormal");
2977 V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
2978 V = Builder.CreateAnd(V, IsNormal, "and");
2979 return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
2980 }
2981
2982 case Builtin::BI__builtin_flt_rounds: {
2983 Function *F = CGM.getIntrinsic(Intrinsic::flt_rounds);
2984
2985 llvm::Type *ResultType = ConvertType(E->getType());
2986 Value *Result = Builder.CreateCall(F);
2987 if (Result->getType() != ResultType)
2988 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2989 "cast");
2990 return RValue::get(Result);
2991 }
2992
2993 case Builtin::BI__builtin_fpclassify: {
2994 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2995 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
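    // __builtin_fpclassify(nan, inf, normal, subnormal, zero, x): arg 5 is the
    // value being classified and args 0-4 are the caller-provided results for
    // NaN, infinity, normal, subnormal and zero, respectively.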
2996 Value *V = EmitScalarExpr(E->getArg(5));
2997 llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
2998
2999 // Create Result
3000 BasicBlock *Begin = Builder.GetInsertBlock();
3001 BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
3002 Builder.SetInsertPoint(End);
3003 PHINode *Result =
3004 Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
3005 "fpclassify_result");
3006
3007 // if (V==0) return FP_ZERO
3008 Builder.SetInsertPoint(Begin);
3009 Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
3010 "iszero");
3011 Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
3012 BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
3013 Builder.CreateCondBr(IsZero, End, NotZero);
3014 Result->addIncoming(ZeroLiteral, Begin);
3015
3016 // if (V != V) return FP_NAN
3017 Builder.SetInsertPoint(NotZero);
3018 Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
3019 Value *NanLiteral = EmitScalarExpr(E->getArg(0));
3020 BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
3021 Builder.CreateCondBr(IsNan, End, NotNan);
3022 Result->addIncoming(NanLiteral, NotZero);
3023
3024 // if (fabs(V) == infinity) return FP_INFINITY
3025 Builder.SetInsertPoint(NotNan);
3026 Value *VAbs = EmitFAbs(*this, V);
3027 Value *IsInf =
3028 Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
3029 "isinf");
3030 Value *InfLiteral = EmitScalarExpr(E->getArg(1));
3031 BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
3032 Builder.CreateCondBr(IsInf, End, NotInf);
3033 Result->addIncoming(InfLiteral, NotNan);
3034
3035 // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
3036 Builder.SetInsertPoint(NotInf);
3037 APFloat Smallest = APFloat::getSmallestNormalized(
3038 getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
3039 Value *IsNormal =
3040 Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
3041 "isnormal");
3042 Value *NormalResult =
3043 Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
3044 EmitScalarExpr(E->getArg(3)));
3045 Builder.CreateBr(End);
3046 Result->addIncoming(NormalResult, NotInf);
3047
3048 // return Result
3049 Builder.SetInsertPoint(End);
3050 return RValue::get(Result);
3051 }
3052
3053 case Builtin::BIalloca:
3054 case Builtin::BI_alloca:
3055 case Builtin::BI__builtin_alloca: {
3056 Value *Size = EmitScalarExpr(E->getArg(0));
3057 const TargetInfo &TI = getContext().getTargetInfo();
3058 // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
3059 const Align SuitableAlignmentInBytes =
3060 CGM.getContext()
3061 .toCharUnitsFromBits(TI.getSuitableAlign())
3062 .getAsAlign();
3063 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
3064 AI->setAlignment(SuitableAlignmentInBytes);
3065 initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
3066 return RValue::get(AI);
3067 }
3068
3069 case Builtin::BI__builtin_alloca_with_align: {
3070 Value *Size = EmitScalarExpr(E->getArg(0));
3071 Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
3072 auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
3073 unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
3074 const Align AlignmentInBytes =
3075 CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign();
3076 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
3077 AI->setAlignment(AlignmentInBytes);
3078 initializeAlloca(*this, AI, Size, AlignmentInBytes);
3079 return RValue::get(AI);
3080 }
3081
3082 case Builtin::BIbzero:
3083 case Builtin::BI__builtin_bzero: {
3084 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3085 Value *SizeVal = EmitScalarExpr(E->getArg(1));
3086 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3087 E->getArg(0)->getExprLoc(), FD, 0);
3088 Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
3089 return RValue::get(nullptr);
3090 }
3091 case Builtin::BImemcpy:
3092 case Builtin::BI__builtin_memcpy:
3093 case Builtin::BImempcpy:
3094 case Builtin::BI__builtin_mempcpy: {
3095 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3096 Address Src = EmitPointerWithAlignment(E->getArg(1));
3097 Value *SizeVal = EmitScalarExpr(E->getArg(2));
3098 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3099 E->getArg(0)->getExprLoc(), FD, 0);
3100 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
3101 E->getArg(1)->getExprLoc(), FD, 1);
3102 Builder.CreateMemCpy(Dest, Src, SizeVal, false);
3103 if (BuiltinID == Builtin::BImempcpy ||
3104 BuiltinID == Builtin::BI__builtin_mempcpy)
3105 return RValue::get(Builder.CreateInBoundsGEP(Dest.getPointer(), SizeVal));
3106 else
3107 return RValue::get(Dest.getPointer());
3108 }
3109
3110 case Builtin::BI__builtin_memcpy_inline: {
3111 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3112 Address Src = EmitPointerWithAlignment(E->getArg(1));
3113 uint64_t Size =
3114 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
3115 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3116 E->getArg(0)->getExprLoc(), FD, 0);
3117 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
3118 E->getArg(1)->getExprLoc(), FD, 1);
3119 Builder.CreateMemCpyInline(Dest, Src, Size);
3120 return RValue::get(nullptr);
3121 }
3122
3123 case Builtin::BI__builtin_char_memchr:
3124 BuiltinID = Builtin::BI__builtin_memchr;
3125 break;
3126
3127 case Builtin::BI__builtin___memcpy_chk: {
3128 // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
3129 Expr::EvalResult SizeResult, DstSizeResult;
3130 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
3131 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
3132 break;
3133 llvm::APSInt Size = SizeResult.Val.getInt();
3134 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
3135 if (Size.ugt(DstSize))
3136 break;
3137 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3138 Address Src = EmitPointerWithAlignment(E->getArg(1));
3139 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
3140 Builder.CreateMemCpy(Dest, Src, SizeVal, false);
3141 return RValue::get(Dest.getPointer());
3142 }
3143
3144 case Builtin::BI__builtin_objc_memmove_collectable: {
3145 Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
3146 Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
3147 Value *SizeVal = EmitScalarExpr(E->getArg(2));
3148 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
3149 DestAddr, SrcAddr, SizeVal);
3150 return RValue::get(DestAddr.getPointer());
3151 }
3152
3153 case Builtin::BI__builtin___memmove_chk: {
3154 // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
3155 Expr::EvalResult SizeResult, DstSizeResult;
3156 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
3157 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
3158 break;
3159 llvm::APSInt Size = SizeResult.Val.getInt();
3160 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
3161 if (Size.ugt(DstSize))
3162 break;
3163 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3164 Address Src = EmitPointerWithAlignment(E->getArg(1));
3165 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
3166 Builder.CreateMemMove(Dest, Src, SizeVal, false);
3167 return RValue::get(Dest.getPointer());
3168 }
3169
3170 case Builtin::BImemmove:
3171 case Builtin::BI__builtin_memmove: {
3172 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3173 Address Src = EmitPointerWithAlignment(E->getArg(1));
3174 Value *SizeVal = EmitScalarExpr(E->getArg(2));
3175 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3176 E->getArg(0)->getExprLoc(), FD, 0);
3177 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
3178 E->getArg(1)->getExprLoc(), FD, 1);
3179 Builder.CreateMemMove(Dest, Src, SizeVal, false);
3180 return RValue::get(Dest.getPointer());
3181 }
3182 case Builtin::BImemset:
3183 case Builtin::BI__builtin_memset: {
3184 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3185 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
3186 Builder.getInt8Ty());
3187 Value *SizeVal = EmitScalarExpr(E->getArg(2));
3188 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3189 E->getArg(0)->getExprLoc(), FD, 0);
3190 Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
3191 return RValue::get(Dest.getPointer());
3192 }
3193 case Builtin::BI__builtin___memset_chk: {
3194 // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
3195 Expr::EvalResult SizeResult, DstSizeResult;
3196 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
3197 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
3198 break;
3199 llvm::APSInt Size = SizeResult.Val.getInt();
3200 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
3201 if (Size.ugt(DstSize))
3202 break;
3203 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3204 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
3205 Builder.getInt8Ty());
3206 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
3207 Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
3208 return RValue::get(Dest.getPointer());
3209 }
3210 case Builtin::BI__builtin_wmemcmp: {
3211 // The MSVC runtime library does not provide a definition of wmemcmp, so we
3212 // need an inline implementation.
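    // Shape of the emitted loop, roughly (s1/s2/n name the three arguments;
    // wchar_t is unsigned 16-bit on MSVC, hence the unsigned comparisons):
    //   for (; n != 0; ++s1, ++s2, --n) {
    //     if (*s1 > *s2) return 1;
    //     if (*s1 < *s2) return -1;
    //   }
    //   return 0;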
3213 if (!getTarget().getTriple().isOSMSVCRT())
3214 break;
3215
3216 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
3217
3218 Value *Dst = EmitScalarExpr(E->getArg(0));
3219 Value *Src = EmitScalarExpr(E->getArg(1));
3220 Value *Size = EmitScalarExpr(E->getArg(2));
3221
3222 BasicBlock *Entry = Builder.GetInsertBlock();
3223 BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt");
3224 BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt");
3225 BasicBlock *Next = createBasicBlock("wmemcmp.next");
3226 BasicBlock *Exit = createBasicBlock("wmemcmp.exit");
3227 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
3228 Builder.CreateCondBr(SizeEq0, Exit, CmpGT);
3229
3230 EmitBlock(CmpGT);
3231 PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2);
3232 DstPhi->addIncoming(Dst, Entry);
3233 PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2);
3234 SrcPhi->addIncoming(Src, Entry);
3235 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
3236 SizePhi->addIncoming(Size, Entry);
3237 CharUnits WCharAlign =
3238 getContext().getTypeAlignInChars(getContext().WCharTy);
3239 Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign);
3240 Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign);
3241 Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh);
3242 Builder.CreateCondBr(DstGtSrc, Exit, CmpLT);
3243
3244 EmitBlock(CmpLT);
3245 Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh);
3246 Builder.CreateCondBr(DstLtSrc, Exit, Next);
3247
3248 EmitBlock(Next);
3249 Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1);
3250 Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1);
3251 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
3252 Value *NextSizeEq0 =
3253 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
3254 Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT);
3255 DstPhi->addIncoming(NextDst, Next);
3256 SrcPhi->addIncoming(NextSrc, Next);
3257 SizePhi->addIncoming(NextSize, Next);
3258
3259 EmitBlock(Exit);
3260 PHINode *Ret = Builder.CreatePHI(IntTy, 4);
3261 Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry);
3262 Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT);
3263 Ret->addIncoming(ConstantInt::get(IntTy, -1), CmpLT);
3264 Ret->addIncoming(ConstantInt::get(IntTy, 0), Next);
3265 return RValue::get(Ret);
3266 }
3267 case Builtin::BI__builtin_dwarf_cfa: {
3268 // The offset in bytes from the first argument to the CFA.
3269 //
3270 // Why on earth is this in the frontend? Is there any reason at
3271 // all that the backend can't reasonably determine this while
3272 // lowering llvm.eh.dwarf.cfa()?
3273 //
3274 // TODO: If there's a satisfactory reason, add a target hook for
3275 // this instead of hard-coding 0, which is correct for most targets.
3276 int32_t Offset = 0;
3277
3278 Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
3279 return RValue::get(Builder.CreateCall(F,
3280 llvm::ConstantInt::get(Int32Ty, Offset)));
3281 }
3282 case Builtin::BI__builtin_return_address: {
3283 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
3284 getContext().UnsignedIntTy);
3285 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
3286 return RValue::get(Builder.CreateCall(F, Depth));
3287 }
3288 case Builtin::BI_ReturnAddress: {
3289 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
3290 return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
3291 }
3292 case Builtin::BI__builtin_frame_address: {
3293 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
3294 getContext().UnsignedIntTy);
3295 Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy);
3296 return RValue::get(Builder.CreateCall(F, Depth));
3297 }
3298 case Builtin::BI__builtin_extract_return_addr: {
3299 Value *Address = EmitScalarExpr(E->getArg(0));
3300 Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
3301 return RValue::get(Result);
3302 }
3303 case Builtin::BI__builtin_frob_return_addr: {
3304 Value *Address = EmitScalarExpr(E->getArg(0));
3305 Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
3306 return RValue::get(Result);
3307 }
3308 case Builtin::BI__builtin_dwarf_sp_column: {
3309 llvm::IntegerType *Ty
3310 = cast<llvm::IntegerType>(ConvertType(E->getType()));
3311 int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
3312 if (Column == -1) {
3313 CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
3314 return RValue::get(llvm::UndefValue::get(Ty));
3315 }
3316 return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
3317 }
3318 case Builtin::BI__builtin_init_dwarf_reg_size_table: {
3319 Value *Address = EmitScalarExpr(E->getArg(0));
3320 if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
3321 CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
3322 return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
3323 }
3324 case Builtin::BI__builtin_eh_return: {
3325 Value *Int = EmitScalarExpr(E->getArg(0));
3326 Value *Ptr = EmitScalarExpr(E->getArg(1));
3327
3328 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
3329 assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
3330 "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
3331 Function *F =
3332 CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32
3333 : Intrinsic::eh_return_i64);
3334 Builder.CreateCall(F, {Int, Ptr});
3335 Builder.CreateUnreachable();
3336
3337 // We do need to preserve an insertion point.
3338 EmitBlock(createBasicBlock("builtin_eh_return.cont"));
3339
3340 return RValue::get(nullptr);
3341 }
3342 case Builtin::BI__builtin_unwind_init: {
3343 Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
3344 return RValue::get(Builder.CreateCall(F));
3345 }
3346 case Builtin::BI__builtin_extend_pointer: {
3347 // Extends a pointer to the size of an _Unwind_Word, which is
3348 // uint64_t on all platforms. Generally this gets poked into a
3349 // register and eventually used as an address, so if the
3350 // addressing registers are wider than pointers and the platform
3351 // doesn't implicitly ignore high-order bits when doing
3352 // addressing, we need to make sure we zext / sext based on
3353 // the platform's expectations.
3354 //
3355 // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
3356
3357 // Cast the pointer to intptr_t.
3358 Value *Ptr = EmitScalarExpr(E->getArg(0));
3359 Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
3360
3361 // If that's 64 bits, we're done.
3362 if (IntPtrTy->getBitWidth() == 64)
3363 return RValue::get(Result);
3364
3365     // Otherwise, ask the target hooks what to do.
3366 if (getTargetHooks().extendPointerWithSExt())
3367 return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
3368 else
3369 return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
3370 }
3371 case Builtin::BI__builtin_setjmp: {
3372 // Buffer is a void**.
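    // Slot layout expected by llvm.eh.sjlj.setjmp (roughly): buf[0] holds the
    // frame address and buf[2] the saved stack pointer (both stored below);
    // buf[1] is left for the intrinsic's lowering to fill in.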
3373 Address Buf = EmitPointerWithAlignment(E->getArg(0));
3374
3375 // Store the frame pointer to the setjmp buffer.
3376 Value *FrameAddr = Builder.CreateCall(
3377 CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy),
3378 ConstantInt::get(Int32Ty, 0));
3379 Builder.CreateStore(FrameAddr, Buf);
3380
3381 // Store the stack pointer to the setjmp buffer.
3382 Value *StackAddr =
3383 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
3384 Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
3385 Builder.CreateStore(StackAddr, StackSaveSlot);
3386
3387 // Call LLVM's EH setjmp, which is lightweight.
3388 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
3389 Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
3390 return RValue::get(Builder.CreateCall(F, Buf.getPointer()));
3391 }
3392 case Builtin::BI__builtin_longjmp: {
3393 Value *Buf = EmitScalarExpr(E->getArg(0));
3394 Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
3395
3396 // Call LLVM's EH longjmp, which is lightweight.
3397 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
3398
3399 // longjmp doesn't return; mark this as unreachable.
3400 Builder.CreateUnreachable();
3401
3402 // We do need to preserve an insertion point.
3403 EmitBlock(createBasicBlock("longjmp.cont"));
3404
3405 return RValue::get(nullptr);
3406 }
3407 case Builtin::BI__builtin_launder: {
3408 const Expr *Arg = E->getArg(0);
3409 QualType ArgTy = Arg->getType()->getPointeeType();
3410 Value *Ptr = EmitScalarExpr(Arg);
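    // In practice the launder is only needed under -fstrict-vtable-pointers
    // for types that contain a vtable pointer; for everything else
    // __builtin_launder just returns its argument unchanged.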
3411 if (TypeRequiresBuiltinLaunder(CGM, ArgTy))
3412 Ptr = Builder.CreateLaunderInvariantGroup(Ptr);
3413
3414 return RValue::get(Ptr);
3415 }
3416 case Builtin::BI__sync_fetch_and_add:
3417 case Builtin::BI__sync_fetch_and_sub:
3418 case Builtin::BI__sync_fetch_and_or:
3419 case Builtin::BI__sync_fetch_and_and:
3420 case Builtin::BI__sync_fetch_and_xor:
3421 case Builtin::BI__sync_fetch_and_nand:
3422 case Builtin::BI__sync_add_and_fetch:
3423 case Builtin::BI__sync_sub_and_fetch:
3424 case Builtin::BI__sync_and_and_fetch:
3425 case Builtin::BI__sync_or_and_fetch:
3426 case Builtin::BI__sync_xor_and_fetch:
3427 case Builtin::BI__sync_nand_and_fetch:
3428 case Builtin::BI__sync_val_compare_and_swap:
3429 case Builtin::BI__sync_bool_compare_and_swap:
3430 case Builtin::BI__sync_lock_test_and_set:
3431 case Builtin::BI__sync_lock_release:
3432 case Builtin::BI__sync_swap:
3433 llvm_unreachable("Shouldn't make it through sema");
3434 case Builtin::BI__sync_fetch_and_add_1:
3435 case Builtin::BI__sync_fetch_and_add_2:
3436 case Builtin::BI__sync_fetch_and_add_4:
3437 case Builtin::BI__sync_fetch_and_add_8:
3438 case Builtin::BI__sync_fetch_and_add_16:
3439 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
3440 case Builtin::BI__sync_fetch_and_sub_1:
3441 case Builtin::BI__sync_fetch_and_sub_2:
3442 case Builtin::BI__sync_fetch_and_sub_4:
3443 case Builtin::BI__sync_fetch_and_sub_8:
3444 case Builtin::BI__sync_fetch_and_sub_16:
3445 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
3446 case Builtin::BI__sync_fetch_and_or_1:
3447 case Builtin::BI__sync_fetch_and_or_2:
3448 case Builtin::BI__sync_fetch_and_or_4:
3449 case Builtin::BI__sync_fetch_and_or_8:
3450 case Builtin::BI__sync_fetch_and_or_16:
3451 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
3452 case Builtin::BI__sync_fetch_and_and_1:
3453 case Builtin::BI__sync_fetch_and_and_2:
3454 case Builtin::BI__sync_fetch_and_and_4:
3455 case Builtin::BI__sync_fetch_and_and_8:
3456 case Builtin::BI__sync_fetch_and_and_16:
3457 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
3458 case Builtin::BI__sync_fetch_and_xor_1:
3459 case Builtin::BI__sync_fetch_and_xor_2:
3460 case Builtin::BI__sync_fetch_and_xor_4:
3461 case Builtin::BI__sync_fetch_and_xor_8:
3462 case Builtin::BI__sync_fetch_and_xor_16:
3463 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
3464 case Builtin::BI__sync_fetch_and_nand_1:
3465 case Builtin::BI__sync_fetch_and_nand_2:
3466 case Builtin::BI__sync_fetch_and_nand_4:
3467 case Builtin::BI__sync_fetch_and_nand_8:
3468 case Builtin::BI__sync_fetch_and_nand_16:
3469 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);
3470
3471 // Clang extensions: not overloaded yet.
3472 case Builtin::BI__sync_fetch_and_min:
3473 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
3474 case Builtin::BI__sync_fetch_and_max:
3475 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
3476 case Builtin::BI__sync_fetch_and_umin:
3477 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
3478 case Builtin::BI__sync_fetch_and_umax:
3479 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
3480
3481 case Builtin::BI__sync_add_and_fetch_1:
3482 case Builtin::BI__sync_add_and_fetch_2:
3483 case Builtin::BI__sync_add_and_fetch_4:
3484 case Builtin::BI__sync_add_and_fetch_8:
3485 case Builtin::BI__sync_add_and_fetch_16:
3486 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
3487 llvm::Instruction::Add);
3488 case Builtin::BI__sync_sub_and_fetch_1:
3489 case Builtin::BI__sync_sub_and_fetch_2:
3490 case Builtin::BI__sync_sub_and_fetch_4:
3491 case Builtin::BI__sync_sub_and_fetch_8:
3492 case Builtin::BI__sync_sub_and_fetch_16:
3493 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
3494 llvm::Instruction::Sub);
3495 case Builtin::BI__sync_and_and_fetch_1:
3496 case Builtin::BI__sync_and_and_fetch_2:
3497 case Builtin::BI__sync_and_and_fetch_4:
3498 case Builtin::BI__sync_and_and_fetch_8:
3499 case Builtin::BI__sync_and_and_fetch_16:
3500 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
3501 llvm::Instruction::And);
3502 case Builtin::BI__sync_or_and_fetch_1:
3503 case Builtin::BI__sync_or_and_fetch_2:
3504 case Builtin::BI__sync_or_and_fetch_4:
3505 case Builtin::BI__sync_or_and_fetch_8:
3506 case Builtin::BI__sync_or_and_fetch_16:
3507 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
3508 llvm::Instruction::Or);
3509 case Builtin::BI__sync_xor_and_fetch_1:
3510 case Builtin::BI__sync_xor_and_fetch_2:
3511 case Builtin::BI__sync_xor_and_fetch_4:
3512 case Builtin::BI__sync_xor_and_fetch_8:
3513 case Builtin::BI__sync_xor_and_fetch_16:
3514 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
3515 llvm::Instruction::Xor);
3516 case Builtin::BI__sync_nand_and_fetch_1:
3517 case Builtin::BI__sync_nand_and_fetch_2:
3518 case Builtin::BI__sync_nand_and_fetch_4:
3519 case Builtin::BI__sync_nand_and_fetch_8:
3520 case Builtin::BI__sync_nand_and_fetch_16:
3521 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
3522 llvm::Instruction::And, true);
3523
3524 case Builtin::BI__sync_val_compare_and_swap_1:
3525 case Builtin::BI__sync_val_compare_and_swap_2:
3526 case Builtin::BI__sync_val_compare_and_swap_4:
3527 case Builtin::BI__sync_val_compare_and_swap_8:
3528 case Builtin::BI__sync_val_compare_and_swap_16:
3529 return RValue::get(MakeAtomicCmpXchgValue(*this, E, false));
3530
3531 case Builtin::BI__sync_bool_compare_and_swap_1:
3532 case Builtin::BI__sync_bool_compare_and_swap_2:
3533 case Builtin::BI__sync_bool_compare_and_swap_4:
3534 case Builtin::BI__sync_bool_compare_and_swap_8:
3535 case Builtin::BI__sync_bool_compare_and_swap_16:
3536 return RValue::get(MakeAtomicCmpXchgValue(*this, E, true));
3537
3538 case Builtin::BI__sync_swap_1:
3539 case Builtin::BI__sync_swap_2:
3540 case Builtin::BI__sync_swap_4:
3541 case Builtin::BI__sync_swap_8:
3542 case Builtin::BI__sync_swap_16:
3543 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
3544
3545 case Builtin::BI__sync_lock_test_and_set_1:
3546 case Builtin::BI__sync_lock_test_and_set_2:
3547 case Builtin::BI__sync_lock_test_and_set_4:
3548 case Builtin::BI__sync_lock_test_and_set_8:
3549 case Builtin::BI__sync_lock_test_and_set_16:
3550 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
3551
3552 case Builtin::BI__sync_lock_release_1:
3553 case Builtin::BI__sync_lock_release_2:
3554 case Builtin::BI__sync_lock_release_4:
3555 case Builtin::BI__sync_lock_release_8:
3556 case Builtin::BI__sync_lock_release_16: {
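    // __sync_lock_release is an atomic store of 0 to the object with release
    // ordering; there is no result.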
3557 Value *Ptr = EmitScalarExpr(E->getArg(0));
3558 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
3559 CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
3560 llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
3561 StoreSize.getQuantity() * 8);
3562 Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
3563 llvm::StoreInst *Store =
3564 Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr,
3565 StoreSize);
3566 Store->setAtomic(llvm::AtomicOrdering::Release);
3567 return RValue::get(nullptr);
3568 }
3569
3570 case Builtin::BI__sync_synchronize: {
3571 // We assume this is supposed to correspond to a C++0x-style
3572 // sequentially-consistent fence (i.e. this is only usable for
3573 // synchronization, not device I/O or anything like that). This intrinsic
3574 // is really badly designed in the sense that in theory, there isn't
3575 // any way to safely use it... but in practice, it mostly works
3576 // to use it with non-atomic loads and stores to get acquire/release
3577 // semantics.
3578 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
3579 return RValue::get(nullptr);
3580 }
3581
3582 case Builtin::BI__builtin_nontemporal_load:
3583 return RValue::get(EmitNontemporalLoad(*this, E));
3584 case Builtin::BI__builtin_nontemporal_store:
3585 return RValue::get(EmitNontemporalStore(*this, E));
3586 case Builtin::BI__c11_atomic_is_lock_free:
3587 case Builtin::BI__atomic_is_lock_free: {
3588 // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
3589 // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
3590 // _Atomic(T) is always properly-aligned.
3591 const char *LibCallName = "__atomic_is_lock_free";
3592 CallArgList Args;
3593 Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
3594 getContext().getSizeType());
3595 if (BuiltinID == Builtin::BI__atomic_is_lock_free)
3596 Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
3597 getContext().VoidPtrTy);
3598 else
3599 Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
3600 getContext().VoidPtrTy);
3601 const CGFunctionInfo &FuncInfo =
3602 CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
3603 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
3604 llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
3605 return EmitCall(FuncInfo, CGCallee::forDirect(Func),
3606 ReturnValueSlot(), Args);
3607 }
3608
3609 case Builtin::BI__atomic_test_and_set: {
3610 // Look at the argument type to determine whether this is a volatile
3611 // operation. The parameter type is always volatile.
3612 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
3613 bool Volatile =
3614 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
3615
3616 Value *Ptr = EmitScalarExpr(E->getArg(0));
3617 unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
3618 Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
3619 Value *NewVal = Builder.getInt8(1);
3620 Value *Order = EmitScalarExpr(E->getArg(1));
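    // If the memory order is a compile-time constant, emit a single xchg of 1
    // with the matching ordering; otherwise branch on the runtime value over
    // the five possible orderings below.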
3621 if (isa<llvm::ConstantInt>(Order)) {
3622 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
3623 AtomicRMWInst *Result = nullptr;
3624 switch (ord) {
3625 case 0: // memory_order_relaxed
3626 default: // invalid order
3627 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3628 llvm::AtomicOrdering::Monotonic);
3629 break;
3630 case 1: // memory_order_consume
3631 case 2: // memory_order_acquire
3632 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3633 llvm::AtomicOrdering::Acquire);
3634 break;
3635 case 3: // memory_order_release
3636 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3637 llvm::AtomicOrdering::Release);
3638 break;
3639 case 4: // memory_order_acq_rel
3641 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3642 llvm::AtomicOrdering::AcquireRelease);
3643 break;
3644 case 5: // memory_order_seq_cst
3645 Result = Builder.CreateAtomicRMW(
3646 llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3647 llvm::AtomicOrdering::SequentiallyConsistent);
3648 break;
3649 }
3650 Result->setVolatile(Volatile);
3651 return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
3652 }
3653
3654 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
3655
3656 llvm::BasicBlock *BBs[5] = {
3657 createBasicBlock("monotonic", CurFn),
3658 createBasicBlock("acquire", CurFn),
3659 createBasicBlock("release", CurFn),
3660 createBasicBlock("acqrel", CurFn),
3661 createBasicBlock("seqcst", CurFn)
3662 };
3663 llvm::AtomicOrdering Orders[5] = {
3664 llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire,
3665 llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease,
3666 llvm::AtomicOrdering::SequentiallyConsistent};
3667
3668 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
3669 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
3670
3671 Builder.SetInsertPoint(ContBB);
3672 PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");
3673
3674 for (unsigned i = 0; i < 5; ++i) {
3675 Builder.SetInsertPoint(BBs[i]);
3676 AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
3677 Ptr, NewVal, Orders[i]);
3678 RMW->setVolatile(Volatile);
3679 Result->addIncoming(RMW, BBs[i]);
3680 Builder.CreateBr(ContBB);
3681 }
3682
3683 SI->addCase(Builder.getInt32(0), BBs[0]);
3684 SI->addCase(Builder.getInt32(1), BBs[1]);
3685 SI->addCase(Builder.getInt32(2), BBs[1]);
3686 SI->addCase(Builder.getInt32(3), BBs[2]);
3687 SI->addCase(Builder.getInt32(4), BBs[3]);
3688 SI->addCase(Builder.getInt32(5), BBs[4]);
3689
3690 Builder.SetInsertPoint(ContBB);
3691 return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
3692 }
3693
3694 case Builtin::BI__atomic_clear: {
3695 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
3696 bool Volatile =
3697 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
3698
3699 Address Ptr = EmitPointerWithAlignment(E->getArg(0));
3700 unsigned AddrSpace = Ptr.getPointer()->getType()->getPointerAddressSpace();
3701 Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
3702 Value *NewVal = Builder.getInt8(0);
3703 Value *Order = EmitScalarExpr(E->getArg(1));
3704 if (isa<llvm::ConstantInt>(Order)) {
3705 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
3706 StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
3707 switch (ord) {
3708 case 0: // memory_order_relaxed
3709 default: // invalid order
3710 Store->setOrdering(llvm::AtomicOrdering::Monotonic);
3711 break;
3712 case 3: // memory_order_release
3713 Store->setOrdering(llvm::AtomicOrdering::Release);
3714 break;
3715 case 5: // memory_order_seq_cst
3716 Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent);
3717 break;
3718 }
3719 return RValue::get(nullptr);
3720 }
3721
3722 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
3723
3724 llvm::BasicBlock *BBs[3] = {
3725 createBasicBlock("monotonic", CurFn),
3726 createBasicBlock("release", CurFn),
3727 createBasicBlock("seqcst", CurFn)
3728 };
3729 llvm::AtomicOrdering Orders[3] = {
3730 llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release,
3731 llvm::AtomicOrdering::SequentiallyConsistent};
3732
3733 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
3734 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
3735
3736 for (unsigned i = 0; i < 3; ++i) {
3737 Builder.SetInsertPoint(BBs[i]);
3738 StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
3739 Store->setOrdering(Orders[i]);
3740 Builder.CreateBr(ContBB);
3741 }
3742
3743 SI->addCase(Builder.getInt32(0), BBs[0]);
3744 SI->addCase(Builder.getInt32(3), BBs[1]);
3745 SI->addCase(Builder.getInt32(5), BBs[2]);
3746
3747 Builder.SetInsertPoint(ContBB);
3748 return RValue::get(nullptr);
3749 }
3750
3751 case Builtin::BI__atomic_thread_fence:
3752 case Builtin::BI__atomic_signal_fence:
3753 case Builtin::BI__c11_atomic_thread_fence:
3754 case Builtin::BI__c11_atomic_signal_fence: {
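    // __atomic_signal_fence only needs to order against a signal handler
    // running on the same thread, so it uses the SingleThread sync scope; the
    // thread fences use the default (System) scope.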
3755 llvm::SyncScope::ID SSID;
3756 if (BuiltinID == Builtin::BI__atomic_signal_fence ||
3757 BuiltinID == Builtin::BI__c11_atomic_signal_fence)
3758 SSID = llvm::SyncScope::SingleThread;
3759 else
3760 SSID = llvm::SyncScope::System;
3761 Value *Order = EmitScalarExpr(E->getArg(0));
3762 if (isa<llvm::ConstantInt>(Order)) {
3763 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
3764 switch (ord) {
3765 case 0: // memory_order_relaxed
3766 default: // invalid order
3767 break;
3768 case 1: // memory_order_consume
3769 case 2: // memory_order_acquire
3770 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
3771 break;
3772 case 3: // memory_order_release
3773 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
3774 break;
3775 case 4: // memory_order_acq_rel
3776 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
3777 break;
3778 case 5: // memory_order_seq_cst
3779 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
3780 break;
3781 }
3782 return RValue::get(nullptr);
3783 }
3784
3785 llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
3786 AcquireBB = createBasicBlock("acquire", CurFn);
3787 ReleaseBB = createBasicBlock("release", CurFn);
3788 AcqRelBB = createBasicBlock("acqrel", CurFn);
3789 SeqCstBB = createBasicBlock("seqcst", CurFn);
3790 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
3791
3792 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
3793 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
3794
3795 Builder.SetInsertPoint(AcquireBB);
3796 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
3797 Builder.CreateBr(ContBB);
3798 SI->addCase(Builder.getInt32(1), AcquireBB);
3799 SI->addCase(Builder.getInt32(2), AcquireBB);
3800
3801 Builder.SetInsertPoint(ReleaseBB);
3802 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
3803 Builder.CreateBr(ContBB);
3804 SI->addCase(Builder.getInt32(3), ReleaseBB);
3805
3806 Builder.SetInsertPoint(AcqRelBB);
3807 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
3808 Builder.CreateBr(ContBB);
3809 SI->addCase(Builder.getInt32(4), AcqRelBB);
3810
3811 Builder.SetInsertPoint(SeqCstBB);
3812 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
3813 Builder.CreateBr(ContBB);
3814 SI->addCase(Builder.getInt32(5), SeqCstBB);
3815
3816 Builder.SetInsertPoint(ContBB);
3817 return RValue::get(nullptr);
3818 }
3819
3820 case Builtin::BI__builtin_signbit:
3821 case Builtin::BI__builtin_signbitf:
3822 case Builtin::BI__builtin_signbitl: {
3823 return RValue::get(
3824 Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
3825 ConvertType(E->getType())));
3826 }
3827 case Builtin::BI__warn_memset_zero_len:
3828 return RValue::getIgnored();
3829 case Builtin::BI__annotation: {
3830 // Re-encode each wide string to UTF8 and make an MDString.
3831 SmallVector<Metadata *, 1> Strings;
3832 for (const Expr *Arg : E->arguments()) {
3833 const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts());
3834 assert(Str->getCharByteWidth() == 2);
3835 StringRef WideBytes = Str->getBytes();
3836 std::string StrUtf8;
3837 if (!convertUTF16ToUTF8String(
3838 makeArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) {
3839 CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument");
3840 continue;
3841 }
3842 Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8));
3843 }
3844
3845     // Build an MDTuple of MDStrings and emit the intrinsic call.
3846 llvm::Function *F =
3847 CGM.getIntrinsic(llvm::Intrinsic::codeview_annotation, {});
3848 MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
3849 Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
3850 return RValue::getIgnored();
3851 }
3852 case Builtin::BI__builtin_annotation: {
3853 llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
3854 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
3855 AnnVal->getType());
3856
3857     // Get the annotation string, looking through casts. Sema requires this to
3858     // be a non-wide string literal, potentially cast, so the cast<> is safe.
3859 const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
3860 StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
3861 return RValue::get(
3862 EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc(), nullptr));
3863 }
3864 case Builtin::BI__builtin_addcb:
3865 case Builtin::BI__builtin_addcs:
3866 case Builtin::BI__builtin_addc:
3867 case Builtin::BI__builtin_addcl:
3868 case Builtin::BI__builtin_addcll:
3869 case Builtin::BI__builtin_subcb:
3870 case Builtin::BI__builtin_subcs:
3871 case Builtin::BI__builtin_subc:
3872 case Builtin::BI__builtin_subcl:
3873 case Builtin::BI__builtin_subcll: {
3874
3875 // We translate all of these builtins from expressions of the form:
3876 // int x = ..., y = ..., carryin = ..., carryout, result;
3877 // result = __builtin_addc(x, y, carryin, &carryout);
3878 //
3879 // to LLVM IR of the form:
3880 //
3881 // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
3882 // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
3883 // %carry1 = extractvalue {i32, i1} %tmp1, 1
3884 // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
3885 // i32 %carryin)
3886 // %result = extractvalue {i32, i1} %tmp2, 0
3887 // %carry2 = extractvalue {i32, i1} %tmp2, 1
3888 // %tmp3 = or i1 %carry1, %carry2
3889 // %tmp4 = zext i1 %tmp3 to i32
3890 // store i32 %tmp4, i32* %carryout
3891
3892 // Scalarize our inputs.
3893 llvm::Value *X = EmitScalarExpr(E->getArg(0));
3894 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
3895 llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
3896 Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));
3897
3898 // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
3899 llvm::Intrinsic::ID IntrinsicId;
3900 switch (BuiltinID) {
3901 default: llvm_unreachable("Unknown multiprecision builtin id.");
3902 case Builtin::BI__builtin_addcb:
3903 case Builtin::BI__builtin_addcs:
3904 case Builtin::BI__builtin_addc:
3905 case Builtin::BI__builtin_addcl:
3906 case Builtin::BI__builtin_addcll:
3907 IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
3908 break;
3909 case Builtin::BI__builtin_subcb:
3910 case Builtin::BI__builtin_subcs:
3911 case Builtin::BI__builtin_subc:
3912 case Builtin::BI__builtin_subcl:
3913 case Builtin::BI__builtin_subcll:
3914 IntrinsicId = llvm::Intrinsic::usub_with_overflow;
3915 break;
3916 }
3917
3918 // Construct our resulting LLVM IR expression.
3919 llvm::Value *Carry1;
3920 llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
3921 X, Y, Carry1);
3922 llvm::Value *Carry2;
3923 llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
3924 Sum1, Carryin, Carry2);
3925 llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
3926 X->getType());
3927 Builder.CreateStore(CarryOut, CarryOutPtr);
3928 return RValue::get(Sum2);
3929 }
3930
3931 case Builtin::BI__builtin_add_overflow:
3932 case Builtin::BI__builtin_sub_overflow:
3933 case Builtin::BI__builtin_mul_overflow: {
3934 const clang::Expr *LeftArg = E->getArg(0);
3935 const clang::Expr *RightArg = E->getArg(1);
3936 const clang::Expr *ResultArg = E->getArg(2);
3937
3938 clang::QualType ResultQTy =
3939 ResultArg->getType()->castAs<PointerType>()->getPointeeType();
3940
3941 WidthAndSignedness LeftInfo =
3942 getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
3943 WidthAndSignedness RightInfo =
3944 getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
3945 WidthAndSignedness ResultInfo =
3946 getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);
3947
3948 // Handle mixed-sign multiplication as a special case, because adding
3949 // runtime or backend support for our generic irgen would be too expensive.
3950 if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo))
3951 return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg,
3952 RightInfo, ResultArg, ResultQTy,
3953 ResultInfo);
3954
3955 WidthAndSignedness EncompassingInfo =
3956 EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
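    // Roughly: the encompassing type is signed if any of the three types is
    // signed, and wide enough to represent every value of the operands and the
    // result exactly; the checked arithmetic is performed at that width.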
3957
3958 llvm::Type *EncompassingLLVMTy =
3959 llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);
3960
3961 llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
3962
3963 llvm::Intrinsic::ID IntrinsicId;
3964 switch (BuiltinID) {
3965 default:
3966 llvm_unreachable("Unknown overflow builtin id.");
3967 case Builtin::BI__builtin_add_overflow:
3968 IntrinsicId = EncompassingInfo.Signed
3969 ? llvm::Intrinsic::sadd_with_overflow
3970 : llvm::Intrinsic::uadd_with_overflow;
3971 break;
3972 case Builtin::BI__builtin_sub_overflow:
3973 IntrinsicId = EncompassingInfo.Signed
3974 ? llvm::Intrinsic::ssub_with_overflow
3975 : llvm::Intrinsic::usub_with_overflow;
3976 break;
3977 case Builtin::BI__builtin_mul_overflow:
3978 IntrinsicId = EncompassingInfo.Signed
3979 ? llvm::Intrinsic::smul_with_overflow
3980 : llvm::Intrinsic::umul_with_overflow;
3981 break;
3982 }
3983
3984 llvm::Value *Left = EmitScalarExpr(LeftArg);
3985 llvm::Value *Right = EmitScalarExpr(RightArg);
3986 Address ResultPtr = EmitPointerWithAlignment(ResultArg);
3987
3988 // Extend each operand to the encompassing type.
3989 Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
3990 Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);
3991
3992 // Perform the operation on the extended values.
3993 llvm::Value *Overflow, *Result;
3994 Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);
3995
3996 if (EncompassingInfo.Width > ResultInfo.Width) {
3997 // The encompassing type is wider than the result type, so we need to
3998 // truncate it.
3999 llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);
4000
4001 // To see if the truncation caused an overflow, we will extend
4002 // the result and then compare it to the original result.
4003 llvm::Value *ResultTruncExt = Builder.CreateIntCast(
4004 ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
4005 llvm::Value *TruncationOverflow =
4006 Builder.CreateICmpNE(Result, ResultTruncExt);
4007
4008 Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
4009 Result = ResultTrunc;
4010 }
4011
4012 // Finally, store the result using the pointer.
4013 bool isVolatile =
4014 ResultArg->getType()->getPointeeType().isVolatileQualified();
4015 Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);
4016
4017 return RValue::get(Overflow);
4018 }
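  // Illustrative sketch (not emitted code): for mixed operand types such as
  //
  //   int a; unsigned b; short r;
  //   bool ovf = __builtin_add_overflow(a, b, &r);
  //
  // the operands are extended to a signed type wide enough to represent all
  // three types exactly (a 33-bit integer here), sadd.with.overflow is emitted
  // on that type, and the result is truncated to short with an additional
  // check that the truncation did not change the value.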
4019
4020 case Builtin::BI__builtin_uadd_overflow:
4021 case Builtin::BI__builtin_uaddl_overflow:
4022 case Builtin::BI__builtin_uaddll_overflow:
4023 case Builtin::BI__builtin_usub_overflow:
4024 case Builtin::BI__builtin_usubl_overflow:
4025 case Builtin::BI__builtin_usubll_overflow:
4026 case Builtin::BI__builtin_umul_overflow:
4027 case Builtin::BI__builtin_umull_overflow:
4028 case Builtin::BI__builtin_umulll_overflow:
4029 case Builtin::BI__builtin_sadd_overflow:
4030 case Builtin::BI__builtin_saddl_overflow:
4031 case Builtin::BI__builtin_saddll_overflow:
4032 case Builtin::BI__builtin_ssub_overflow:
4033 case Builtin::BI__builtin_ssubl_overflow:
4034 case Builtin::BI__builtin_ssubll_overflow:
4035 case Builtin::BI__builtin_smul_overflow:
4036 case Builtin::BI__builtin_smull_overflow:
4037 case Builtin::BI__builtin_smulll_overflow: {
4038
4039 // We translate all of these builtins directly to the relevant LLVM IR node.
4040
4041 // Scalarize our inputs.
4042 llvm::Value *X = EmitScalarExpr(E->getArg(0));
4043 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
4044 Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));
4045
4046 // Decide which of the overflow intrinsics we are lowering to:
4047 llvm::Intrinsic::ID IntrinsicId;
4048 switch (BuiltinID) {
4049 default: llvm_unreachable("Unknown overflow builtin id.");
4050 case Builtin::BI__builtin_uadd_overflow:
4051 case Builtin::BI__builtin_uaddl_overflow:
4052 case Builtin::BI__builtin_uaddll_overflow:
4053 IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
4054 break;
4055 case Builtin::BI__builtin_usub_overflow:
4056 case Builtin::BI__builtin_usubl_overflow:
4057 case Builtin::BI__builtin_usubll_overflow:
4058 IntrinsicId = llvm::Intrinsic::usub_with_overflow;
4059 break;
4060 case Builtin::BI__builtin_umul_overflow:
4061 case Builtin::BI__builtin_umull_overflow:
4062 case Builtin::BI__builtin_umulll_overflow:
4063 IntrinsicId = llvm::Intrinsic::umul_with_overflow;
4064 break;
4065 case Builtin::BI__builtin_sadd_overflow:
4066 case Builtin::BI__builtin_saddl_overflow:
4067 case Builtin::BI__builtin_saddll_overflow:
4068 IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
4069 break;
4070 case Builtin::BI__builtin_ssub_overflow:
4071 case Builtin::BI__builtin_ssubl_overflow:
4072 case Builtin::BI__builtin_ssubll_overflow:
4073 IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
4074 break;
4075 case Builtin::BI__builtin_smul_overflow:
4076 case Builtin::BI__builtin_smull_overflow:
4077 case Builtin::BI__builtin_smulll_overflow:
4078 IntrinsicId = llvm::Intrinsic::smul_with_overflow;
4079 break;
4080 }
4081
4082
4083 llvm::Value *Carry;
4084 llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
4085 Builder.CreateStore(Sum, SumOutPtr);
4086
4087 return RValue::get(Carry);
4088 }
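  // Illustrative usage (not emitted code): these builtins map one-to-one onto
  // the corresponding *.with.overflow intrinsics, e.g.
  //
  //   unsigned sum;
  //   bool ovf = __builtin_uadd_overflow(a, b, &sum);  // llvm.uadd.with.overflow.i32
  //
  // The intrinsic's first result is stored through the third argument and the
  // overflow bit is the value of the builtin call.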
4089 case Builtin::BI__builtin_addressof:
4090 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
4091 case Builtin::BI__builtin_operator_new:
4092 return EmitBuiltinNewDeleteCall(
4093 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
4094 case Builtin::BI__builtin_operator_delete:
4095 return EmitBuiltinNewDeleteCall(
4096 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);
4097
4098 case Builtin::BI__builtin_is_aligned:
4099 return EmitBuiltinIsAligned(E);
4100 case Builtin::BI__builtin_align_up:
4101 return EmitBuiltinAlignTo(E, true);
4102 case Builtin::BI__builtin_align_down:
4103 return EmitBuiltinAlignTo(E, false);
4104
4105 case Builtin::BI__noop:
4106 // __noop always evaluates to an integer literal zero.
4107 return RValue::get(ConstantInt::get(IntTy, 0));
4108 case Builtin::BI__builtin_call_with_static_chain: {
4109 const CallExpr *Call = cast<CallExpr>(E->getArg(0));
4110 const Expr *Chain = E->getArg(1);
4111 return EmitCall(Call->getCallee()->getType(),
4112 EmitCallee(Call->getCallee()), Call, ReturnValue,
4113 EmitScalarExpr(Chain));
4114 }
4115 case Builtin::BI_InterlockedExchange8:
4116 case Builtin::BI_InterlockedExchange16:
4117 case Builtin::BI_InterlockedExchange:
4118 case Builtin::BI_InterlockedExchangePointer:
4119 return RValue::get(
4120 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E));
4121 case Builtin::BI_InterlockedCompareExchangePointer:
4122 case Builtin::BI_InterlockedCompareExchangePointer_nf: {
4123 llvm::Type *RTy;
4124 llvm::IntegerType *IntType =
4125 IntegerType::get(getLLVMContext(),
4126 getContext().getTypeSize(E->getType()));
4127 llvm::Type *IntPtrType = IntType->getPointerTo();
4128
4129 llvm::Value *Destination =
4130 Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType);
4131
4132 llvm::Value *Exchange = EmitScalarExpr(E->getArg(1));
4133 RTy = Exchange->getType();
4134 Exchange = Builder.CreatePtrToInt(Exchange, IntType);
4135
4136 llvm::Value *Comparand =
4137 Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType);
4138
4139 auto Ordering =
4140 BuiltinID == Builtin::BI_InterlockedCompareExchangePointer_nf ?
4141 AtomicOrdering::Monotonic : AtomicOrdering::SequentiallyConsistent;
4142
4143 auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
4144 Ordering, Ordering);
4145 Result->setVolatile(true);
4146
4147 return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result,
4148 0),
4149 RTy));
4150 }
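  // Illustrative sketch (not emitted code): _InterlockedCompareExchangePointer
  // becomes a volatile pointer-sized cmpxchg; the _nf ("no fence") variant only
  // relaxes the ordering to monotonic. Roughly:
  //
  //   %pair = cmpxchg volatile iN* %dst, iN %comparand, iN %exchange
  //                   seq_cst seq_cst
  //   %old  = extractvalue { iN, i1 } %pair, 0
  //   ; %old converted back to a pointer is the return value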
4151 case Builtin::BI_InterlockedCompareExchange8:
4152 case Builtin::BI_InterlockedCompareExchange16:
4153 case Builtin::BI_InterlockedCompareExchange:
4154 case Builtin::BI_InterlockedCompareExchange64:
4155 return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
4156 case Builtin::BI_InterlockedIncrement16:
4157 case Builtin::BI_InterlockedIncrement:
4158 return RValue::get(
4159 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E));
4160 case Builtin::BI_InterlockedDecrement16:
4161 case Builtin::BI_InterlockedDecrement:
4162 return RValue::get(
4163 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E));
4164 case Builtin::BI_InterlockedAnd8:
4165 case Builtin::BI_InterlockedAnd16:
4166 case Builtin::BI_InterlockedAnd:
4167 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E));
4168 case Builtin::BI_InterlockedExchangeAdd8:
4169 case Builtin::BI_InterlockedExchangeAdd16:
4170 case Builtin::BI_InterlockedExchangeAdd:
4171 return RValue::get(
4172 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E));
4173 case Builtin::BI_InterlockedExchangeSub8:
4174 case Builtin::BI_InterlockedExchangeSub16:
4175 case Builtin::BI_InterlockedExchangeSub:
4176 return RValue::get(
4177 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E));
4178 case Builtin::BI_InterlockedOr8:
4179 case Builtin::BI_InterlockedOr16:
4180 case Builtin::BI_InterlockedOr:
4181 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E));
4182 case Builtin::BI_InterlockedXor8:
4183 case Builtin::BI_InterlockedXor16:
4184 case Builtin::BI_InterlockedXor:
4185 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E));
4186
4187 case Builtin::BI_bittest64:
4188 case Builtin::BI_bittest:
4189 case Builtin::BI_bittestandcomplement64:
4190 case Builtin::BI_bittestandcomplement:
4191 case Builtin::BI_bittestandreset64:
4192 case Builtin::BI_bittestandreset:
4193 case Builtin::BI_bittestandset64:
4194 case Builtin::BI_bittestandset:
4195 case Builtin::BI_interlockedbittestandreset:
4196 case Builtin::BI_interlockedbittestandreset64:
4197 case Builtin::BI_interlockedbittestandset64:
4198 case Builtin::BI_interlockedbittestandset:
4199 case Builtin::BI_interlockedbittestandset_acq:
4200 case Builtin::BI_interlockedbittestandset_rel:
4201 case Builtin::BI_interlockedbittestandset_nf:
4202 case Builtin::BI_interlockedbittestandreset_acq:
4203 case Builtin::BI_interlockedbittestandreset_rel:
4204 case Builtin::BI_interlockedbittestandreset_nf:
4205 return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));
4206
4207 // These builtins exist to emit regular volatile loads and stores not
4208 // affected by the -fms-volatile setting.
4209 case Builtin::BI__iso_volatile_load8:
4210 case Builtin::BI__iso_volatile_load16:
4211 case Builtin::BI__iso_volatile_load32:
4212 case Builtin::BI__iso_volatile_load64:
4213 return RValue::get(EmitISOVolatileLoad(*this, E));
4214 case Builtin::BI__iso_volatile_store8:
4215 case Builtin::BI__iso_volatile_store16:
4216 case Builtin::BI__iso_volatile_store32:
4217 case Builtin::BI__iso_volatile_store64:
4218 return RValue::get(EmitISOVolatileStore(*this, E));
4219
4220 case Builtin::BI__exception_code:
4221 case Builtin::BI_exception_code:
4222 return RValue::get(EmitSEHExceptionCode());
4223 case Builtin::BI__exception_info:
4224 case Builtin::BI_exception_info:
4225 return RValue::get(EmitSEHExceptionInfo());
4226 case Builtin::BI__abnormal_termination:
4227 case Builtin::BI_abnormal_termination:
4228 return RValue::get(EmitSEHAbnormalTermination());
4229 case Builtin::BI_setjmpex:
4230 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
4231 E->getArg(0)->getType()->isPointerType())
4232 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
4233 break;
4234 case Builtin::BI_setjmp:
4235 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
4236 E->getArg(0)->getType()->isPointerType()) {
4237 if (getTarget().getTriple().getArch() == llvm::Triple::x86)
4238 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
4239 else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
4240 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
4241 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E);
4242 }
4243 break;
4244
4245 case Builtin::BI__GetExceptionInfo: {
4246 if (llvm::GlobalVariable *GV =
4247 CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
4248 return RValue::get(llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy));
4249 break;
4250 }
4251
4252 case Builtin::BI__fastfail:
4253 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E));
4254
4255 case Builtin::BI__builtin_coro_size: {
4256 auto & Context = getContext();
4257 auto SizeTy = Context.getSizeType();
4258 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
4259 Function *F = CGM.getIntrinsic(Intrinsic::coro_size, T);
4260 return RValue::get(Builder.CreateCall(F));
4261 }
4262
4263 case Builtin::BI__builtin_coro_id:
4264 return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
4265 case Builtin::BI__builtin_coro_promise:
4266 return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
4267 case Builtin::BI__builtin_coro_resume:
4268 return EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
4269 case Builtin::BI__builtin_coro_frame:
4270 return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
4271 case Builtin::BI__builtin_coro_noop:
4272 return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop);
4273 case Builtin::BI__builtin_coro_free:
4274 return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
4275 case Builtin::BI__builtin_coro_destroy:
4276 return EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
4277 case Builtin::BI__builtin_coro_done:
4278 return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
4279 case Builtin::BI__builtin_coro_alloc:
4280 return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
4281 case Builtin::BI__builtin_coro_begin:
4282 return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
4283 case Builtin::BI__builtin_coro_end:
4284 return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
4285 case Builtin::BI__builtin_coro_suspend:
4286 return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
4287 case Builtin::BI__builtin_coro_param:
4288 return EmitCoroutineIntrinsic(E, Intrinsic::coro_param);
4289
4290 // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
4291 case Builtin::BIread_pipe:
4292 case Builtin::BIwrite_pipe: {
4293 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
4294 *Arg1 = EmitScalarExpr(E->getArg(1));
4295 CGOpenCLRuntime OpenCLRT(CGM);
4296 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
4297 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
4298
4299 // Type of the generic packet parameter.
4300 unsigned GenericAS =
4301 getContext().getTargetAddressSpace(LangAS::opencl_generic);
4302 llvm::Type *I8PTy = llvm::PointerType::get(
4303 llvm::Type::getInt8Ty(getLLVMContext()), GenericAS);
4304
4305 // Test which overloaded version we should generate the call for.
4306 if (2U == E->getNumArgs()) {
4307 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2"
4308 : "__write_pipe_2";
4309 // Create a generic function type so the call works with any builtin or
4310 // user-defined type.
4311 llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
4312 llvm::FunctionType *FTy = llvm::FunctionType::get(
4313 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4314 Value *BCast = Builder.CreatePointerCast(Arg1, I8PTy);
4315 return RValue::get(
4316 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
4317 {Arg0, BCast, PacketSize, PacketAlign}));
4318 } else {
4319 assert(4 == E->getNumArgs() &&
4320 "Illegal number of parameters to pipe function");
4321 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
4322 : "__write_pipe_4";
4323
4324 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
4325 Int32Ty, Int32Ty};
4326 Value *Arg2 = EmitScalarExpr(E->getArg(2)),
4327 *Arg3 = EmitScalarExpr(E->getArg(3));
4328 llvm::FunctionType *FTy = llvm::FunctionType::get(
4329 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4330 Value *BCast = Builder.CreatePointerCast(Arg3, I8PTy);
4331 // We know the third argument is an integer type, but we may need to cast
4332 // it to i32.
4333 if (Arg2->getType() != Int32Ty)
4334 Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
4335 return RValue::get(Builder.CreateCall(
4336 CGM.CreateRuntimeFunction(FTy, Name),
4337 {Arg0, Arg1, Arg2, BCast, PacketSize, PacketAlign}));
4338 }
4339 }
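  // Illustrative mapping (not emitted code): the two overloads above lower to
  // runtime entry points, e.g. for read_pipe:
  //
  //   read_pipe(p, ptr)              -> __read_pipe_2(p, ptr, size, align)
  //   read_pipe(p, rid, index, ptr)  -> __read_pipe_4(p, rid, index, ptr, size, align)
  //
  // where ptr is cast to a generic i8* and size/align are the pipe's packet
  // size and alignment.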
4340 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write
4341 // functions
4342 case Builtin::BIreserve_read_pipe:
4343 case Builtin::BIreserve_write_pipe:
4344 case Builtin::BIwork_group_reserve_read_pipe:
4345 case Builtin::BIwork_group_reserve_write_pipe:
4346 case Builtin::BIsub_group_reserve_read_pipe:
4347 case Builtin::BIsub_group_reserve_write_pipe: {
4348 // Composing the mangled name for the function.
4349 const char *Name;
4350 if (BuiltinID == Builtin::BIreserve_read_pipe)
4351 Name = "__reserve_read_pipe";
4352 else if (BuiltinID == Builtin::BIreserve_write_pipe)
4353 Name = "__reserve_write_pipe";
4354 else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe)
4355 Name = "__work_group_reserve_read_pipe";
4356 else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe)
4357 Name = "__work_group_reserve_write_pipe";
4358 else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe)
4359 Name = "__sub_group_reserve_read_pipe";
4360 else
4361 Name = "__sub_group_reserve_write_pipe";
4362
4363 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
4364 *Arg1 = EmitScalarExpr(E->getArg(1));
4365 llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
4366 CGOpenCLRuntime OpenCLRT(CGM);
4367 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
4368 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
4369
4370 // Building the generic function prototype.
4371 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
4372 llvm::FunctionType *FTy = llvm::FunctionType::get(
4373 ReservedIDTy, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4374 // We know the second argument is an integer type, but we may need to cast
4375 // it to i32.
4376 if (Arg1->getType() != Int32Ty)
4377 Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
4378 return RValue::get(
4379 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
4380 {Arg0, Arg1, PacketSize, PacketAlign}));
4381 }
4382 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
4383 // functions
4384 case Builtin::BIcommit_read_pipe:
4385 case Builtin::BIcommit_write_pipe:
4386 case Builtin::BIwork_group_commit_read_pipe:
4387 case Builtin::BIwork_group_commit_write_pipe:
4388 case Builtin::BIsub_group_commit_read_pipe:
4389 case Builtin::BIsub_group_commit_write_pipe: {
4390 const char *Name;
4391 if (BuiltinID == Builtin::BIcommit_read_pipe)
4392 Name = "__commit_read_pipe";
4393 else if (BuiltinID == Builtin::BIcommit_write_pipe)
4394 Name = "__commit_write_pipe";
4395 else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe)
4396 Name = "__work_group_commit_read_pipe";
4397 else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe)
4398 Name = "__work_group_commit_write_pipe";
4399 else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe)
4400 Name = "__sub_group_commit_read_pipe";
4401 else
4402 Name = "__sub_group_commit_write_pipe";
4403
4404 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
4405 *Arg1 = EmitScalarExpr(E->getArg(1));
4406 CGOpenCLRuntime OpenCLRT(CGM);
4407 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
4408 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
4409
4410 // Building the generic function prototype.
4411 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
4412 llvm::FunctionType *FTy =
4413 llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
4414 llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4415
4416 return RValue::get(
4417 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
4418 {Arg0, Arg1, PacketSize, PacketAlign}));
4419 }
4420 // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
4421 case Builtin::BIget_pipe_num_packets:
4422 case Builtin::BIget_pipe_max_packets: {
4423 const char *BaseName;
4424 const auto *PipeTy = E->getArg(0)->getType()->castAs<PipeType>();
4425 if (BuiltinID == Builtin::BIget_pipe_num_packets)
4426 BaseName = "__get_pipe_num_packets";
4427 else
4428 BaseName = "__get_pipe_max_packets";
4429 std::string Name = std::string(BaseName) +
4430 std::string(PipeTy->isReadOnly() ? "_ro" : "_wo");
4431
4432 // Building the generic function prototype.
4433 Value *Arg0 = EmitScalarExpr(E->getArg(0));
4434 CGOpenCLRuntime OpenCLRT(CGM);
4435 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
4436 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
4437 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
4438 llvm::FunctionType *FTy = llvm::FunctionType::get(
4439 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4440
4441 return RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
4442 {Arg0, PacketSize, PacketAlign}));
4443 }
4444
4445 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
4446 case Builtin::BIto_global:
4447 case Builtin::BIto_local:
4448 case Builtin::BIto_private: {
4449 auto Arg0 = EmitScalarExpr(E->getArg(0));
4450 auto NewArgT = llvm::PointerType::get(Int8Ty,
4451 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
4452 auto NewRetT = llvm::PointerType::get(Int8Ty,
4453 CGM.getContext().getTargetAddressSpace(
4454 E->getType()->getPointeeType().getAddressSpace()));
4455 auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
4456 llvm::Value *NewArg;
4457 if (Arg0->getType()->getPointerAddressSpace() !=
4458 NewArgT->getPointerAddressSpace())
4459 NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT);
4460 else
4461 NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT);
4462 auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
4463 auto NewCall =
4464 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
4465 return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
4466 ConvertType(E->getType())));
4467 }
4468
4469 // OpenCL v2.0, s6.13.17 - Enqueue kernel function.
4470 // It contains four different overload formats specified in Table 6.13.17.1.
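  // Summary of the four forms as handled below (in terms of the runtime
  // functions they lower to):
  //   enqueue_kernel(q, flags, ndrange, block)                -> __enqueue_kernel_basic
  //     ... plus trailing local-size arguments                -> __enqueue_kernel_varargs
  //   enqueue_kernel(q, flags, ndrange, nev, wl, ev, block)   -> __enqueue_kernel_basic_events
  //     ... plus trailing local-size arguments                -> __enqueue_kernel_events_varargs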
4471 case Builtin::BIenqueue_kernel: {
4472 StringRef Name; // Generated function call name
4473 unsigned NumArgs = E->getNumArgs();
4474
4475 llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
4476 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4477 getContext().getTargetAddressSpace(LangAS::opencl_generic));
4478
4479 llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
4480 llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
4481 LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
4482 llvm::Value *Range = NDRangeL.getAddress(*this).getPointer();
4483 llvm::Type *RangeTy = NDRangeL.getAddress(*this).getType();
4484
4485 if (NumArgs == 4) {
4486 // The most basic form of the call with parameters:
4487 // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void)
4488 Name = "__enqueue_kernel_basic";
4489 llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangeTy, GenericVoidPtrTy,
4490 GenericVoidPtrTy};
4491 llvm::FunctionType *FTy = llvm::FunctionType::get(
4492 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4493
4494 auto Info =
4495 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
4496 llvm::Value *Kernel =
4497 Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4498 llvm::Value *Block =
4499 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4500
4501 AttrBuilder B;
4502 B.addByValAttr(NDRangeL.getAddress(*this).getElementType());
4503 llvm::AttributeList ByValAttrSet =
4504 llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B);
4505
4506 auto RTCall =
4507 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name, ByValAttrSet),
4508 {Queue, Flags, Range, Kernel, Block});
4509 RTCall->setAttributes(ByValAttrSet);
4510 return RValue::get(RTCall);
4511 }
4512 assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");
4513
4514 // Create a temporary array to hold the sizes of local pointer arguments
4515 // for the block. \p First is the position of the first size argument.
4516 auto CreateArrayForSizeVar = [=](unsigned First)
4517 -> std::tuple<llvm::Value *, llvm::Value *, llvm::Value *> {
4518 llvm::APInt ArraySize(32, NumArgs - First);
4519 QualType SizeArrayTy = getContext().getConstantArrayType(
4520 getContext().getSizeType(), ArraySize, nullptr, ArrayType::Normal,
4521 /*IndexTypeQuals=*/0);
4522 auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes");
4523 llvm::Value *TmpPtr = Tmp.getPointer();
4524 llvm::Value *TmpSize = EmitLifetimeStart(
4525 CGM.getDataLayout().getTypeAllocSize(Tmp.getElementType()), TmpPtr);
4526 llvm::Value *ElemPtr;
4527 // Each of the following arguments specifies the size of the corresponding
4528 // argument passed to the enqueued block.
4529 auto *Zero = llvm::ConstantInt::get(IntTy, 0);
4530 for (unsigned I = First; I < NumArgs; ++I) {
4531 auto *Index = llvm::ConstantInt::get(IntTy, I - First);
4532 auto *GEP = Builder.CreateGEP(TmpPtr, {Zero, Index});
4533 if (I == First)
4534 ElemPtr = GEP;
4535 auto *V =
4536 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
4537 Builder.CreateAlignedStore(
4538 V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy));
4539 }
4540 return std::tie(ElemPtr, TmpSize, TmpPtr);
4541 };
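    // Illustrative sketch (not emitted code): for a call such as
    //
    //   enqueue_kernel(q, flags, range, block, 64, 128);
    //
    // CreateArrayForSizeVar(4) materializes a temporary array
    // "block_sizes" = { (size_t)64, (size_t)128 }, and the runtime call
    // receives a pointer to its first element together with the count (2).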
4542
4543 // Could have events and/or varargs.
4544 if (E->getArg(3)->getType()->isBlockPointerType()) {
4545 // No events passed, but has variadic arguments.
4546 Name = "__enqueue_kernel_varargs";
4547 auto Info =
4548 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
4549 llvm::Value *Kernel =
4550 Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4551 auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4552 llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
4553 std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(4);
4554
4555 // Create a vector of the arguments, as well as a constant value to
4556 // express to the runtime the number of variadic arguments.
4557 llvm::Value *const Args[] = {Queue, Flags,
4558 Range, Kernel,
4559 Block, ConstantInt::get(IntTy, NumArgs - 4),
4560 ElemPtr};
4561 llvm::Type *const ArgTys[] = {
4562 QueueTy, IntTy, RangeTy, GenericVoidPtrTy,
4563 GenericVoidPtrTy, IntTy, ElemPtr->getType()};
4564
4565 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
4566 auto Call = RValue::get(
4567 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
4568 if (TmpSize)
4569 EmitLifetimeEnd(TmpSize, TmpPtr);
4570 return Call;
4571 }
4572 // All remaining forms take event arguments.
4573 if (NumArgs >= 7) {
4574 llvm::Type *EventTy = ConvertType(getContext().OCLClkEventTy);
4575 llvm::PointerType *EventPtrTy = EventTy->getPointerTo(
4576 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
4577
4578 llvm::Value *NumEvents =
4579 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);
4580
4581 // Since SemaOpenCLBuiltinEnqueueKernel allows the fifth and sixth arguments
4582 // to be null pointer constants (including a `0` literal), handle that case
4583 // here by emitting a null pointer directly.
4584 llvm::Value *EventWaitList = nullptr;
4585 if (E->getArg(4)->isNullPointerConstant(
4586 getContext(), Expr::NPC_ValueDependentIsNotNull)) {
4587 EventWaitList = llvm::ConstantPointerNull::get(EventPtrTy);
4588 } else {
4589 EventWaitList = E->getArg(4)->getType()->isArrayType()
4590 ? EmitArrayToPointerDecay(E->getArg(4)).getPointer()
4591 : EmitScalarExpr(E->getArg(4));
4592 // Convert to generic address space.
4593 EventWaitList = Builder.CreatePointerCast(EventWaitList, EventPtrTy);
4594 }
4595 llvm::Value *EventRet = nullptr;
4596 if (E->getArg(5)->isNullPointerConstant(
4597 getContext(), Expr::NPC_ValueDependentIsNotNull)) {
4598 EventRet = llvm::ConstantPointerNull::get(EventPtrTy);
4599 } else {
4600 EventRet =
4601 Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), EventPtrTy);
4602 }
4603
4604 auto Info =
4605 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
4606 llvm::Value *Kernel =
4607 Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4608 llvm::Value *Block =
4609 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4610
4611 std::vector<llvm::Type *> ArgTys = {
4612 QueueTy, Int32Ty, RangeTy, Int32Ty,
4613 EventPtrTy, EventPtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
4614
4615 std::vector<llvm::Value *> Args = {Queue, Flags, Range,
4616 NumEvents, EventWaitList, EventRet,
4617 Kernel, Block};
4618
4619 if (NumArgs == 7) {
4620 // Has events but no variadics.
4621 Name = "__enqueue_kernel_basic_events";
4622 llvm::FunctionType *FTy = llvm::FunctionType::get(
4623 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4624 return RValue::get(
4625 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
4626 llvm::ArrayRef<llvm::Value *>(Args)));
4627 }
4628 // Has event info and variadics
4629 // Pass the number of variadics to the runtime function too.
4630 Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7));
4631 ArgTys.push_back(Int32Ty);
4632 Name = "__enqueue_kernel_events_varargs";
4633
4634 llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
4635 std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(7);
4636 Args.push_back(ElemPtr);
4637 ArgTys.push_back(ElemPtr->getType());
4638
4639 llvm::FunctionType *FTy = llvm::FunctionType::get(
4640 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4641 auto Call =
4642 RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
4643 llvm::ArrayRef<llvm::Value *>(Args)));
4644 if (TmpSize)
4645 EmitLifetimeEnd(TmpSize, TmpPtr);
4646 return Call;
4647 }
4648 LLVM_FALLTHROUGH;
4649 }
4650 // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
4651 // parameter.
4652 case Builtin::BIget_kernel_work_group_size: {
4653 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4654 getContext().getTargetAddressSpace(LangAS::opencl_generic));
4655 auto Info =
4656 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
4657 Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4658 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4659 return RValue::get(Builder.CreateCall(
4660 CGM.CreateRuntimeFunction(
4661 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
4662 false),
4663 "__get_kernel_work_group_size_impl"),
4664 {Kernel, Arg}));
4665 }
4666 case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
4667 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4668 getContext().getTargetAddressSpace(LangAS::opencl_generic));
4669 auto Info =
4670 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
4671 Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4672 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4673 return RValue::get(Builder.CreateCall(
4674 CGM.CreateRuntimeFunction(
4675 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
4676 false),
4677 "__get_kernel_preferred_work_group_size_multiple_impl"),
4678 {Kernel, Arg}));
4679 }
4680 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
4681 case Builtin::BIget_kernel_sub_group_count_for_ndrange: {
4682 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4683 getContext().getTargetAddressSpace(LangAS::opencl_generic));
4684 LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
4685 llvm::Value *NDRange = NDRangeL.getAddress(*this).getPointer();
4686 auto Info =
4687 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
4688 Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4689 Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4690 const char *Name =
4691 BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
4692 ? "__get_kernel_max_sub_group_size_for_ndrange_impl"
4693 : "__get_kernel_sub_group_count_for_ndrange_impl";
4694 return RValue::get(Builder.CreateCall(
4695 CGM.CreateRuntimeFunction(
4696 llvm::FunctionType::get(
4697 IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy},
4698 false),
4699 Name),
4700 {NDRange, Kernel, Block}));
4701 }
4702
4703 case Builtin::BI__builtin_store_half:
4704 case Builtin::BI__builtin_store_halff: {
4705 Value *Val = EmitScalarExpr(E->getArg(0));
4706 Address Address = EmitPointerWithAlignment(E->getArg(1));
4707 Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy());
4708 return RValue::get(Builder.CreateStore(HalfVal, Address));
4709 }
4710 case Builtin::BI__builtin_load_half: {
4711 Address Address = EmitPointerWithAlignment(E->getArg(0));
4712 Value *HalfVal = Builder.CreateLoad(Address);
4713 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy()));
4714 }
4715 case Builtin::BI__builtin_load_halff: {
4716 Address Address = EmitPointerWithAlignment(E->getArg(0));
4717 Value *HalfVal = Builder.CreateLoad(Address);
4718 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
4719 }
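  // Illustrative mapping (not emitted code): the half-precision helpers above
  // simply convert through the IR 'half' type, roughly:
  //
  //   __builtin_store_halff(f, p)  ->  store (fptrunc f to half), p
  //   __builtin_load_half(p)       ->  fpext (load half from p) to double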
4720 case Builtin::BIprintf:
4721 if (getTarget().getTriple().isNVPTX())
4722 return EmitNVPTXDevicePrintfCallExpr(E, ReturnValue);
4723 if (getTarget().getTriple().getArch() == Triple::amdgcn &&
4724 getLangOpts().HIP)
4725 return EmitAMDGPUDevicePrintfCallExpr(E, ReturnValue);
4726 break;
4727 case Builtin::BI__builtin_canonicalize:
4728 case Builtin::BI__builtin_canonicalizef:
4729 case Builtin::BI__builtin_canonicalizef16:
4730 case Builtin::BI__builtin_canonicalizel:
4731 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::canonicalize));
4732
4733 case Builtin::BI__builtin_thread_pointer: {
4734 if (!getContext().getTargetInfo().isTLSSupported())
4735 CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
4736 // Fall through - it's already mapped to the intrinsic by GCCBuiltin.
4737 break;
4738 }
4739 case Builtin::BI__builtin_os_log_format:
4740 return emitBuiltinOSLogFormat(*E);
4741
4742 case Builtin::BI__xray_customevent: {
4743 if (!ShouldXRayInstrumentFunction())
4744 return RValue::getIgnored();
4745
4746 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
4747 XRayInstrKind::Custom))
4748 return RValue::getIgnored();
4749
4750 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
4751 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents())
4752 return RValue::getIgnored();
4753
4754 Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent);
4755 auto FTy = F->getFunctionType();
4756 auto Arg0 = E->getArg(0);
4757 auto Arg0Val = EmitScalarExpr(Arg0);
4758 auto Arg0Ty = Arg0->getType();
4759 auto PTy0 = FTy->getParamType(0);
4760 if (PTy0 != Arg0Val->getType()) {
4761 if (Arg0Ty->isArrayType())
4762 Arg0Val = EmitArrayToPointerDecay(Arg0).getPointer();
4763 else
4764 Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
4765 }
4766 auto Arg1 = EmitScalarExpr(E->getArg(1));
4767 auto PTy1 = FTy->getParamType(1);
4768 if (PTy1 != Arg1->getType())
4769 Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1);
4770 return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1}));
4771 }
4772
4773 case Builtin::BI__xray_typedevent: {
4774 // TODO: There should be a way to always emit events even if the current
4775 // function is not instrumented. Losing events in a stream can cripple
4776 // a trace.
4777 if (!ShouldXRayInstrumentFunction())
4778 return RValue::getIgnored();
4779
4780 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
4781 XRayInstrKind::Typed))
4782 return RValue::getIgnored();
4783
4784 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
4785 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents())
4786 return RValue::getIgnored();
4787
4788 Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent);
4789 auto FTy = F->getFunctionType();
4790 auto Arg0 = EmitScalarExpr(E->getArg(0));
4791 auto PTy0 = FTy->getParamType(0);
4792 if (PTy0 != Arg0->getType())
4793 Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0);
4794 auto Arg1 = E->getArg(1);
4795 auto Arg1Val = EmitScalarExpr(Arg1);
4796 auto Arg1Ty = Arg1->getType();
4797 auto PTy1 = FTy->getParamType(1);
4798 if (PTy1 != Arg1Val->getType()) {
4799 if (Arg1Ty->isArrayType())
4800 Arg1Val = EmitArrayToPointerDecay(Arg1).getPointer();
4801 else
4802 Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
4803 }
4804 auto Arg2 = EmitScalarExpr(E->getArg(2));
4805 auto PTy2 = FTy->getParamType(2);
4806 if (PTy2 != Arg2->getType())
4807 Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2);
4808 return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2}));
4809 }
4810
4811 case Builtin::BI__builtin_ms_va_start:
4812 case Builtin::BI__builtin_ms_va_end:
4813 return RValue::get(
4814 EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).getPointer(),
4815 BuiltinID == Builtin::BI__builtin_ms_va_start));
4816
4817 case Builtin::BI__builtin_ms_va_copy: {
4818 // Lower this manually. We can't reliably determine whether or not any
4819 // given va_copy() is for a Win64 va_list from the calling convention
4820 // alone, because it's legal to do this from a System V ABI function.
4821 // With opaque pointer types, we won't have enough information in LLVM
4822 // IR to determine this from the argument types, either. Best to do it
4823 // now, while we have enough information.
4824 Address DestAddr = EmitMSVAListRef(E->getArg(0));
4825 Address SrcAddr = EmitMSVAListRef(E->getArg(1));
4826
4827 llvm::Type *BPP = Int8PtrPtrTy;
4828
4829 DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), BPP, "cp"),
4830 DestAddr.getAlignment());
4831 SrcAddr = Address(Builder.CreateBitCast(SrcAddr.getPointer(), BPP, "ap"),
4832 SrcAddr.getAlignment());
4833
4834 Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
4835 return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
4836 }
4837 }
4838
4839 // If this is an alias for a lib function (e.g. __builtin_sin), emit
4840 // the call using the normal call path, but using the unmangled
4841 // version of the function name.
4842 if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
4843 return emitLibraryCall(*this, FD, E,
4844 CGM.getBuiltinLibFunction(FD, BuiltinID));
4845
4846 // If this is a predefined lib function (e.g. malloc), emit the call
4847 // using exactly the normal call path.
4848 if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
4849 return emitLibraryCall(*this, FD, E,
4850 cast<llvm::Constant>(EmitScalarExpr(E->getCallee())));
4851
4852 // Check that a call to a target specific builtin has the correct target
4853 // features.
4854 // This check is done down here so it is skipped for non-target-specific
4855 // builtins; however, if generic builtins ever start to require generic
4856 // target features, it can move up to the beginning of the function.
4857 checkTargetFeatures(E, FD);
4858
4859 if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
4860 LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
4861
4862 // See if we have a target specific intrinsic.
4863 const char *Name = getContext().BuiltinInfo.getName(BuiltinID);
4864 Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
4865 StringRef Prefix =
4866 llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
4867 if (!Prefix.empty()) {
4868 IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix.data(), Name);
4869 // NOTE: we don't need to perform a compatibility flag check here, since the
4870 // MS builtins are declared in Builtins*.def via LANGBUILTIN with
4871 // ALL_MS_LANGUAGES and are filtered out earlier.
4872 if (IntrinsicID == Intrinsic::not_intrinsic)
4873 IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
4874 }
4875
4876 if (IntrinsicID != Intrinsic::not_intrinsic) {
4877 SmallVector<Value*, 16> Args;
4878
4879 // Find out if any arguments are required to be integer constant
4880 // expressions.
4881 unsigned ICEArguments = 0;
4882 ASTContext::GetBuiltinTypeError Error;
4883 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
4884 assert(Error == ASTContext::GE_None && "Should not codegen an error");
4885
4886 Function *F = CGM.getIntrinsic(IntrinsicID);
4887 llvm::FunctionType *FTy = F->getFunctionType();
4888
4889 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
4890 Value *ArgValue;
4891 // If this is a normal argument, just emit it as a scalar.
4892 if ((ICEArguments & (1 << i)) == 0) {
4893 ArgValue = EmitScalarExpr(E->getArg(i));
4894 } else {
4895 // If this is required to be a constant, constant fold it so that we
4896 // know that the generated intrinsic gets a ConstantInt.
4897 ArgValue = llvm::ConstantInt::get(
4898 getLLVMContext(),
4899 *E->getArg(i)->getIntegerConstantExpr(getContext()));
4900 }
4901
4902 // If the intrinsic arg type is different from the builtin arg type
4903 // we need to do a bit cast.
4904 llvm::Type *PTy = FTy->getParamType(i);
4905 if (PTy != ArgValue->getType()) {
4906 // XXX - vector of pointers?
4907 if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) {
4908 if (PtrTy->getAddressSpace() !=
4909 ArgValue->getType()->getPointerAddressSpace()) {
4910 ArgValue = Builder.CreateAddrSpaceCast(
4911 ArgValue,
4912 ArgValue->getType()->getPointerTo(PtrTy->getAddressSpace()));
4913 }
4914 }
4915
4916 assert(ArgValue->getType()->canLosslesslyBitCastTo(PTy) &&
4917 "Must be able to losslessly bit cast to param");
4918 ArgValue = Builder.CreateBitCast(ArgValue, PTy);
4919 }
4920
4921 Args.push_back(ArgValue);
4922 }
4923
4924 Value *V = Builder.CreateCall(F, Args);
4925 QualType BuiltinRetType = E->getType();
4926
4927 llvm::Type *RetTy = VoidTy;
4928 if (!BuiltinRetType->isVoidType())
4929 RetTy = ConvertType(BuiltinRetType);
4930
4931 if (RetTy != V->getType()) {
4932 // XXX - vector of pointers?
4933 if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
4934 if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
4935 V = Builder.CreateAddrSpaceCast(
4936 V, V->getType()->getPointerTo(PtrTy->getAddressSpace()));
4937 }
4938 }
4939
4940 assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
4941 "Must be able to losslessly bit cast result type");
4942 V = Builder.CreateBitCast(V, RetTy);
4943 }
4944
4945 return RValue::get(V);
4946 }
4947
4948 // Some target-specific builtins can have aggregate return values, e.g.
4949 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
4950 // ReturnValue to be non-null, so that the target-specific emission code can
4951 // always just emit into it.
4952 TypeEvaluationKind EvalKind = getEvaluationKind(E->getType());
4953 if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) {
4954 Address DestPtr = CreateMemTemp(E->getType(), "agg.tmp");
4955 ReturnValue = ReturnValueSlot(DestPtr, false);
4956 }
4957
4958 // Now see if we can emit a target-specific builtin.
4959 if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) {
4960 switch (EvalKind) {
4961 case TEK_Scalar:
4962 return RValue::get(V);
4963 case TEK_Aggregate:
4964 return RValue::getAggregate(ReturnValue.getValue(),
4965 ReturnValue.isVolatile());
4966 case TEK_Complex:
4967 llvm_unreachable("No current target builtin returns complex");
4968 }
4969 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
4970 }
4971
4972 ErrorUnsupported(E, "builtin function");
4973
4974 // Unknown builtin, for now just dump it out and return undef.
4975 return GetUndefRValue(E->getType());
4976 }
4977
4978 static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
4979 unsigned BuiltinID, const CallExpr *E,
4980 ReturnValueSlot ReturnValue,
4981 llvm::Triple::ArchType Arch) {
4982 switch (Arch) {
4983 case llvm::Triple::arm:
4984 case llvm::Triple::armeb:
4985 case llvm::Triple::thumb:
4986 case llvm::Triple::thumbeb:
4987 return CGF->EmitARMBuiltinExpr(BuiltinID, E, ReturnValue, Arch);
4988 case llvm::Triple::aarch64:
4989 case llvm::Triple::aarch64_32:
4990 case llvm::Triple::aarch64_be:
4991 return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch);
4992 case llvm::Triple::bpfeb:
4993 case llvm::Triple::bpfel:
4994 return CGF->EmitBPFBuiltinExpr(BuiltinID, E);
4995 case llvm::Triple::x86:
4996 case llvm::Triple::x86_64:
4997 return CGF->EmitX86BuiltinExpr(BuiltinID, E);
4998 case llvm::Triple::ppc:
4999 case llvm::Triple::ppc64:
5000 case llvm::Triple::ppc64le:
5001 return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
5002 case llvm::Triple::r600:
5003 case llvm::Triple::amdgcn:
5004 return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
5005 case llvm::Triple::systemz:
5006 return CGF->EmitSystemZBuiltinExpr(BuiltinID, E);
5007 case llvm::Triple::nvptx:
5008 case llvm::Triple::nvptx64:
5009 return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E);
5010 case llvm::Triple::wasm32:
5011 case llvm::Triple::wasm64:
5012 return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
5013 case llvm::Triple::hexagon:
5014 return CGF->EmitHexagonBuiltinExpr(BuiltinID, E);
5015 default:
5016 return nullptr;
5017 }
5018 }
5019
5020 Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
5021 const CallExpr *E,
5022 ReturnValueSlot ReturnValue) {
5023 if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
5024 assert(getContext().getAuxTargetInfo() && "Missing aux target info");
5025 return EmitTargetArchBuiltinExpr(
5026 this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E,
5027 ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
5028 }
5029
5030 return EmitTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue,
5031 getTarget().getTriple().getArch());
5032 }
5033
5034 static llvm::FixedVectorType *GetNeonType(CodeGenFunction *CGF,
5035 NeonTypeFlags TypeFlags,
5036 bool HasLegalHalfType = true,
5037 bool V1Ty = false,
5038 bool AllowBFloatArgsAndRet = true) {
5039 int IsQuad = TypeFlags.isQuad();
5040 switch (TypeFlags.getEltType()) {
5041 case NeonTypeFlags::Int8:
5042 case NeonTypeFlags::Poly8:
5043 return llvm::FixedVectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
5044 case NeonTypeFlags::Int16:
5045 case NeonTypeFlags::Poly16:
5046 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
5047 case NeonTypeFlags::BFloat16:
5048 if (AllowBFloatArgsAndRet)
5049 return llvm::FixedVectorType::get(CGF->BFloatTy, V1Ty ? 1 : (4 << IsQuad));
5050 else
5051 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
5052 case NeonTypeFlags::Float16:
5053 if (HasLegalHalfType)
5054 return llvm::FixedVectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad));
5055 else
5056 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
5057 case NeonTypeFlags::Int32:
5058 return llvm::FixedVectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
5059 case NeonTypeFlags::Int64:
5060 case NeonTypeFlags::Poly64:
5061 return llvm::FixedVectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
5062 case NeonTypeFlags::Poly128:
5063 // FIXME: i128 and f128 don't get full support in Clang and LLVM.
5064 // A lot of the i128 and f128 API is missing, so we use v16i8 to represent
5065 // poly128 and rely on pattern matching.
5066 return llvm::FixedVectorType::get(CGF->Int8Ty, 16);
5067 case NeonTypeFlags::Float32:
5068 return llvm::FixedVectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
5069 case NeonTypeFlags::Float64:
5070 return llvm::FixedVectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
5071 }
5072 llvm_unreachable("Unknown vector element type!");
5073 }
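// Illustrative examples (not exhaustive): GetNeonType maps a NeonTypeFlags
// value to the LLVM vector type used for the builtin, e.g.
//   { Int32,   !quad } -> <2 x i32>      { Int32, quad } -> <4 x i32>
//   { Float64, !quad } -> <1 x double>   { Poly128, *  } -> <16 x i8>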
5074
5075 static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF,
5076 NeonTypeFlags IntTypeFlags) {
5077 int IsQuad = IntTypeFlags.isQuad();
5078 switch (IntTypeFlags.getEltType()) {
5079 case NeonTypeFlags::Int16:
5080 return llvm::FixedVectorType::get(CGF->HalfTy, (4 << IsQuad));
5081 case NeonTypeFlags::Int32:
5082 return llvm::FixedVectorType::get(CGF->FloatTy, (2 << IsQuad));
5083 case NeonTypeFlags::Int64:
5084 return llvm::FixedVectorType::get(CGF->DoubleTy, (1 << IsQuad));
5085 default:
5086 llvm_unreachable("Type can't be converted to floating-point!");
5087 }
5088 }
5089
5090 Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C,
5091 const ElementCount &Count) {
5092 Value *SV = llvm::ConstantVector::getSplat(Count, C);
5093 return Builder.CreateShuffleVector(V, V, SV, "lane");
5094 }
5095
5096 Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
5097 ElementCount EC = cast<llvm::VectorType>(V->getType())->getElementCount();
5098 return EmitNeonSplat(V, C, EC);
5099 }
5100
5101 Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
5102 const char *name,
5103 unsigned shift, bool rightshift) {
5104 unsigned j = 0;
5105 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
5106 ai != ae; ++ai, ++j) {
5107 if (F->isConstrainedFPIntrinsic())
5108 if (ai->getType()->isMetadataTy())
5109 continue;
5110 if (shift > 0 && shift == j)
5111 Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
5112 else
5113 Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
5114 }
5115
5116 if (F->isConstrainedFPIntrinsic())
5117 return Builder.CreateConstrainedFPCall(F, Ops, name);
5118 else
5119 return Builder.CreateCall(F, Ops, name);
5120 }
5121
5122 Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
5123 bool neg) {
5124 int SV = cast<ConstantInt>(V)->getSExtValue();
5125 return ConstantInt::get(Ty, neg ? -SV : SV);
5126 }
5127
5128 // Right-shift a vector by a constant.
5129 Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
5130 llvm::Type *Ty, bool usgn,
5131 const char *name) {
5132 llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
5133
5134 int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue();
5135 int EltSize = VTy->getScalarSizeInBits();
5136
5137 Vec = Builder.CreateBitCast(Vec, Ty);
5138
5139 // lshr/ashr are undefined when the shift amount is equal to the vector
5140 // element size.
5141 if (ShiftAmt == EltSize) {
5142 if (usgn) {
5143 // Right-shifting an unsigned value by its size yields 0.
5144 return llvm::ConstantAggregateZero::get(VTy);
5145 } else {
5146 // Right-shifting a signed value by its size is equivalent
5147 // to a shift of size-1.
5148 --ShiftAmt;
5149 Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt);
5150 }
5151 }
5152
5153 Shift = EmitNeonShiftVector(Shift, Ty, false);
5154 if (usgn)
5155 return Builder.CreateLShr(Vec, Shift, name);
5156 else
5157 return Builder.CreateAShr(Vec, Shift, name);
5158 }
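// Illustrative sketch (not emitted code): with Ty = <4 x i32> and Shift = 32,
//   unsigned:  return zeroinitializer                 (u32 >> 32 is always 0)
//   signed:    emit 'ashr <4 x i32> %vec, <31,31,31,31>'  (shift by size-1,
//              yielding 0 or -1 per lane depending on the sign)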
5159
5160 enum {
5161 AddRetType = (1 << 0),
5162 Add1ArgType = (1 << 1),
5163 Add2ArgTypes = (1 << 2),
5164
5165 VectorizeRetType = (1 << 3),
5166 VectorizeArgTypes = (1 << 4),
5167
5168 InventFloatType = (1 << 5),
5169 UnsignedAlts = (1 << 6),
5170
5171 Use64BitVectors = (1 << 7),
5172 Use128BitVectors = (1 << 8),
5173
5174 Vectorize1ArgType = Add1ArgType | VectorizeArgTypes,
5175 VectorRet = AddRetType | VectorizeRetType,
5176 VectorRetGetArgs01 =
5177 AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes,
5178 FpCmpzModifiers =
5179 AddRetType | VectorizeRetType | Add1ArgType | InventFloatType
5180 };
5181
5182 namespace {
5183 struct ARMVectorIntrinsicInfo {
5184 const char *NameHint;
5185 unsigned BuiltinID;
5186 unsigned LLVMIntrinsic;
5187 unsigned AltLLVMIntrinsic;
5188 uint64_t TypeModifier;
5189
5190 bool operator<(unsigned RHSBuiltinID) const {
5191 return BuiltinID < RHSBuiltinID;
5192 }
5193 bool operator<(const ARMVectorIntrinsicInfo &TE) const {
5194 return BuiltinID < TE.BuiltinID;
5195 }
5196 };
5197 } // end anonymous namespace
5198
5199 #define NEONMAP0(NameBase) \
5200 { #NameBase, NEON::BI__builtin_neon_ ## NameBase, 0, 0, 0 }
5201
5202 #define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
5203 { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \
5204 Intrinsic::LLVMIntrinsic, 0, TypeModifier }
5205
5206 #define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \
5207 { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \
5208 Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
5209 TypeModifier }
5210
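// For example, NEONMAP1(vabs_v, arm_neon_vabs, 0) expands to the table entry
//   { "vabs_v", NEON::BI__builtin_neon_vabs_v, Intrinsic::arm_neon_vabs, 0, 0 }
// i.e. NameHint, BuiltinID, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier.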
5211 static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = {
5212 NEONMAP1(__a32_vcvt_bf16_v, arm_neon_vcvtfp2bf, 0),
5213 NEONMAP0(splat_lane_v),
5214 NEONMAP0(splat_laneq_v),
5215 NEONMAP0(splatq_lane_v),
5216 NEONMAP0(splatq_laneq_v),
5217 NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
5218 NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
5219 NEONMAP1(vabs_v, arm_neon_vabs, 0),
5220 NEONMAP1(vabsq_v, arm_neon_vabs, 0),
5221 NEONMAP0(vaddhn_v),
5222 NEONMAP1(vaesdq_v, arm_neon_aesd, 0),
5223 NEONMAP1(vaeseq_v, arm_neon_aese, 0),
5224 NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0),
5225 NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0),
5226 NEONMAP1(vbfdot_v, arm_neon_bfdot, 0),
5227 NEONMAP1(vbfdotq_v, arm_neon_bfdot, 0),
5228 NEONMAP1(vbfmlalbq_v, arm_neon_bfmlalb, 0),
5229 NEONMAP1(vbfmlaltq_v, arm_neon_bfmlalt, 0),
5230 NEONMAP1(vbfmmlaq_v, arm_neon_bfmmla, 0),
5231 NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType),
5232 NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType),
5233 NEONMAP1(vcadd_rot270_v, arm_neon_vcadd_rot270, Add1ArgType),
5234 NEONMAP1(vcadd_rot90_v, arm_neon_vcadd_rot90, Add1ArgType),
5235 NEONMAP1(vcaddq_rot270_v, arm_neon_vcadd_rot270, Add1ArgType),
5236 NEONMAP1(vcaddq_rot90_v, arm_neon_vcadd_rot90, Add1ArgType),
5237 NEONMAP1(vcage_v, arm_neon_vacge, 0),
5238 NEONMAP1(vcageq_v, arm_neon_vacge, 0),
5239 NEONMAP1(vcagt_v, arm_neon_vacgt, 0),
5240 NEONMAP1(vcagtq_v, arm_neon_vacgt, 0),
5241 NEONMAP1(vcale_v, arm_neon_vacge, 0),
5242 NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
5243 NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
5244 NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
5245 NEONMAP0(vceqz_v),
5246 NEONMAP0(vceqzq_v),
5247 NEONMAP0(vcgez_v),
5248 NEONMAP0(vcgezq_v),
5249 NEONMAP0(vcgtz_v),
5250 NEONMAP0(vcgtzq_v),
5251 NEONMAP0(vclez_v),
5252 NEONMAP0(vclezq_v),
5253 NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType),
5254 NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType),
5255 NEONMAP0(vcltz_v),
5256 NEONMAP0(vcltzq_v),
5257 NEONMAP1(vclz_v, ctlz, Add1ArgType),
5258 NEONMAP1(vclzq_v, ctlz, Add1ArgType),
5259 NEONMAP1(vcnt_v, ctpop, Add1ArgType),
5260 NEONMAP1(vcntq_v, ctpop, Add1ArgType),
5261 NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0),
5262 NEONMAP0(vcvt_f16_v),
5263 NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
5264 NEONMAP0(vcvt_f32_v),
5265 NEONMAP2(vcvt_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
5266 NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
5267 NEONMAP1(vcvt_n_s16_v, arm_neon_vcvtfp2fxs, 0),
5268 NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
5269 NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
5270 NEONMAP1(vcvt_n_u16_v, arm_neon_vcvtfp2fxu, 0),
5271 NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
5272 NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
5273 NEONMAP0(vcvt_s16_v),
5274 NEONMAP0(vcvt_s32_v),
5275 NEONMAP0(vcvt_s64_v),
5276 NEONMAP0(vcvt_u16_v),
5277 NEONMAP0(vcvt_u32_v),
5278 NEONMAP0(vcvt_u64_v),
5279 NEONMAP1(vcvta_s16_v, arm_neon_vcvtas, 0),
5280 NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
5281 NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
5282 NEONMAP1(vcvta_u16_v, arm_neon_vcvtau, 0),
5283 NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
5284 NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
5285 NEONMAP1(vcvtaq_s16_v, arm_neon_vcvtas, 0),
5286 NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
5287 NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
5288 NEONMAP1(vcvtaq_u16_v, arm_neon_vcvtau, 0),
5289 NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
5290 NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
5291 NEONMAP1(vcvth_bf16_f32, arm_neon_vcvtbfp2bf, 0),
5292 NEONMAP1(vcvtm_s16_v, arm_neon_vcvtms, 0),
5293 NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
5294 NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
5295 NEONMAP1(vcvtm_u16_v, arm_neon_vcvtmu, 0),
5296 NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
5297 NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
5298 NEONMAP1(vcvtmq_s16_v, arm_neon_vcvtms, 0),
5299 NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
5300 NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
5301 NEONMAP1(vcvtmq_u16_v, arm_neon_vcvtmu, 0),
5302 NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
5303 NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
5304 NEONMAP1(vcvtn_s16_v, arm_neon_vcvtns, 0),
5305 NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
5306 NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
5307 NEONMAP1(vcvtn_u16_v, arm_neon_vcvtnu, 0),
5308 NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
5309 NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
5310 NEONMAP1(vcvtnq_s16_v, arm_neon_vcvtns, 0),
5311 NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
5312 NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
5313 NEONMAP1(vcvtnq_u16_v, arm_neon_vcvtnu, 0),
5314 NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
5315 NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
5316 NEONMAP1(vcvtp_s16_v, arm_neon_vcvtps, 0),
5317 NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
5318 NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
5319 NEONMAP1(vcvtp_u16_v, arm_neon_vcvtpu, 0),
5320 NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
5321 NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
5322 NEONMAP1(vcvtpq_s16_v, arm_neon_vcvtps, 0),
5323 NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
5324 NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
5325 NEONMAP1(vcvtpq_u16_v, arm_neon_vcvtpu, 0),
5326 NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
5327 NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
5328 NEONMAP0(vcvtq_f16_v),
5329 NEONMAP0(vcvtq_f32_v),
5330 NEONMAP2(vcvtq_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
5331 NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
5332 NEONMAP1(vcvtq_n_s16_v, arm_neon_vcvtfp2fxs, 0),
5333 NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
5334 NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
5335 NEONMAP1(vcvtq_n_u16_v, arm_neon_vcvtfp2fxu, 0),
5336 NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
5337 NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
5338 NEONMAP0(vcvtq_s16_v),
5339 NEONMAP0(vcvtq_s32_v),
5340 NEONMAP0(vcvtq_s64_v),
5341 NEONMAP0(vcvtq_u16_v),
5342 NEONMAP0(vcvtq_u32_v),
5343 NEONMAP0(vcvtq_u64_v),
5344 NEONMAP2(vdot_v, arm_neon_udot, arm_neon_sdot, 0),
5345 NEONMAP2(vdotq_v, arm_neon_udot, arm_neon_sdot, 0),
5346 NEONMAP0(vext_v),
5347 NEONMAP0(vextq_v),
5348 NEONMAP0(vfma_v),
5349 NEONMAP0(vfmaq_v),
5350 NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
5351 NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
5352 NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
5353 NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
5354 NEONMAP0(vld1_dup_v),
5355 NEONMAP1(vld1_v, arm_neon_vld1, 0),
5356 NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0),
5357 NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0),
5358 NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0),
5359 NEONMAP0(vld1q_dup_v),
5360 NEONMAP1(vld1q_v, arm_neon_vld1, 0),
5361 NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0),
5362 NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0),
5363 NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0),
5364 NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0),
5365 NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
5366 NEONMAP1(vld2_v, arm_neon_vld2, 0),
5367 NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0),
5368 NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
5369 NEONMAP1(vld2q_v, arm_neon_vld2, 0),
5370 NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0),
5371 NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
5372 NEONMAP1(vld3_v, arm_neon_vld3, 0),
5373 NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0),
5374 NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
5375 NEONMAP1(vld3q_v, arm_neon_vld3, 0),
5376 NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0),
5377 NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
5378 NEONMAP1(vld4_v, arm_neon_vld4, 0),
5379 NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0),
5380 NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
5381 NEONMAP1(vld4q_v, arm_neon_vld4, 0),
5382 NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
5383 NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType),
5384 NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType),
5385 NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
5386 NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
5387 NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
5388 NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
5389 NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
5390 NEONMAP2(vmmlaq_v, arm_neon_ummla, arm_neon_smmla, 0),
5391 NEONMAP0(vmovl_v),
5392 NEONMAP0(vmovn_v),
5393 NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
5394 NEONMAP0(vmull_v),
5395 NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType),
5396 NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
5397 NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
5398 NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType),
5399 NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
5400 NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
5401 NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType),
5402 NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts),
5403 NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts),
5404 NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType),
5405 NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType),
5406 NEONMAP2(vqadd_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts),
5407 NEONMAP2(vqaddq_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts),
5408 NEONMAP2(vqdmlal_v, arm_neon_vqdmull, sadd_sat, 0),
5409 NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, ssub_sat, 0),
5410 NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType),
5411 NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType),
5412 NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType),
5413 NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts),
5414 NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType),
5415 NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType),
5416 NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType),
5417 NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType),
5418 NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType),
5419 NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
5420 NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
5421 NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
5422 NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
5423 NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
5424 NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
5425 NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0),
5426 NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0),
5427 NEONMAP2(vqsub_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts),
5428 NEONMAP2(vqsubq_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts),
5429 NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType),
5430 NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
5431 NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
5432 NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType),
5433 NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType),
5434 NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
5435 NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
5436 NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType),
5437 NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType),
5438 NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType),
5439 NEONMAP0(vrndi_v),
5440 NEONMAP0(vrndiq_v),
5441 NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType),
5442 NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType),
5443 NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType),
5444 NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType),
5445 NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType),
5446 NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType),
5447 NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType),
5448 NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType),
5449 NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType),
5450 NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
5451 NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
5452 NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
5453 NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
5454 NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
5455 NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
5456 NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType),
5457 NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType),
5458 NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType),
5459 NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0),
5460 NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0),
5461 NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0),
5462 NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0),
5463 NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0),
5464 NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0),
5465 NEONMAP0(vshl_n_v),
5466 NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
5467 NEONMAP0(vshll_n_v),
5468 NEONMAP0(vshlq_n_v),
5469 NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
5470 NEONMAP0(vshr_n_v),
5471 NEONMAP0(vshrn_n_v),
5472 NEONMAP0(vshrq_n_v),
5473 NEONMAP1(vst1_v, arm_neon_vst1, 0),
5474 NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0),
5475 NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0),
5476 NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0),
5477 NEONMAP1(vst1q_v, arm_neon_vst1, 0),
5478 NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0),
5479 NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0),
5480 NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0),
5481 NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0),
5482 NEONMAP1(vst2_v, arm_neon_vst2, 0),
5483 NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0),
5484 NEONMAP1(vst2q_v, arm_neon_vst2, 0),
5485 NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0),
5486 NEONMAP1(vst3_v, arm_neon_vst3, 0),
5487 NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0),
5488 NEONMAP1(vst3q_v, arm_neon_vst3, 0),
5489 NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0),
5490 NEONMAP1(vst4_v, arm_neon_vst4, 0),
5491 NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0),
5492 NEONMAP1(vst4q_v, arm_neon_vst4, 0),
5493 NEONMAP0(vsubhn_v),
5494 NEONMAP0(vtrn_v),
5495 NEONMAP0(vtrnq_v),
5496 NEONMAP0(vtst_v),
5497 NEONMAP0(vtstq_v),
5498 NEONMAP1(vusdot_v, arm_neon_usdot, 0),
5499 NEONMAP1(vusdotq_v, arm_neon_usdot, 0),
5500 NEONMAP1(vusmmlaq_v, arm_neon_usmmla, 0),
5501 NEONMAP0(vuzp_v),
5502 NEONMAP0(vuzpq_v),
5503 NEONMAP0(vzip_v),
5504 NEONMAP0(vzipq_v)
5505 };
5506
5507 static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
5508 NEONMAP1(__a64_vcvtq_low_bf16_v, aarch64_neon_bfcvtn, 0),
5509 NEONMAP0(splat_lane_v),
5510 NEONMAP0(splat_laneq_v),
5511 NEONMAP0(splatq_lane_v),
5512 NEONMAP0(splatq_laneq_v),
5513 NEONMAP1(vabs_v, aarch64_neon_abs, 0),
5514 NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
5515 NEONMAP0(vaddhn_v),
5516 NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0),
5517 NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0),
5518 NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0),
5519 NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0),
5520 NEONMAP1(vbfdot_v, aarch64_neon_bfdot, 0),
5521 NEONMAP1(vbfdotq_v, aarch64_neon_bfdot, 0),
5522 NEONMAP1(vbfmlalbq_v, aarch64_neon_bfmlalb, 0),
5523 NEONMAP1(vbfmlaltq_v, aarch64_neon_bfmlalt, 0),
5524 NEONMAP1(vbfmmlaq_v, aarch64_neon_bfmmla, 0),
5525 NEONMAP1(vcadd_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType),
5526 NEONMAP1(vcadd_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType),
5527 NEONMAP1(vcaddq_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType),
5528 NEONMAP1(vcaddq_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType),
5529 NEONMAP1(vcage_v, aarch64_neon_facge, 0),
5530 NEONMAP1(vcageq_v, aarch64_neon_facge, 0),
5531 NEONMAP1(vcagt_v, aarch64_neon_facgt, 0),
5532 NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0),
5533 NEONMAP1(vcale_v, aarch64_neon_facge, 0),
5534 NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
5535 NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
5536 NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
5537 NEONMAP0(vceqz_v),
5538 NEONMAP0(vceqzq_v),
5539 NEONMAP0(vcgez_v),
5540 NEONMAP0(vcgezq_v),
5541 NEONMAP0(vcgtz_v),
5542 NEONMAP0(vcgtzq_v),
5543 NEONMAP0(vclez_v),
5544 NEONMAP0(vclezq_v),
5545 NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType),
5546 NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType),
5547 NEONMAP0(vcltz_v),
5548 NEONMAP0(vcltzq_v),
5549 NEONMAP1(vclz_v, ctlz, Add1ArgType),
5550 NEONMAP1(vclzq_v, ctlz, Add1ArgType),
5551 NEONMAP1(vcnt_v, ctpop, Add1ArgType),
5552 NEONMAP1(vcntq_v, ctpop, Add1ArgType),
5553 NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0),
5554 NEONMAP0(vcvt_f16_v),
5555 NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
5556 NEONMAP0(vcvt_f32_v),
5557 NEONMAP2(vcvt_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5558 NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5559 NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5560 NEONMAP1(vcvt_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
5561 NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
5562 NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
5563 NEONMAP1(vcvt_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
5564 NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
5565 NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
5566 NEONMAP0(vcvtq_f16_v),
5567 NEONMAP0(vcvtq_f32_v),
5568 NEONMAP1(vcvtq_high_bf16_v, aarch64_neon_bfcvtn2, 0),
5569 NEONMAP2(vcvtq_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5570 NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5571 NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5572 NEONMAP1(vcvtq_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
5573 NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
5574 NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
5575 NEONMAP1(vcvtq_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
5576 NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
5577 NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
5578 NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
5579 NEONMAP2(vdot_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
5580 NEONMAP2(vdotq_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
5581 NEONMAP0(vext_v),
5582 NEONMAP0(vextq_v),
5583 NEONMAP0(vfma_v),
5584 NEONMAP0(vfmaq_v),
5585 NEONMAP1(vfmlal_high_v, aarch64_neon_fmlal2, 0),
5586 NEONMAP1(vfmlal_low_v, aarch64_neon_fmlal, 0),
5587 NEONMAP1(vfmlalq_high_v, aarch64_neon_fmlal2, 0),
5588 NEONMAP1(vfmlalq_low_v, aarch64_neon_fmlal, 0),
5589 NEONMAP1(vfmlsl_high_v, aarch64_neon_fmlsl2, 0),
5590 NEONMAP1(vfmlsl_low_v, aarch64_neon_fmlsl, 0),
5591 NEONMAP1(vfmlslq_high_v, aarch64_neon_fmlsl2, 0),
5592 NEONMAP1(vfmlslq_low_v, aarch64_neon_fmlsl, 0),
5593 NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
5594 NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
5595 NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
5596 NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
5597 NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0),
5598 NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0),
5599 NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0),
5600 NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0),
5601 NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0),
5602 NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0),
5603 NEONMAP2(vmmlaq_v, aarch64_neon_ummla, aarch64_neon_smmla, 0),
5604 NEONMAP0(vmovl_v),
5605 NEONMAP0(vmovn_v),
5606 NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
5607 NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType),
5608 NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType),
5609 NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
5610 NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
5611 NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType),
5612 NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType),
5613 NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType),
5614 NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
5615 NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
5616 NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
5617 NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
5618 NEONMAP1(vqdmulh_lane_v, aarch64_neon_sqdmulh_lane, 0),
5619 NEONMAP1(vqdmulh_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
5620 NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType),
5621 NEONMAP1(vqdmulhq_lane_v, aarch64_neon_sqdmulh_lane, 0),
5622 NEONMAP1(vqdmulhq_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
5623 NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType),
5624 NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType),
5625 NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts),
5626 NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
5627 NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
5628 NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
5629 NEONMAP1(vqrdmulh_lane_v, aarch64_neon_sqrdmulh_lane, 0),
5630 NEONMAP1(vqrdmulh_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
5631 NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
5632 NEONMAP1(vqrdmulhq_lane_v, aarch64_neon_sqrdmulh_lane, 0),
5633 NEONMAP1(vqrdmulhq_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
5634 NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType),
5635 NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
5636 NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
5637 NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
5638 NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
5639   NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
5640 NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
5641 NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0),
5642 NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0),
5643 NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
5644 NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
5645 NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
5646 NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
5647 NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
5648 NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType),
5649 NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
5650 NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
5651 NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
5652 NEONMAP0(vrndi_v),
5653 NEONMAP0(vrndiq_v),
5654 NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
5655 NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
5656 NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
5657 NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
5658 NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
5659 NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
5660 NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType),
5661 NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType),
5662 NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType),
5663 NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0),
5664 NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0),
5665 NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0),
5666 NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0),
5667 NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0),
5668 NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0),
5669 NEONMAP0(vshl_n_v),
5670 NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
5671 NEONMAP0(vshll_n_v),
5672 NEONMAP0(vshlq_n_v),
5673 NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
5674 NEONMAP0(vshr_n_v),
5675 NEONMAP0(vshrn_n_v),
5676 NEONMAP0(vshrq_n_v),
5677 NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0),
5678 NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0),
5679 NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0),
5680 NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0),
5681 NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0),
5682 NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0),
5683 NEONMAP0(vsubhn_v),
5684 NEONMAP0(vtst_v),
5685 NEONMAP0(vtstq_v),
5686 NEONMAP1(vusdot_v, aarch64_neon_usdot, 0),
5687 NEONMAP1(vusdotq_v, aarch64_neon_usdot, 0),
5688 NEONMAP1(vusmmlaq_v, aarch64_neon_usmmla, 0),
5689 };
5690
5691 static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
5692 NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType),
5693 NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType),
5694 NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType),
5695 NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
5696 NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
5697 NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
5698 NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
5699 NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
5700 NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
5701 NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
5702 NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
5703 NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType),
5704 NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
5705 NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType),
5706 NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
5707 NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
5708 NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
5709 NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
5710 NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
5711 NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
5712 NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
5713 NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
5714 NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
5715 NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
5716 NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
5717 NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
5718 NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
5719 NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
5720 NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
5721 NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
5722 NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
5723 NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
5724 NEONMAP1(vcvtd_s64_f64, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
5725 NEONMAP1(vcvtd_u64_f64, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
5726 NEONMAP1(vcvth_bf16_f32, aarch64_neon_bfcvt, 0),
5727 NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
5728 NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
5729 NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
5730 NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
5731 NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
5732 NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
5733 NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
5734 NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
5735 NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
5736 NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
5737 NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
5738 NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
5739 NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
5740 NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
5741 NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
5742 NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
5743 NEONMAP1(vcvts_s32_f32, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
5744 NEONMAP1(vcvts_u32_f32, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
5745 NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
5746 NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
5747 NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
5748 NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
5749 NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
5750 NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
5751 NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
5752 NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
5753 NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
5754 NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
5755 NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
5756 NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
5757 NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
5758 NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
5759 NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
5760 NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
5761 NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
5762 NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
5763 NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
5764 NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
5765 NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
5766 NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
5767 NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType),
5768 NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType),
5769 NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
5770 NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
5771 NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
5772 NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
5773 NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
5774 NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
5775 NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
5776 NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
5777 NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
5778 NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
5779 NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
5780 NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType),
5781 NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
5782 NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType),
5783 NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
5784 NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
5785 NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType),
5786 NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType),
5787 NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
5788 NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
5789 NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType),
5790 NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType),
5791 NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors),
5792 NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType),
5793 NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors),
5794 NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0),
5795 NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType),
5796 NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType),
5797 NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
5798 NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
5799 NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
5800 NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
5801 NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType),
5802 NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
5803 NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
5804 NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
5805 NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
5806 NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
5807 NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
5808 NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
5809 NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
5810 NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
5811 NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
5812 NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType),
5813 NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType),
5814 NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
5815 NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
5816 NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType),
5817 NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType),
5818 NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType),
5819 NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType),
5820 NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
5821 NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
5822 NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
5823 NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
5824 NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType),
5825 NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
5826 NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
5827 NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
5828 NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
5829 NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
5830 NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
5831 NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType),
5832 NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType),
5833 NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
5834 NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
5835 NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
5836 NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
5837 NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType),
5838 NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType),
5839 NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType),
5840 NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType),
5841 NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
5842 NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
5843 NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType),
5844 NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType),
5845 NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType),
5846 NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
5847 NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
5848 NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
5849 NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
5850 NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType),
5851 NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
5852 NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
5853 NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
5854 NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
5855 NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType),
5856 NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType),
5857 NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
5858 NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
5859 NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType),
5860 NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType),
5861 NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType),
5862 NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType),
5863 NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType),
5864 NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType),
5865 NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType),
5866 NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType),
5867 NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType),
5868 NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType),
5869 NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType),
5870 NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType),
5871 NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0),
5872 NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0),
5873 NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0),
5874 NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0),
5875 NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType),
5876 NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType),
5877 NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType),
5878 NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType),
5879 NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
5880 NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType),
5881 NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
5882 NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType),
5883 NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType),
5884 NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType),
5885 NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
5886 NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType),
5887 NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
5888 NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType),
5889 // FP16 scalar intrinsics go here.
5890 NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType),
5891 NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
5892 NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
5893 NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
5894 NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
5895 NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
5896 NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
5897 NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
5898 NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
5899 NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
5900 NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
5901 NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
5902 NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
5903 NEONMAP1(vcvth_s32_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
5904 NEONMAP1(vcvth_s64_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
5905 NEONMAP1(vcvth_u32_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
5906 NEONMAP1(vcvth_u64_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
5907 NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
5908 NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
5909 NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
5910 NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
5911 NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
5912 NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
5913 NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
5914 NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
5915 NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
5916 NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
5917 NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
5918 NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
5919 NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType),
5920 NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType),
5921 NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType),
5922 NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType),
5923 NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType),
5924 };
5925
5926 #undef NEONMAP0
5927 #undef NEONMAP1
5928 #undef NEONMAP2
5929
5930 #define SVEMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
5931 { \
5932 #NameBase, SVE::BI__builtin_sve_##NameBase, Intrinsic::LLVMIntrinsic, 0, \
5933 TypeModifier \
5934 }
5935
5936 #define SVEMAP2(NameBase, TypeModifier) \
5937 { #NameBase, SVE::BI__builtin_sve_##NameBase, 0, 0, TypeModifier }
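// For illustration, with a hypothetical base name "foo":
//   SVEMAP1(foo, aarch64_sve_foo, Mod) expands to
//     { "foo", SVE::BI__builtin_sve_foo, Intrinsic::aarch64_sve_foo, 0, Mod }
//   SVEMAP2(foo, Mod) expands to
//     { "foo", SVE::BI__builtin_sve_foo, 0, 0, Mod }
// where an intrinsic ID of 0 marks an entry handled by custom code rather
// than a direct intrinsic call.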
5938 static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] = {
5939 #define GET_SVE_LLVM_INTRINSIC_MAP
5940 #include "clang/Basic/arm_sve_builtin_cg.inc"
5941 #undef GET_SVE_LLVM_INTRINSIC_MAP
5942 };
5943
5944 #undef SVEMAP1
5945 #undef SVEMAP2
5946
5947 static bool NEONSIMDIntrinsicsProvenSorted = false;
5948
5949 static bool AArch64SIMDIntrinsicsProvenSorted = false;
5950 static bool AArch64SISDIntrinsicsProvenSorted = false;
5951 static bool AArch64SVEIntrinsicsProvenSorted = false;
5952
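/// Binary-search \p IntrinsicMap for the entry describing \p BuiltinID. In
/// asserts builds the table's sortedness is verified once and cached in
/// \p MapProvenSorted. Returns nullptr if the builtin has no entry.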
5953 static const ARMVectorIntrinsicInfo *
5954 findARMVectorIntrinsicInMap(ArrayRef<ARMVectorIntrinsicInfo> IntrinsicMap,
5955 unsigned BuiltinID, bool &MapProvenSorted) {
5956
5957 #ifndef NDEBUG
5958 if (!MapProvenSorted) {
5959 assert(llvm::is_sorted(IntrinsicMap));
5960 MapProvenSorted = true;
5961 }
5962 #endif
5963
5964 const ARMVectorIntrinsicInfo *Builtin =
5965 llvm::lower_bound(IntrinsicMap, BuiltinID);
5966
5967 if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID)
5968 return Builtin;
5969
5970 return nullptr;
5971 }
5972
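/// Collect the overloaded types requested by \p Modifier (the return type
/// and/or argument types, optionally promoted to 64- or 128-bit vectors) and
/// return the matching declaration of \p IntrinsicID. For example,
/// AddRetType | Add1ArgType yields Tys = { return type, ArgType }.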
5973 Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
5974 unsigned Modifier,
5975 llvm::Type *ArgType,
5976 const CallExpr *E) {
5977 int VectorSize = 0;
5978 if (Modifier & Use64BitVectors)
5979 VectorSize = 64;
5980 else if (Modifier & Use128BitVectors)
5981 VectorSize = 128;
5982
5983 // Return type.
5984 SmallVector<llvm::Type *, 3> Tys;
5985 if (Modifier & AddRetType) {
5986 llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
5987 if (Modifier & VectorizeRetType)
5988 Ty = llvm::FixedVectorType::get(
5989 Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1);
5990
5991 Tys.push_back(Ty);
5992 }
5993
5994 // Arguments.
5995 if (Modifier & VectorizeArgTypes) {
5996 int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1;
5997 ArgType = llvm::FixedVectorType::get(ArgType, Elts);
5998 }
5999
6000 if (Modifier & (Add1ArgType | Add2ArgTypes))
6001 Tys.push_back(ArgType);
6002
6003 if (Modifier & Add2ArgTypes)
6004 Tys.push_back(ArgType);
6005
6006 if (Modifier & InventFloatType)
6007 Tys.push_back(FloatTy);
6008
6009 return CGM.getIntrinsic(IntrinsicID, Tys);
6010 }
6011
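/// Emit a scalar (SISD) NEON builtin using the table entry in \p SISDInfo.
/// Scalar operands are inserted into one-element vectors where the chosen
/// intrinsic expects vectors, and a wider vector result is narrowed back to
/// the builtin's scalar return type.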
6012 static Value *EmitCommonNeonSISDBuiltinExpr(
6013 CodeGenFunction &CGF, const ARMVectorIntrinsicInfo &SISDInfo,
6014 SmallVectorImpl<Value *> &Ops, const CallExpr *E) {
6015 unsigned BuiltinID = SISDInfo.BuiltinID;
6016 unsigned int Int = SISDInfo.LLVMIntrinsic;
6017 unsigned Modifier = SISDInfo.TypeModifier;
6018 const char *s = SISDInfo.NameHint;
6019
6020 switch (BuiltinID) {
6021 case NEON::BI__builtin_neon_vcled_s64:
6022 case NEON::BI__builtin_neon_vcled_u64:
6023 case NEON::BI__builtin_neon_vcles_f32:
6024 case NEON::BI__builtin_neon_vcled_f64:
6025 case NEON::BI__builtin_neon_vcltd_s64:
6026 case NEON::BI__builtin_neon_vcltd_u64:
6027 case NEON::BI__builtin_neon_vclts_f32:
6028 case NEON::BI__builtin_neon_vcltd_f64:
6029 case NEON::BI__builtin_neon_vcales_f32:
6030 case NEON::BI__builtin_neon_vcaled_f64:
6031 case NEON::BI__builtin_neon_vcalts_f32:
6032 case NEON::BI__builtin_neon_vcaltd_f64:
6033   // Only one direction of comparisons actually exists; cmle is actually a cmge
6034   // with swapped operands. The table gives us the right intrinsic, but we
6035   // still need to do the swap.
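  // For example, vcaled_f64(a, b), i.e. |a| <= |b|, is emitted as facge(b, a).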
6036 std::swap(Ops[0], Ops[1]);
6037 break;
6038 }
6039
6040 assert(Int && "Generic code assumes a valid intrinsic");
6041
6042 // Determine the type(s) of this overloaded AArch64 intrinsic.
6043 const Expr *Arg = E->getArg(0);
6044 llvm::Type *ArgTy = CGF.ConvertType(Arg->getType());
6045 Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E);
6046
6047 int j = 0;
6048 ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0);
6049 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
6050 ai != ae; ++ai, ++j) {
6051 llvm::Type *ArgTy = ai->getType();
6052 if (Ops[j]->getType()->getPrimitiveSizeInBits() ==
6053 ArgTy->getPrimitiveSizeInBits())
6054 continue;
6055
6056 assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
6057 // The constant argument to an _n_ intrinsic always has Int32Ty, so truncate
6058 // it before inserting.
6059 Ops[j] = CGF.Builder.CreateTruncOrBitCast(
6060 Ops[j], cast<llvm::VectorType>(ArgTy)->getElementType());
6061 Ops[j] =
6062 CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0);
6063 }
6064
6065 Value *Result = CGF.EmitNeonCall(F, Ops, s);
6066 llvm::Type *ResultType = CGF.ConvertType(E->getType());
6067 if (ResultType->getPrimitiveSizeInBits().getFixedSize() <
6068 Result->getType()->getPrimitiveSizeInBits().getFixedSize())
6069 return CGF.Builder.CreateExtractElement(Result, C0);
6070
6071 return CGF.Builder.CreateBitCast(Result, ResultType, s);
6072 }
6073
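/// Shared lowering for NEON builtins whose handling is (mostly) common to ARM
/// and AArch64. The last call argument encodes the NeonTypeFlags that select
/// the overloaded vector type; builtins mapped with NEONMAP0 are typically
/// expanded to generic IR in the switch below instead of a target intrinsic.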
6074 Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
6075 unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
6076 const char *NameHint, unsigned Modifier, const CallExpr *E,
6077 SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1,
6078 llvm::Triple::ArchType Arch) {
6079 // Get the last argument, which specifies the vector type.
6080 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
6081 Optional<llvm::APSInt> NeonTypeConst =
6082 Arg->getIntegerConstantExpr(getContext());
6083 if (!NeonTypeConst)
6084 return nullptr;
6085
6086 // Determine the type of this overloaded NEON intrinsic.
6087 NeonTypeFlags Type(NeonTypeConst->getZExtValue());
6088 bool Usgn = Type.isUnsigned();
6089 bool Quad = Type.isQuad();
6090 const bool HasLegalHalfType = getTarget().hasLegalHalfType();
6091 const bool AllowBFloatArgsAndRet =
6092 getTargetHooks().getABIInfo().allowBFloatArgsAndRet();
6093
6094 llvm::FixedVectorType *VTy =
6095 GetNeonType(this, Type, HasLegalHalfType, false, AllowBFloatArgsAndRet);
6096 llvm::Type *Ty = VTy;
6097 if (!Ty)
6098 return nullptr;
6099
6100 auto getAlignmentValue32 = [&](Address addr) -> Value* {
6101 return Builder.getInt32(addr.getAlignment().getQuantity());
6102 };
6103
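  // For UnsignedAlts entries the table stores the unsigned intrinsic in
  // LLVMIntrinsic and its signed counterpart in AltLLVMIntrinsic (e.g. vhadd_v
  // maps to a uhadd/shadd pair), so switch to the alternative for signed types.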
6104 unsigned Int = LLVMIntrinsic;
6105 if ((Modifier & UnsignedAlts) && !Usgn)
6106 Int = AltLLVMIntrinsic;
6107
6108 switch (BuiltinID) {
6109 default: break;
6110 case NEON::BI__builtin_neon_splat_lane_v:
6111 case NEON::BI__builtin_neon_splat_laneq_v:
6112 case NEON::BI__builtin_neon_splatq_lane_v:
6113 case NEON::BI__builtin_neon_splatq_laneq_v: {
6114 auto NumElements = VTy->getElementCount();
6115 if (BuiltinID == NEON::BI__builtin_neon_splatq_lane_v)
6116 NumElements = NumElements * 2;
6117 if (BuiltinID == NEON::BI__builtin_neon_splat_laneq_v)
6118 NumElements = NumElements.divideCoefficientBy(2);
6119
6120 Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
6121 return EmitNeonSplat(Ops[0], cast<ConstantInt>(Ops[1]), NumElements);
6122 }
6123 case NEON::BI__builtin_neon_vpadd_v:
6124 case NEON::BI__builtin_neon_vpaddq_v:
6125 // We don't allow fp/int overloading of intrinsics.
6126 if (VTy->getElementType()->isFloatingPointTy() &&
6127 Int == Intrinsic::aarch64_neon_addp)
6128 Int = Intrinsic::aarch64_neon_faddp;
6129 break;
6130 case NEON::BI__builtin_neon_vabs_v:
6131 case NEON::BI__builtin_neon_vabsq_v:
6132 if (VTy->getElementType()->isFloatingPointTy())
6133 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
6134 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
6135 case NEON::BI__builtin_neon_vaddhn_v: {
6136 llvm::FixedVectorType *SrcTy =
6137 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
6138
6139 // %sum = add <4 x i32> %lhs, %rhs
6140 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
6141 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
6142 Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");
6143
6144 // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
6145 Constant *ShiftAmt =
6146 ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
6147 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");
6148
6149 // %res = trunc <4 x i32> %high to <4 x i16>
6150 return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
6151 }
6152 case NEON::BI__builtin_neon_vcale_v:
6153 case NEON::BI__builtin_neon_vcaleq_v:
6154 case NEON::BI__builtin_neon_vcalt_v:
6155 case NEON::BI__builtin_neon_vcaltq_v:
6156 std::swap(Ops[0], Ops[1]);
6157 LLVM_FALLTHROUGH;
6158 case NEON::BI__builtin_neon_vcage_v:
6159 case NEON::BI__builtin_neon_vcageq_v:
6160 case NEON::BI__builtin_neon_vcagt_v:
6161 case NEON::BI__builtin_neon_vcagtq_v: {
6162 llvm::Type *Ty;
6163 switch (VTy->getScalarSizeInBits()) {
6164 default: llvm_unreachable("unexpected type");
6165 case 32:
6166 Ty = FloatTy;
6167 break;
6168 case 64:
6169 Ty = DoubleTy;
6170 break;
6171 case 16:
6172 Ty = HalfTy;
6173 break;
6174 }
6175 auto *VecFlt = llvm::FixedVectorType::get(Ty, VTy->getNumElements());
6176 llvm::Type *Tys[] = { VTy, VecFlt };
6177 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
6178 return EmitNeonCall(F, Ops, NameHint);
6179 }
6180 case NEON::BI__builtin_neon_vceqz_v:
6181 case NEON::BI__builtin_neon_vceqzq_v:
6182 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
6183 ICmpInst::ICMP_EQ, "vceqz");
6184 case NEON::BI__builtin_neon_vcgez_v:
6185 case NEON::BI__builtin_neon_vcgezq_v:
6186 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
6187 ICmpInst::ICMP_SGE, "vcgez");
6188 case NEON::BI__builtin_neon_vclez_v:
6189 case NEON::BI__builtin_neon_vclezq_v:
6190 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
6191 ICmpInst::ICMP_SLE, "vclez");
6192 case NEON::BI__builtin_neon_vcgtz_v:
6193 case NEON::BI__builtin_neon_vcgtzq_v:
6194 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
6195 ICmpInst::ICMP_SGT, "vcgtz");
6196 case NEON::BI__builtin_neon_vcltz_v:
6197 case NEON::BI__builtin_neon_vcltzq_v:
6198 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
6199 ICmpInst::ICMP_SLT, "vcltz");
6200 case NEON::BI__builtin_neon_vclz_v:
6201 case NEON::BI__builtin_neon_vclzq_v:
6202     // We generate a target-independent intrinsic, which needs a second argument
6203     // indicating whether or not clz of zero is undefined; on ARM it isn't.
6204 Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
6205 break;
6206 case NEON::BI__builtin_neon_vcvt_f32_v:
6207 case NEON::BI__builtin_neon_vcvtq_f32_v:
6208 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6209 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad),
6210 HasLegalHalfType);
6211 return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
6212 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
6213 case NEON::BI__builtin_neon_vcvt_f16_v:
6214 case NEON::BI__builtin_neon_vcvtq_f16_v:
6215 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6216 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad),
6217 HasLegalHalfType);
6218 return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
6219 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
6220 case NEON::BI__builtin_neon_vcvt_n_f16_v:
6221 case NEON::BI__builtin_neon_vcvt_n_f32_v:
6222 case NEON::BI__builtin_neon_vcvt_n_f64_v:
6223 case NEON::BI__builtin_neon_vcvtq_n_f16_v:
6224 case NEON::BI__builtin_neon_vcvtq_n_f32_v:
6225 case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
6226 llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
6227 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
6228 Function *F = CGM.getIntrinsic(Int, Tys);
6229 return EmitNeonCall(F, Ops, "vcvt_n");
6230 }
6231 case NEON::BI__builtin_neon_vcvt_n_s16_v:
6232 case NEON::BI__builtin_neon_vcvt_n_s32_v:
6233 case NEON::BI__builtin_neon_vcvt_n_u16_v:
6234 case NEON::BI__builtin_neon_vcvt_n_u32_v:
6235 case NEON::BI__builtin_neon_vcvt_n_s64_v:
6236 case NEON::BI__builtin_neon_vcvt_n_u64_v:
6237 case NEON::BI__builtin_neon_vcvtq_n_s16_v:
6238 case NEON::BI__builtin_neon_vcvtq_n_s32_v:
6239 case NEON::BI__builtin_neon_vcvtq_n_u16_v:
6240 case NEON::BI__builtin_neon_vcvtq_n_u32_v:
6241 case NEON::BI__builtin_neon_vcvtq_n_s64_v:
6242 case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
6243 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
6244 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
6245 return EmitNeonCall(F, Ops, "vcvt_n");
6246 }
6247 case NEON::BI__builtin_neon_vcvt_s32_v:
6248 case NEON::BI__builtin_neon_vcvt_u32_v:
6249 case NEON::BI__builtin_neon_vcvt_s64_v:
6250 case NEON::BI__builtin_neon_vcvt_u64_v:
6251 case NEON::BI__builtin_neon_vcvt_s16_v:
6252 case NEON::BI__builtin_neon_vcvt_u16_v:
6253 case NEON::BI__builtin_neon_vcvtq_s32_v:
6254 case NEON::BI__builtin_neon_vcvtq_u32_v:
6255 case NEON::BI__builtin_neon_vcvtq_s64_v:
6256 case NEON::BI__builtin_neon_vcvtq_u64_v:
6257 case NEON::BI__builtin_neon_vcvtq_s16_v:
6258 case NEON::BI__builtin_neon_vcvtq_u16_v: {
6259 Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
6260 return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
6261 : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
6262 }
6263 case NEON::BI__builtin_neon_vcvta_s16_v:
6264 case NEON::BI__builtin_neon_vcvta_s32_v:
6265 case NEON::BI__builtin_neon_vcvta_s64_v:
6266 case NEON::BI__builtin_neon_vcvta_u16_v:
6267 case NEON::BI__builtin_neon_vcvta_u32_v:
6268 case NEON::BI__builtin_neon_vcvta_u64_v:
6269 case NEON::BI__builtin_neon_vcvtaq_s16_v:
6270 case NEON::BI__builtin_neon_vcvtaq_s32_v:
6271 case NEON::BI__builtin_neon_vcvtaq_s64_v:
6272 case NEON::BI__builtin_neon_vcvtaq_u16_v:
6273 case NEON::BI__builtin_neon_vcvtaq_u32_v:
6274 case NEON::BI__builtin_neon_vcvtaq_u64_v:
6275 case NEON::BI__builtin_neon_vcvtn_s16_v:
6276 case NEON::BI__builtin_neon_vcvtn_s32_v:
6277 case NEON::BI__builtin_neon_vcvtn_s64_v:
6278 case NEON::BI__builtin_neon_vcvtn_u16_v:
6279 case NEON::BI__builtin_neon_vcvtn_u32_v:
6280 case NEON::BI__builtin_neon_vcvtn_u64_v:
6281 case NEON::BI__builtin_neon_vcvtnq_s16_v:
6282 case NEON::BI__builtin_neon_vcvtnq_s32_v:
6283 case NEON::BI__builtin_neon_vcvtnq_s64_v:
6284 case NEON::BI__builtin_neon_vcvtnq_u16_v:
6285 case NEON::BI__builtin_neon_vcvtnq_u32_v:
6286 case NEON::BI__builtin_neon_vcvtnq_u64_v:
6287 case NEON::BI__builtin_neon_vcvtp_s16_v:
6288 case NEON::BI__builtin_neon_vcvtp_s32_v:
6289 case NEON::BI__builtin_neon_vcvtp_s64_v:
6290 case NEON::BI__builtin_neon_vcvtp_u16_v:
6291 case NEON::BI__builtin_neon_vcvtp_u32_v:
6292 case NEON::BI__builtin_neon_vcvtp_u64_v:
6293 case NEON::BI__builtin_neon_vcvtpq_s16_v:
6294 case NEON::BI__builtin_neon_vcvtpq_s32_v:
6295 case NEON::BI__builtin_neon_vcvtpq_s64_v:
6296 case NEON::BI__builtin_neon_vcvtpq_u16_v:
6297 case NEON::BI__builtin_neon_vcvtpq_u32_v:
6298 case NEON::BI__builtin_neon_vcvtpq_u64_v:
6299 case NEON::BI__builtin_neon_vcvtm_s16_v:
6300 case NEON::BI__builtin_neon_vcvtm_s32_v:
6301 case NEON::BI__builtin_neon_vcvtm_s64_v:
6302 case NEON::BI__builtin_neon_vcvtm_u16_v:
6303 case NEON::BI__builtin_neon_vcvtm_u32_v:
6304 case NEON::BI__builtin_neon_vcvtm_u64_v:
6305 case NEON::BI__builtin_neon_vcvtmq_s16_v:
6306 case NEON::BI__builtin_neon_vcvtmq_s32_v:
6307 case NEON::BI__builtin_neon_vcvtmq_s64_v:
6308 case NEON::BI__builtin_neon_vcvtmq_u16_v:
6309 case NEON::BI__builtin_neon_vcvtmq_u32_v:
6310 case NEON::BI__builtin_neon_vcvtmq_u64_v: {
6311 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
6312 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
6313 }
6314 case NEON::BI__builtin_neon_vcvtx_f32_v: {
6315 llvm::Type *Tys[2] = { VTy->getTruncatedElementVectorType(VTy), Ty};
6316 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
6317
6318 }
6319 case NEON::BI__builtin_neon_vext_v:
6320 case NEON::BI__builtin_neon_vextq_v: {
6321 int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
6322 SmallVector<int, 16> Indices;
6323 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
6324 Indices.push_back(i+CV);
6325
6326 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6327 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6328 return Builder.CreateShuffleVector(Ops[0], Ops[1], Indices, "vext");
6329 }
6330 case NEON::BI__builtin_neon_vfma_v:
6331 case NEON::BI__builtin_neon_vfmaq_v: {
6332 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6333 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6334 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
6335
6336     // The NEON intrinsic puts the accumulator first, unlike LLVM's fma.
6337 return emitCallMaybeConstrainedFPBuiltin(
6338 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
6339 {Ops[1], Ops[2], Ops[0]});
6340 }
6341 case NEON::BI__builtin_neon_vld1_v:
6342 case NEON::BI__builtin_neon_vld1q_v: {
6343 llvm::Type *Tys[] = {Ty, Int8PtrTy};
6344 Ops.push_back(getAlignmentValue32(PtrOp0));
6345 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vld1");
6346 }
6347 case NEON::BI__builtin_neon_vld1_x2_v:
6348 case NEON::BI__builtin_neon_vld1q_x2_v:
6349 case NEON::BI__builtin_neon_vld1_x3_v:
6350 case NEON::BI__builtin_neon_vld1q_x3_v:
6351 case NEON::BI__builtin_neon_vld1_x4_v:
6352 case NEON::BI__builtin_neon_vld1q_x4_v: {
6353 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
6354 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
6355 llvm::Type *Tys[2] = { VTy, PTy };
6356 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
6357 Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
6358 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
6359 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6360 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
6361 }
6362 case NEON::BI__builtin_neon_vld2_v:
6363 case NEON::BI__builtin_neon_vld2q_v:
6364 case NEON::BI__builtin_neon_vld3_v:
6365 case NEON::BI__builtin_neon_vld3q_v:
6366 case NEON::BI__builtin_neon_vld4_v:
6367 case NEON::BI__builtin_neon_vld4q_v:
6368 case NEON::BI__builtin_neon_vld2_dup_v:
6369 case NEON::BI__builtin_neon_vld2q_dup_v:
6370 case NEON::BI__builtin_neon_vld3_dup_v:
6371 case NEON::BI__builtin_neon_vld3q_dup_v:
6372 case NEON::BI__builtin_neon_vld4_dup_v:
6373 case NEON::BI__builtin_neon_vld4q_dup_v: {
6374 llvm::Type *Tys[] = {Ty, Int8PtrTy};
6375 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
6376 Value *Align = getAlignmentValue32(PtrOp1);
6377 Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint);
6378 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
6379 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6380 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
6381 }
6382 case NEON::BI__builtin_neon_vld1_dup_v:
6383 case NEON::BI__builtin_neon_vld1q_dup_v: {
6384 Value *V = UndefValue::get(Ty);
6385 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
6386 PtrOp0 = Builder.CreateBitCast(PtrOp0, Ty);
6387 LoadInst *Ld = Builder.CreateLoad(PtrOp0);
6388 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
6389 Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
6390 return EmitNeonSplat(Ops[0], CI);
6391 }
6392 case NEON::BI__builtin_neon_vld2_lane_v:
6393 case NEON::BI__builtin_neon_vld2q_lane_v:
6394 case NEON::BI__builtin_neon_vld3_lane_v:
6395 case NEON::BI__builtin_neon_vld3q_lane_v:
6396 case NEON::BI__builtin_neon_vld4_lane_v:
6397 case NEON::BI__builtin_neon_vld4q_lane_v: {
6398 llvm::Type *Tys[] = {Ty, Int8PtrTy};
6399 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
6400 for (unsigned I = 2; I < Ops.size() - 1; ++I)
6401 Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
6402 Ops.push_back(getAlignmentValue32(PtrOp1));
6403 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint);
6404 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
6405 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6406 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
6407 }
6408 case NEON::BI__builtin_neon_vmovl_v: {
6409 llvm::FixedVectorType *DTy =
6410 llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
6411 Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
6412 if (Usgn)
6413 return Builder.CreateZExt(Ops[0], Ty, "vmovl");
6414 return Builder.CreateSExt(Ops[0], Ty, "vmovl");
6415 }
6416 case NEON::BI__builtin_neon_vmovn_v: {
6417 llvm::FixedVectorType *QTy =
6418 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
6419 Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
6420 return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
6421 }
6422 case NEON::BI__builtin_neon_vmull_v:
6423   // FIXME: the integer vmull operations could be emitted in terms of pure
6424   // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
6425   // hoisting the exts outside loops. Until GlobalISel comes along and can
6426   // see through such movement, this leads to bad CodeGen. So we need an
6427   // intrinsic for now.
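    // e.g. vmull_s16(<4 x i16> a, <4 x i16> b) would otherwise be
    // mul(sext a, sext b) in <4 x i32>, but is kept here as a call to the
    // vmull intrinsic (@llvm.arm.neon.vmulls for the signed case).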
6428 Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
6429 Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
6430 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
6431 case NEON::BI__builtin_neon_vpadal_v:
6432 case NEON::BI__builtin_neon_vpadalq_v: {
6433 // The source operand type has twice as many elements of half the size.
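    // e.g. for a <4 x i32> accumulator the pairwise-add source is <8 x i16>.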
6434 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
6435 llvm::Type *EltTy =
6436 llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
6437 auto *NarrowTy =
6438 llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
6439 llvm::Type *Tys[2] = { Ty, NarrowTy };
6440 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
6441 }
6442 case NEON::BI__builtin_neon_vpaddl_v:
6443 case NEON::BI__builtin_neon_vpaddlq_v: {
6444 // The source operand type has twice as many elements of half the size.
6445 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
6446 llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
6447 auto *NarrowTy =
6448 llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
6449 llvm::Type *Tys[2] = { Ty, NarrowTy };
6450 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
6451 }
6452 case NEON::BI__builtin_neon_vqdmlal_v:
6453 case NEON::BI__builtin_neon_vqdmlsl_v: {
6454 SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
6455 Ops[1] =
6456 EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), MulOps, "vqdmlal");
6457 Ops.resize(2);
6458 return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint);
6459 }
6460 case NEON::BI__builtin_neon_vqdmulhq_lane_v:
6461 case NEON::BI__builtin_neon_vqdmulh_lane_v:
6462 case NEON::BI__builtin_neon_vqrdmulhq_lane_v:
6463 case NEON::BI__builtin_neon_vqrdmulh_lane_v: {
6464 auto *RTy = cast<llvm::FixedVectorType>(Ty);
6465 if (BuiltinID == NEON::BI__builtin_neon_vqdmulhq_lane_v ||
6466 BuiltinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v)
6467 RTy = llvm::FixedVectorType::get(RTy->getElementType(),
6468 RTy->getNumElements() * 2);
6469 llvm::Type *Tys[2] = {
6470 RTy, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
6471 /*isQuad*/ false))};
6472 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
6473 }
6474 case NEON::BI__builtin_neon_vqdmulhq_laneq_v:
6475 case NEON::BI__builtin_neon_vqdmulh_laneq_v:
6476 case NEON::BI__builtin_neon_vqrdmulhq_laneq_v:
6477 case NEON::BI__builtin_neon_vqrdmulh_laneq_v: {
6478 llvm::Type *Tys[2] = {
6479 Ty, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
6480 /*isQuad*/ true))};
6481 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
6482 }
6483 case NEON::BI__builtin_neon_vqshl_n_v:
6484 case NEON::BI__builtin_neon_vqshlq_n_v:
6485 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
6486 1, false);
6487 case NEON::BI__builtin_neon_vqshlu_n_v:
6488 case NEON::BI__builtin_neon_vqshluq_n_v:
6489 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n",
6490 1, false);
6491 case NEON::BI__builtin_neon_vrecpe_v:
6492 case NEON::BI__builtin_neon_vrecpeq_v:
6493 case NEON::BI__builtin_neon_vrsqrte_v:
6494 case NEON::BI__builtin_neon_vrsqrteq_v:
6495 Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
6496 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
6497 case NEON::BI__builtin_neon_vrndi_v:
6498 case NEON::BI__builtin_neon_vrndiq_v:
6499 Int = Builder.getIsFPConstrained()
6500 ? Intrinsic::experimental_constrained_nearbyint
6501 : Intrinsic::nearbyint;
6502 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
6503 case NEON::BI__builtin_neon_vrshr_n_v:
6504 case NEON::BI__builtin_neon_vrshrq_n_v:
6505 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
6506 1, true);
6507 case NEON::BI__builtin_neon_vshl_n_v:
6508 case NEON::BI__builtin_neon_vshlq_n_v:
6509 Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
6510 return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1],
6511 "vshl_n");
6512 case NEON::BI__builtin_neon_vshll_n_v: {
6513 llvm::FixedVectorType *SrcTy =
6514 llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
6515 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
6516 if (Usgn)
6517 Ops[0] = Builder.CreateZExt(Ops[0], VTy);
6518 else
6519 Ops[0] = Builder.CreateSExt(Ops[0], VTy);
6520 Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
6521 return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
6522 }
6523 case NEON::BI__builtin_neon_vshrn_n_v: {
6524 llvm::FixedVectorType *SrcTy =
6525 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
6526 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
6527 Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
6528 if (Usgn)
6529 Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
6530 else
6531 Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
6532 return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
6533 }
6534 case NEON::BI__builtin_neon_vshr_n_v:
6535 case NEON::BI__builtin_neon_vshrq_n_v:
6536 return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n");
6537 case NEON::BI__builtin_neon_vst1_v:
6538 case NEON::BI__builtin_neon_vst1q_v:
6539 case NEON::BI__builtin_neon_vst2_v:
6540 case NEON::BI__builtin_neon_vst2q_v:
6541 case NEON::BI__builtin_neon_vst3_v:
6542 case NEON::BI__builtin_neon_vst3q_v:
6543 case NEON::BI__builtin_neon_vst4_v:
6544 case NEON::BI__builtin_neon_vst4q_v:
6545 case NEON::BI__builtin_neon_vst2_lane_v:
6546 case NEON::BI__builtin_neon_vst2q_lane_v:
6547 case NEON::BI__builtin_neon_vst3_lane_v:
6548 case NEON::BI__builtin_neon_vst3q_lane_v:
6549 case NEON::BI__builtin_neon_vst4_lane_v:
6550 case NEON::BI__builtin_neon_vst4q_lane_v: {
6551 llvm::Type *Tys[] = {Int8PtrTy, Ty};
6552 Ops.push_back(getAlignmentValue32(PtrOp0));
6553 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
6554 }
6555 case NEON::BI__builtin_neon_vst1_x2_v:
6556 case NEON::BI__builtin_neon_vst1q_x2_v:
6557 case NEON::BI__builtin_neon_vst1_x3_v:
6558 case NEON::BI__builtin_neon_vst1q_x3_v:
6559 case NEON::BI__builtin_neon_vst1_x4_v:
6560 case NEON::BI__builtin_neon_vst1q_x4_v: {
6561 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
6562     // TODO: Currently in AArch32 mode the pointer operand comes first, whereas
6563     // in AArch64 it comes last. We may want to stick to one or the other.
6564 if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be ||
6565 Arch == llvm::Triple::aarch64_32) {
6566 llvm::Type *Tys[2] = { VTy, PTy };
6567 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
6568 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
6569 }
6570 llvm::Type *Tys[2] = { PTy, VTy };
6571 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
6572 }
6573 case NEON::BI__builtin_neon_vsubhn_v: {
6574 llvm::FixedVectorType *SrcTy =
6575 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
6576
6577 // %sum = add <4 x i32> %lhs, %rhs
6578 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
6579 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
6580 Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");
6581
6582 // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
6583 Constant *ShiftAmt =
6584 ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
6585 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");
6586
6587 // %res = trunc <4 x i32> %high to <4 x i16>
6588 return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
6589 }
6590 case NEON::BI__builtin_neon_vtrn_v:
6591 case NEON::BI__builtin_neon_vtrnq_v: {
6592 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
6593 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6594 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
6595 Value *SV = nullptr;
6596
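    // Each iteration builds one half of the transpose; e.g. for <4 x i32>
    // operands, vi == 0 gives shuffle indices {0, 4, 2, 6} and vi == 1 gives
    // {1, 5, 3, 7}.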
6597 for (unsigned vi = 0; vi != 2; ++vi) {
6598 SmallVector<int, 16> Indices;
6599 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
6600 Indices.push_back(i+vi);
6601 Indices.push_back(i+e+vi);
6602 }
6603 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
6604 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
6605 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
6606 }
6607 return SV;
6608 }
6609 case NEON::BI__builtin_neon_vtst_v:
6610 case NEON::BI__builtin_neon_vtstq_v: {
6611 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6612 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6613 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
6614 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
6615 ConstantAggregateZero::get(Ty));
6616 return Builder.CreateSExt(Ops[0], Ty, "vtst");
6617 }
6618 case NEON::BI__builtin_neon_vuzp_v:
6619 case NEON::BI__builtin_neon_vuzpq_v: {
6620 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
6621 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6622 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
6623 Value *SV = nullptr;
6624
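    // e.g. for <4 x i32> operands, vi == 0 selects the even lanes {0, 2, 4, 6}
    // of the concatenated input and vi == 1 the odd lanes {1, 3, 5, 7}.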
6625 for (unsigned vi = 0; vi != 2; ++vi) {
6626 SmallVector<int, 16> Indices;
6627 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
6628 Indices.push_back(2*i+vi);
6629
6630 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
6631 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
6632 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
6633 }
6634 return SV;
6635 }
6636 case NEON::BI__builtin_neon_vzip_v:
6637 case NEON::BI__builtin_neon_vzipq_v: {
6638 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
6639 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6640 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
6641 Value *SV = nullptr;
6642
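    // e.g. for <4 x i32> operands, vi == 0 interleaves the low halves with
    // indices {0, 4, 1, 5} and vi == 1 the high halves with {2, 6, 3, 7}.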
6643 for (unsigned vi = 0; vi != 2; ++vi) {
6644 SmallVector<int, 16> Indices;
6645 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
6646 Indices.push_back((i + vi*e) >> 1);
6647 Indices.push_back(((i + vi*e) >> 1)+e);
6648 }
6649 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
6650 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
6651 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
6652 }
6653 return SV;
6654 }
6655 case NEON::BI__builtin_neon_vdot_v:
6656 case NEON::BI__builtin_neon_vdotq_v: {
6657 auto *InputTy =
6658 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
6659 llvm::Type *Tys[2] = { Ty, InputTy };
6660 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
6661 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot");
6662 }
6663 case NEON::BI__builtin_neon_vfmlal_low_v:
6664 case NEON::BI__builtin_neon_vfmlalq_low_v: {
6665 auto *InputTy =
6666 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
6667 llvm::Type *Tys[2] = { Ty, InputTy };
6668 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low");
6669 }
6670 case NEON::BI__builtin_neon_vfmlsl_low_v:
6671 case NEON::BI__builtin_neon_vfmlslq_low_v: {
6672 auto *InputTy =
6673 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
6674 llvm::Type *Tys[2] = { Ty, InputTy };
6675 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low");
6676 }
6677 case NEON::BI__builtin_neon_vfmlal_high_v:
6678 case NEON::BI__builtin_neon_vfmlalq_high_v: {
6679 auto *InputTy =
6680 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
6681 llvm::Type *Tys[2] = { Ty, InputTy };
6682 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high");
6683 }
6684 case NEON::BI__builtin_neon_vfmlsl_high_v:
6685 case NEON::BI__builtin_neon_vfmlslq_high_v: {
6686 auto *InputTy =
6687 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
6688 llvm::Type *Tys[2] = { Ty, InputTy };
6689 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high");
6690 }
6691 case NEON::BI__builtin_neon_vmmlaq_v: {
6692 auto *InputTy =
6693 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
6694 llvm::Type *Tys[2] = { Ty, InputTy };
6695 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
6696 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmmla");
6697 }
6698 case NEON::BI__builtin_neon_vusmmlaq_v: {
6699 auto *InputTy =
6700 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
6701 llvm::Type *Tys[2] = { Ty, InputTy };
6702 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusmmla");
6703 }
6704 case NEON::BI__builtin_neon_vusdot_v:
6705 case NEON::BI__builtin_neon_vusdotq_v: {
6706 auto *InputTy =
6707 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
6708 llvm::Type *Tys[2] = { Ty, InputTy };
6709 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusdot");
6710 }
6711 case NEON::BI__builtin_neon_vbfdot_v:
6712 case NEON::BI__builtin_neon_vbfdotq_v: {
6713 llvm::Type *InputTy =
6714 llvm::FixedVectorType::get(BFloatTy, Ty->getPrimitiveSizeInBits() / 16);
6715 llvm::Type *Tys[2] = { Ty, InputTy };
6716 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfdot");
6717 }
6718 case NEON::BI__builtin_neon___a32_vcvt_bf16_v: {
6719 llvm::Type *Tys[1] = { Ty };
6720 Function *F = CGM.getIntrinsic(Int, Tys);
6721 return EmitNeonCall(F, Ops, "vcvtfp2bf");
6722 }
6723
6724 }
6725
6726 assert(Int && "Expected valid intrinsic number");
6727
6728 // Determine the type(s) of this overloaded AArch64 intrinsic.
6729 Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E);
6730
6731 Value *Result = EmitNeonCall(F, Ops, NameHint);
6732 llvm::Type *ResultType = ConvertType(E->getType());
6733   // Cast the AArch64 intrinsic's one-element vector result back to the
6734   // scalar type expected by the builtin.
6735 return Builder.CreateBitCast(Result, ResultType, NameHint);
6736 }
6737
6738 Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
6739 Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
6740 const CmpInst::Predicate Ip, const Twine &Name) {
6741 llvm::Type *OTy = Op->getType();
6742
6743 // FIXME: this is utterly horrific. We should not be looking at previous
6744 // codegen context to find out what needs doing. Unfortunately TableGen
6745 // currently gives us exactly the same calls for vceqz_f32 and vceqz_s32
6746 // (etc).
6747 if (BitCastInst *BI = dyn_cast<BitCastInst>(Op))
6748 OTy = BI->getOperand(0)->getType();
6749
6750 Op = Builder.CreateBitCast(Op, OTy);
6751 if (OTy->getScalarType()->isFloatingPointTy()) {
6752 Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy));
6753 } else {
6754 Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy));
6755 }
6756 return Builder.CreateSExt(Op, Ty, Name);
6757 }
6758
6759 static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
6760 Value *ExtOp, Value *IndexOp,
6761 llvm::Type *ResTy, unsigned IntID,
6762 const char *Name) {
6763 SmallVector<Value *, 2> TblOps;
6764 if (ExtOp)
6765 TblOps.push_back(ExtOp);
6766
6767   // Build a vector containing sequential numbers like (0, 1, 2, ..., 15).
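  // e.g. with <8 x i8> table operands this is (0, 1, ..., 15), used below to
  // concatenate pairs of 64-bit table registers into 128-bit tables.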
6768 SmallVector<int, 16> Indices;
6769 auto *TblTy = cast<llvm::FixedVectorType>(Ops[0]->getType());
6770 for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
6771 Indices.push_back(2*i);
6772 Indices.push_back(2*i+1);
6773 }
6774
6775 int PairPos = 0, End = Ops.size() - 1;
6776 while (PairPos < End) {
6777 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
6778 Ops[PairPos+1], Indices,
6779 Name));
6780 PairPos += 2;
6781 }
6782
6783   // If there's an odd number of 64-bit lookup tables, fill the high 64 bits
6784   // of the final 128-bit lookup table with zero.
6785 if (PairPos == End) {
6786 Value *ZeroTbl = ConstantAggregateZero::get(TblTy);
6787 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
6788 ZeroTbl, Indices, Name));
6789 }
6790
6791 Function *TblF;
6792 TblOps.push_back(IndexOp);
6793 TblF = CGF.CGM.getIntrinsic(IntID, ResTy);
6794
6795 return CGF.EmitNeonCall(TblF, TblOps, Name);
6796 }
6797
6798 Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
6799 unsigned Value;
6800 switch (BuiltinID) {
6801 default:
6802 return nullptr;
6803 case ARM::BI__builtin_arm_nop:
6804 Value = 0;
6805 break;
6806 case ARM::BI__builtin_arm_yield:
6807 case ARM::BI__yield:
6808 Value = 1;
6809 break;
6810 case ARM::BI__builtin_arm_wfe:
6811 case ARM::BI__wfe:
6812 Value = 2;
6813 break;
6814 case ARM::BI__builtin_arm_wfi:
6815 case ARM::BI__wfi:
6816 Value = 3;
6817 break;
6818 case ARM::BI__builtin_arm_sev:
6819 case ARM::BI__sev:
6820 Value = 4;
6821 break;
6822 case ARM::BI__builtin_arm_sevl:
6823 case ARM::BI__sevl:
6824 Value = 5;
6825 break;
6826 }
6827
6828 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
6829 llvm::ConstantInt::get(Int32Ty, Value));
6830 }
6831
6832 enum SpecialRegisterAccessKind {
6833 NormalRead,
6834 VolatileRead,
6835 Write,
6836 };
6837
6838 // Generates the IR for a read/write special register builtin.
6839 // ValueType is the type of the value that is to be written or read;
6840 // RegisterType is the type of the register being written to or read from.
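// For example (using a placeholder register name), __builtin_arm_rsr("sysreg")
// lowers to a call to @llvm.read_volatile_register.i32 with the metadata string
// !"sysreg", while __builtin_arm_wsr("sysreg", v) uses @llvm.write_register.i32.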
6841 static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
6842 const CallExpr *E,
6843 llvm::Type *RegisterType,
6844 llvm::Type *ValueType,
6845 SpecialRegisterAccessKind AccessKind,
6846 StringRef SysReg = "") {
6847   // The read/write register intrinsics only support 32-bit and 64-bit operations.
6848 assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64))
6849 && "Unsupported size for register.");
6850
6851 CodeGen::CGBuilderTy &Builder = CGF.Builder;
6852 CodeGen::CodeGenModule &CGM = CGF.CGM;
6853 LLVMContext &Context = CGM.getLLVMContext();
6854
6855 if (SysReg.empty()) {
6856 const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts();
6857 SysReg = cast<clang::StringLiteral>(SysRegStrExpr)->getString();
6858 }
6859
6860 llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) };
6861 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
6862 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
6863
6864 llvm::Type *Types[] = { RegisterType };
6865
6866 bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32);
6867 assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64))
6868 && "Can't fit 64-bit value in 32-bit register");
6869
6870 if (AccessKind != Write) {
6871 assert(AccessKind == NormalRead || AccessKind == VolatileRead);
6872 llvm::Function *F = CGM.getIntrinsic(
6873 AccessKind == VolatileRead ? llvm::Intrinsic::read_volatile_register
6874 : llvm::Intrinsic::read_register,
6875 Types);
6876 llvm::Value *Call = Builder.CreateCall(F, Metadata);
6877
6878 if (MixedTypes)
6879 // Read into 64 bit register and then truncate result to 32 bit.
6880 return Builder.CreateTrunc(Call, ValueType);
6881
6882 if (ValueType->isPointerTy())
6883 // Have i32/i64 result (Call) but want to return a VoidPtrTy (i8*).
6884 return Builder.CreateIntToPtr(Call, ValueType);
6885
6886 return Call;
6887 }
6888
6889 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
6890 llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1));
6891 if (MixedTypes) {
6892 // Extend 32 bit write value to 64 bit to pass to write.
6893 ArgValue = Builder.CreateZExt(ArgValue, RegisterType);
6894 return Builder.CreateCall(F, { Metadata, ArgValue });
6895 }
6896
6897 if (ValueType->isPointerTy()) {
6898 // Have VoidPtrTy ArgValue but want to return an i32/i64.
6899 ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType);
6900 return Builder.CreateCall(F, { Metadata, ArgValue });
6901 }
6902
6903 return Builder.CreateCall(F, { Metadata, ArgValue });
6904 }
6905
6906 /// Return true if BuiltinID is an overloaded Neon intrinsic with an extra
6907 /// argument that specifies the vector type.
6908 static bool HasExtraNeonArgument(unsigned BuiltinID) {
6909 switch (BuiltinID) {
6910 default: break;
6911 case NEON::BI__builtin_neon_vget_lane_i8:
6912 case NEON::BI__builtin_neon_vget_lane_i16:
6913 case NEON::BI__builtin_neon_vget_lane_bf16:
6914 case NEON::BI__builtin_neon_vget_lane_i32:
6915 case NEON::BI__builtin_neon_vget_lane_i64:
6916 case NEON::BI__builtin_neon_vget_lane_f32:
6917 case NEON::BI__builtin_neon_vgetq_lane_i8:
6918 case NEON::BI__builtin_neon_vgetq_lane_i16:
6919 case NEON::BI__builtin_neon_vgetq_lane_bf16:
6920 case NEON::BI__builtin_neon_vgetq_lane_i32:
6921 case NEON::BI__builtin_neon_vgetq_lane_i64:
6922 case NEON::BI__builtin_neon_vgetq_lane_f32:
6923 case NEON::BI__builtin_neon_vduph_lane_bf16:
6924 case NEON::BI__builtin_neon_vduph_laneq_bf16:
6925 case NEON::BI__builtin_neon_vset_lane_i8:
6926 case NEON::BI__builtin_neon_vset_lane_i16:
6927 case NEON::BI__builtin_neon_vset_lane_bf16:
6928 case NEON::BI__builtin_neon_vset_lane_i32:
6929 case NEON::BI__builtin_neon_vset_lane_i64:
6930 case NEON::BI__builtin_neon_vset_lane_f32:
6931 case NEON::BI__builtin_neon_vsetq_lane_i8:
6932 case NEON::BI__builtin_neon_vsetq_lane_i16:
6933 case NEON::BI__builtin_neon_vsetq_lane_bf16:
6934 case NEON::BI__builtin_neon_vsetq_lane_i32:
6935 case NEON::BI__builtin_neon_vsetq_lane_i64:
6936 case NEON::BI__builtin_neon_vsetq_lane_f32:
6937 case NEON::BI__builtin_neon_vsha1h_u32:
6938 case NEON::BI__builtin_neon_vsha1cq_u32:
6939 case NEON::BI__builtin_neon_vsha1pq_u32:
6940 case NEON::BI__builtin_neon_vsha1mq_u32:
6941 case NEON::BI__builtin_neon_vcvth_bf16_f32:
6942 case clang::ARM::BI_MoveToCoprocessor:
6943 case clang::ARM::BI_MoveToCoprocessor2:
6944 return false;
6945 }
6946 return true;
6947 }
6948
6949 Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
6950 const CallExpr *E,
6951 ReturnValueSlot ReturnValue,
6952 llvm::Triple::ArchType Arch) {
6953 if (auto Hint = GetValueForARMHint(BuiltinID))
6954 return Hint;
6955
6956 if (BuiltinID == ARM::BI__emit) {
6957 bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb;
6958 llvm::FunctionType *FTy =
6959 llvm::FunctionType::get(VoidTy, /*Variadic=*/false);
6960
6961 Expr::EvalResult Result;
6962 if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
6963 llvm_unreachable("Sema will ensure that the parameter is constant");
6964
6965 llvm::APSInt Value = Result.Val.getInt();
6966 uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue();
6967
6968 llvm::InlineAsm *Emit =
6969 IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "",
6970 /*hasSideEffects=*/true)
6971 : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "",
6972 /*hasSideEffects=*/true);
6973
6974 return Builder.CreateCall(Emit);
6975 }
6976
6977 if (BuiltinID == ARM::BI__builtin_arm_dbg) {
6978 Value *Option = EmitScalarExpr(E->getArg(0));
6979 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option);
6980 }
6981
6982 if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
6983 Value *Address = EmitScalarExpr(E->getArg(0));
6984 Value *RW = EmitScalarExpr(E->getArg(1));
6985 Value *IsData = EmitScalarExpr(E->getArg(2));
6986
6987     // Locality is not supported on the ARM target.
6988 Value *Locality = llvm::ConstantInt::get(Int32Ty, 3);
6989
6990 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
6991 return Builder.CreateCall(F, {Address, RW, Locality, IsData});
6992 }
6993
6994 if (BuiltinID == ARM::BI__builtin_arm_rbit) {
6995 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
6996 return Builder.CreateCall(
6997 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
6998 }
6999
7000 if (BuiltinID == ARM::BI__builtin_arm_cls) {
7001 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
7002 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls), Arg, "cls");
7003 }
7004 if (BuiltinID == ARM::BI__builtin_arm_cls64) {
7005 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
7006 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls64), Arg,
7007 "cls");
7008 }
7009
7010 if (BuiltinID == ARM::BI__clear_cache) {
7011 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
7012 const FunctionDecl *FD = E->getDirectCallee();
7013 Value *Ops[2];
7014 for (unsigned i = 0; i < 2; i++)
7015 Ops[i] = EmitScalarExpr(E->getArg(i));
7016 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
7017 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
7018 StringRef Name = FD->getName();
7019 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
7020 }
7021
7022 if (BuiltinID == ARM::BI__builtin_arm_mcrr ||
7023 BuiltinID == ARM::BI__builtin_arm_mcrr2) {
7024 Function *F;
7025
7026 switch (BuiltinID) {
7027 default: llvm_unreachable("unexpected builtin");
7028 case ARM::BI__builtin_arm_mcrr:
7029 F = CGM.getIntrinsic(Intrinsic::arm_mcrr);
7030 break;
7031 case ARM::BI__builtin_arm_mcrr2:
7032 F = CGM.getIntrinsic(Intrinsic::arm_mcrr2);
7033 break;
7034 }
7035
7036     // The MCRR{2} instruction has 5 operands, but the builtin has only 4,
7037     // because Rt and Rt2 are passed to the builtin as a single unsigned
7038     // 64-bit integer. Internally they are represented as two 32-bit
7039     // integers, so the 64-bit value is split into its low and high halves
7040     // before being passed to the LLVM intrinsic, which takes Rt and Rt2
7041     // separately.
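    // e.g. (a sketch) __builtin_arm_mcrr(cp, opc1, val64, crm) is emitted as
    // @llvm.arm.mcrr(cp, opc1, trunc(val64), trunc(val64 >> 32), crm).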
7042
7043 Value *Coproc = EmitScalarExpr(E->getArg(0));
7044 Value *Opc1 = EmitScalarExpr(E->getArg(1));
7045 Value *RtAndRt2 = EmitScalarExpr(E->getArg(2));
7046 Value *CRm = EmitScalarExpr(E->getArg(3));
7047
7048 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
7049 Value *Rt = Builder.CreateTruncOrBitCast(RtAndRt2, Int32Ty);
7050 Value *Rt2 = Builder.CreateLShr(RtAndRt2, C1);
7051 Rt2 = Builder.CreateTruncOrBitCast(Rt2, Int32Ty);
7052
7053 return Builder.CreateCall(F, {Coproc, Opc1, Rt, Rt2, CRm});
7054 }
7055
7056 if (BuiltinID == ARM::BI__builtin_arm_mrrc ||
7057 BuiltinID == ARM::BI__builtin_arm_mrrc2) {
7058 Function *F;
7059
7060 switch (BuiltinID) {
7061 default: llvm_unreachable("unexpected builtin");
7062 case ARM::BI__builtin_arm_mrrc:
7063 F = CGM.getIntrinsic(Intrinsic::arm_mrrc);
7064 break;
7065 case ARM::BI__builtin_arm_mrrc2:
7066 F = CGM.getIntrinsic(Intrinsic::arm_mrrc2);
7067 break;
7068 }
7069
7070 Value *Coproc = EmitScalarExpr(E->getArg(0));
7071 Value *Opc1 = EmitScalarExpr(E->getArg(1));
7072 Value *CRm = EmitScalarExpr(E->getArg(2));
7073 Value *RtAndRt2 = Builder.CreateCall(F, {Coproc, Opc1, CRm});
7074
7075     // The intrinsic returns an unsigned 64-bit integer represented as two
7076     // 32-bit integers, which are reassembled into a single i64 below.
7077
7078 Value *Rt = Builder.CreateExtractValue(RtAndRt2, 1);
7079 Value *Rt1 = Builder.CreateExtractValue(RtAndRt2, 0);
7080 Rt = Builder.CreateZExt(Rt, Int64Ty);
7081 Rt1 = Builder.CreateZExt(Rt1, Int64Ty);
7082
7083 Value *ShiftCast = llvm::ConstantInt::get(Int64Ty, 32);
7084 RtAndRt2 = Builder.CreateShl(Rt, ShiftCast, "shl", true);
7085 RtAndRt2 = Builder.CreateOr(RtAndRt2, Rt1);
7086
7087 return Builder.CreateBitCast(RtAndRt2, ConvertType(E->getType()));
7088 }
7089
7090 if (BuiltinID == ARM::BI__builtin_arm_ldrexd ||
7091 ((BuiltinID == ARM::BI__builtin_arm_ldrex ||
7092 BuiltinID == ARM::BI__builtin_arm_ldaex) &&
7093 getContext().getTypeSize(E->getType()) == 64) ||
7094 BuiltinID == ARM::BI__ldrexd) {
7095 Function *F;
7096
7097 switch (BuiltinID) {
7098 default: llvm_unreachable("unexpected builtin");
7099 case ARM::BI__builtin_arm_ldaex:
7100 F = CGM.getIntrinsic(Intrinsic::arm_ldaexd);
7101 break;
7102 case ARM::BI__builtin_arm_ldrexd:
7103 case ARM::BI__builtin_arm_ldrex:
7104 case ARM::BI__ldrexd:
7105 F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
7106 break;
7107 }
7108
7109 Value *LdPtr = EmitScalarExpr(E->getArg(0));
7110 Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
7111 "ldrexd");
7112
7113 Value *Val0 = Builder.CreateExtractValue(Val, 1);
7114 Value *Val1 = Builder.CreateExtractValue(Val, 0);
7115 Val0 = Builder.CreateZExt(Val0, Int64Ty);
7116 Val1 = Builder.CreateZExt(Val1, Int64Ty);
7117
7118 Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
7119 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
7120 Val = Builder.CreateOr(Val, Val1);
7121 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
7122 }
7123
7124 if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
7125 BuiltinID == ARM::BI__builtin_arm_ldaex) {
7126 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
7127
7128 QualType Ty = E->getType();
7129 llvm::Type *RealResTy = ConvertType(Ty);
7130 llvm::Type *PtrTy = llvm::IntegerType::get(
7131 getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
7132 LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
7133
7134 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex
7135 ? Intrinsic::arm_ldaex
7136 : Intrinsic::arm_ldrex,
7137 PtrTy);
7138 Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
7139
7140 if (RealResTy->isPointerTy())
7141 return Builder.CreateIntToPtr(Val, RealResTy);
7142 else {
7143 llvm::Type *IntResTy = llvm::IntegerType::get(
7144 getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
7145 Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
7146 return Builder.CreateBitCast(Val, RealResTy);
7147 }
7148 }
7149
7150 if (BuiltinID == ARM::BI__builtin_arm_strexd ||
7151 ((BuiltinID == ARM::BI__builtin_arm_stlex ||
7152 BuiltinID == ARM::BI__builtin_arm_strex) &&
7153 getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
7154 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
7155 ? Intrinsic::arm_stlexd
7156 : Intrinsic::arm_strexd);
7157 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty);
7158
7159 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
7160 Value *Val = EmitScalarExpr(E->getArg(0));
7161 Builder.CreateStore(Val, Tmp);
7162
7163 Address LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
7164 Val = Builder.CreateLoad(LdPtr);
7165
7166 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
7167 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
7168 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
7169 return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd");
7170 }
7171
7172 if (BuiltinID == ARM::BI__builtin_arm_strex ||
7173 BuiltinID == ARM::BI__builtin_arm_stlex) {
7174 Value *StoreVal = EmitScalarExpr(E->getArg(0));
7175 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
7176
7177 QualType Ty = E->getArg(0)->getType();
7178 llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
7179 getContext().getTypeSize(Ty));
7180 StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
7181
7182 if (StoreVal->getType()->isPointerTy())
7183 StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
7184 else {
7185 llvm::Type *IntTy = llvm::IntegerType::get(
7186 getLLVMContext(),
7187 CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
7188 StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
7189 StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
7190 }
7191
7192 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
7193 ? Intrinsic::arm_stlex
7194 : Intrinsic::arm_strex,
7195 StoreAddr->getType());
7196 return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
7197 }
7198
7199 if (BuiltinID == ARM::BI__builtin_arm_clrex) {
7200 Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
7201 return Builder.CreateCall(F);
7202 }
7203
7204 // CRC32
7205 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
7206 switch (BuiltinID) {
7207 case ARM::BI__builtin_arm_crc32b:
7208 CRCIntrinsicID = Intrinsic::arm_crc32b; break;
7209 case ARM::BI__builtin_arm_crc32cb:
7210 CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
7211 case ARM::BI__builtin_arm_crc32h:
7212 CRCIntrinsicID = Intrinsic::arm_crc32h; break;
7213 case ARM::BI__builtin_arm_crc32ch:
7214 CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
7215 case ARM::BI__builtin_arm_crc32w:
7216 case ARM::BI__builtin_arm_crc32d:
7217 CRCIntrinsicID = Intrinsic::arm_crc32w; break;
7218 case ARM::BI__builtin_arm_crc32cw:
7219 case ARM::BI__builtin_arm_crc32cd:
7220 CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
7221 }
7222
7223 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
7224 Value *Arg0 = EmitScalarExpr(E->getArg(0));
7225 Value *Arg1 = EmitScalarExpr(E->getArg(1));
7226
7227     // crc32{c,}d intrinsics are implemented as two calls to crc32{c,}w
7228     // intrinsics, hence we need different codegen for these cases.
7229 if (BuiltinID == ARM::BI__builtin_arm_crc32d ||
7230 BuiltinID == ARM::BI__builtin_arm_crc32cd) {
7231 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
7232 Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
7233 Value *Arg1b = Builder.CreateLShr(Arg1, C1);
7234 Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);
7235
7236 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
7237 Value *Res = Builder.CreateCall(F, {Arg0, Arg1a});
7238 return Builder.CreateCall(F, {Res, Arg1b});
7239 } else {
7240 Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);
7241
7242 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
7243 return Builder.CreateCall(F, {Arg0, Arg1});
7244 }
7245 }
7246
7247 if (BuiltinID == ARM::BI__builtin_arm_rsr ||
7248 BuiltinID == ARM::BI__builtin_arm_rsr64 ||
7249 BuiltinID == ARM::BI__builtin_arm_rsrp ||
7250 BuiltinID == ARM::BI__builtin_arm_wsr ||
7251 BuiltinID == ARM::BI__builtin_arm_wsr64 ||
7252 BuiltinID == ARM::BI__builtin_arm_wsrp) {
7253
7254 SpecialRegisterAccessKind AccessKind = Write;
7255 if (BuiltinID == ARM::BI__builtin_arm_rsr ||
7256 BuiltinID == ARM::BI__builtin_arm_rsr64 ||
7257 BuiltinID == ARM::BI__builtin_arm_rsrp)
7258 AccessKind = VolatileRead;
7259
7260 bool IsPointerBuiltin = BuiltinID == ARM::BI__builtin_arm_rsrp ||
7261 BuiltinID == ARM::BI__builtin_arm_wsrp;
7262
7263 bool Is64Bit = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
7264 BuiltinID == ARM::BI__builtin_arm_wsr64;
7265
7266 llvm::Type *ValueType;
7267 llvm::Type *RegisterType;
7268 if (IsPointerBuiltin) {
7269 ValueType = VoidPtrTy;
7270 RegisterType = Int32Ty;
7271 } else if (Is64Bit) {
7272 ValueType = RegisterType = Int64Ty;
7273 } else {
7274 ValueType = RegisterType = Int32Ty;
7275 }
7276
7277 return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
7278 AccessKind);
7279 }
7280
7281 // Handle MSVC intrinsics before argument evaluation to prevent double
7282 // evaluation.
7283 if (Optional<MSVCIntrin> MsvcIntId = translateArmToMsvcIntrin(BuiltinID))
7284 return EmitMSVCBuiltinExpr(*MsvcIntId, E);
7285
7286 // Deal with MVE builtins
7287 if (Value *Result = EmitARMMVEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
7288 return Result;
7289 // Handle CDE builtins
7290 if (Value *Result = EmitARMCDEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
7291 return Result;
7292
7293 // Find out if any arguments are required to be integer constant
7294 // expressions.
7295 unsigned ICEArguments = 0;
7296 ASTContext::GetBuiltinTypeError Error;
7297 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
7298 assert(Error == ASTContext::GE_None && "Should not codegen an error");
7299
7300 auto getAlignmentValue32 = [&](Address addr) -> Value* {
7301 return Builder.getInt32(addr.getAlignment().getQuantity());
7302 };
7303
7304 Address PtrOp0 = Address::invalid();
7305 Address PtrOp1 = Address::invalid();
7306 SmallVector<Value*, 4> Ops;
7307 bool HasExtraArg = HasExtraNeonArgument(BuiltinID);
7308 unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0);
7309 for (unsigned i = 0, e = NumArgs; i != e; i++) {
7310 if (i == 0) {
7311 switch (BuiltinID) {
7312 case NEON::BI__builtin_neon_vld1_v:
7313 case NEON::BI__builtin_neon_vld1q_v:
7314 case NEON::BI__builtin_neon_vld1q_lane_v:
7315 case NEON::BI__builtin_neon_vld1_lane_v:
7316 case NEON::BI__builtin_neon_vld1_dup_v:
7317 case NEON::BI__builtin_neon_vld1q_dup_v:
7318 case NEON::BI__builtin_neon_vst1_v:
7319 case NEON::BI__builtin_neon_vst1q_v:
7320 case NEON::BI__builtin_neon_vst1q_lane_v:
7321 case NEON::BI__builtin_neon_vst1_lane_v:
7322 case NEON::BI__builtin_neon_vst2_v:
7323 case NEON::BI__builtin_neon_vst2q_v:
7324 case NEON::BI__builtin_neon_vst2_lane_v:
7325 case NEON::BI__builtin_neon_vst2q_lane_v:
7326 case NEON::BI__builtin_neon_vst3_v:
7327 case NEON::BI__builtin_neon_vst3q_v:
7328 case NEON::BI__builtin_neon_vst3_lane_v:
7329 case NEON::BI__builtin_neon_vst3q_lane_v:
7330 case NEON::BI__builtin_neon_vst4_v:
7331 case NEON::BI__builtin_neon_vst4q_v:
7332 case NEON::BI__builtin_neon_vst4_lane_v:
7333 case NEON::BI__builtin_neon_vst4q_lane_v:
7334 // Get the alignment for the argument in addition to the value;
7335 // we'll use it later.
7336 PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
7337 Ops.push_back(PtrOp0.getPointer());
7338 continue;
7339 }
7340 }
7341 if (i == 1) {
7342 switch (BuiltinID) {
7343 case NEON::BI__builtin_neon_vld2_v:
7344 case NEON::BI__builtin_neon_vld2q_v:
7345 case NEON::BI__builtin_neon_vld3_v:
7346 case NEON::BI__builtin_neon_vld3q_v:
7347 case NEON::BI__builtin_neon_vld4_v:
7348 case NEON::BI__builtin_neon_vld4q_v:
7349 case NEON::BI__builtin_neon_vld2_lane_v:
7350 case NEON::BI__builtin_neon_vld2q_lane_v:
7351 case NEON::BI__builtin_neon_vld3_lane_v:
7352 case NEON::BI__builtin_neon_vld3q_lane_v:
7353 case NEON::BI__builtin_neon_vld4_lane_v:
7354 case NEON::BI__builtin_neon_vld4q_lane_v:
7355 case NEON::BI__builtin_neon_vld2_dup_v:
7356 case NEON::BI__builtin_neon_vld2q_dup_v:
7357 case NEON::BI__builtin_neon_vld3_dup_v:
7358 case NEON::BI__builtin_neon_vld3q_dup_v:
7359 case NEON::BI__builtin_neon_vld4_dup_v:
7360 case NEON::BI__builtin_neon_vld4q_dup_v:
7361 // Get the alignment for the argument in addition to the value;
7362 // we'll use it later.
7363 PtrOp1 = EmitPointerWithAlignment(E->getArg(1));
7364 Ops.push_back(PtrOp1.getPointer());
7365 continue;
7366 }
7367 }
7368
7369 if ((ICEArguments & (1 << i)) == 0) {
7370 Ops.push_back(EmitScalarExpr(E->getArg(i)));
7371 } else {
7372 // If this is required to be a constant, constant fold it so that we know
7373 // that the generated intrinsic gets a ConstantInt.
7374 Ops.push_back(llvm::ConstantInt::get(
7375 getLLVMContext(),
7376 *E->getArg(i)->getIntegerConstantExpr(getContext())));
7377 }
7378 }
7379
7380 switch (BuiltinID) {
7381 default: break;
7382
7383 case NEON::BI__builtin_neon_vget_lane_i8:
7384 case NEON::BI__builtin_neon_vget_lane_i16:
7385 case NEON::BI__builtin_neon_vget_lane_i32:
7386 case NEON::BI__builtin_neon_vget_lane_i64:
7387 case NEON::BI__builtin_neon_vget_lane_bf16:
7388 case NEON::BI__builtin_neon_vget_lane_f32:
7389 case NEON::BI__builtin_neon_vgetq_lane_i8:
7390 case NEON::BI__builtin_neon_vgetq_lane_i16:
7391 case NEON::BI__builtin_neon_vgetq_lane_i32:
7392 case NEON::BI__builtin_neon_vgetq_lane_i64:
7393 case NEON::BI__builtin_neon_vgetq_lane_bf16:
7394 case NEON::BI__builtin_neon_vgetq_lane_f32:
7395 case NEON::BI__builtin_neon_vduph_lane_bf16:
7396 case NEON::BI__builtin_neon_vduph_laneq_bf16:
7397 return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane");
7398
7399 case NEON::BI__builtin_neon_vrndns_f32: {
7400 Value *Arg = EmitScalarExpr(E->getArg(0));
7401 llvm::Type *Tys[] = {Arg->getType()};
7402 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vrintn, Tys);
7403 return Builder.CreateCall(F, {Arg}, "vrndn"); }
7404
7405 case NEON::BI__builtin_neon_vset_lane_i8:
7406 case NEON::BI__builtin_neon_vset_lane_i16:
7407 case NEON::BI__builtin_neon_vset_lane_i32:
7408 case NEON::BI__builtin_neon_vset_lane_i64:
7409 case NEON::BI__builtin_neon_vset_lane_bf16:
7410 case NEON::BI__builtin_neon_vset_lane_f32:
7411 case NEON::BI__builtin_neon_vsetq_lane_i8:
7412 case NEON::BI__builtin_neon_vsetq_lane_i16:
7413 case NEON::BI__builtin_neon_vsetq_lane_i32:
7414 case NEON::BI__builtin_neon_vsetq_lane_i64:
7415 case NEON::BI__builtin_neon_vsetq_lane_bf16:
7416 case NEON::BI__builtin_neon_vsetq_lane_f32:
7417 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
7418
7419 case NEON::BI__builtin_neon_vsha1h_u32:
7420 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops,
7421 "vsha1h");
7422 case NEON::BI__builtin_neon_vsha1cq_u32:
7423 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops,
7424 "vsha1h");
7425 case NEON::BI__builtin_neon_vsha1pq_u32:
7426 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops,
7427 "vsha1h");
7428 case NEON::BI__builtin_neon_vsha1mq_u32:
7429 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
7430 "vsha1h");
7431
7432 case NEON::BI__builtin_neon_vcvth_bf16_f32: {
7433 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vcvtbfp2bf), Ops,
7434 "vcvtbfp2bf");
7435 }
7436
7437 // The ARM _MoveToCoprocessor builtins put the input register value as
7438 // the first argument, but the LLVM intrinsic expects it as the third one.
7439 case ARM::BI_MoveToCoprocessor:
7440 case ARM::BI_MoveToCoprocessor2: {
7441 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI_MoveToCoprocessor ?
7442 Intrinsic::arm_mcr : Intrinsic::arm_mcr2);
7443 return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
7444 Ops[3], Ops[4], Ops[5]});
7445 }
7446 }
7447
7448 // Get the last argument, which specifies the vector type.
7449 assert(HasExtraArg);
7450 const Expr *Arg = E->getArg(E->getNumArgs()-1);
7451 Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(getContext());
7452 if (!Result)
7453 return nullptr;
7454
7455 if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
7456 BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
7457 // Determine the overloaded type of this builtin.
7458 llvm::Type *Ty;
7459 if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
7460 Ty = FloatTy;
7461 else
7462 Ty = DoubleTy;
7463
7464 // Determine whether this is an unsigned conversion or not.
7465 bool usgn = Result->getZExtValue() == 1;
7466 unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
7467
7468 // Call the appropriate intrinsic.
7469 Function *F = CGM.getIntrinsic(Int, Ty);
7470 return Builder.CreateCall(F, Ops, "vcvtr");
7471 }
7472
7473 // Determine the type of this overloaded NEON intrinsic.
7474 NeonTypeFlags Type = Result->getZExtValue();
7475 bool usgn = Type.isUnsigned();
7476 bool rightShift = false;
7477
7478 llvm::FixedVectorType *VTy =
7479 GetNeonType(this, Type, getTarget().hasLegalHalfType(), false,
7480 getTarget().hasBFloat16Type());
7481 llvm::Type *Ty = VTy;
7482 if (!Ty)
7483 return nullptr;
7484
7485 // Many NEON builtins have identical semantics and uses in ARM and
7486 // AArch64. Emit these in a single function.
7487 auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap);
7488 const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
7489 IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
7490 if (Builtin)
7491 return EmitCommonNeonBuiltinExpr(
7492 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
7493 Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch);
7494
7495 unsigned Int;
7496 switch (BuiltinID) {
7497 default: return nullptr;
7498 case NEON::BI__builtin_neon_vld1q_lane_v:
7499 // Handle 64-bit integer elements as a special case. Use shuffles of
7500 // one-element vectors to avoid poor code for i64 in the backend.
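    // e.g. (a sketch) for vld1q_lane_u64(ptr, vec, 1): extract lane 0 of vec as
    // a <1 x i64>, load *ptr as another <1 x i64> via @llvm.arm.neon.vld1, then
    // shufflevector the two with indices {0, 1} to form the <2 x i64> result.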
7501 if (VTy->getElementType()->isIntegerTy(64)) {
7502 // Extract the other lane.
7503 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7504 int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
7505 Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
7506 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
7507 // Load the value as a one-element vector.
7508 Ty = llvm::FixedVectorType::get(VTy->getElementType(), 1);
7509 llvm::Type *Tys[] = {Ty, Int8PtrTy};
7510 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys);
7511 Value *Align = getAlignmentValue32(PtrOp0);
7512 Value *Ld = Builder.CreateCall(F, {Ops[0], Align});
7513 // Combine them.
7514 int Indices[] = {1 - Lane, Lane};
7515 return Builder.CreateShuffleVector(Ops[1], Ld, Indices, "vld1q_lane");
7516 }
7517 LLVM_FALLTHROUGH;
7518 case NEON::BI__builtin_neon_vld1_lane_v: {
7519 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7520 PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType());
7521 Value *Ld = Builder.CreateLoad(PtrOp0);
7522 return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
7523 }
7524 case NEON::BI__builtin_neon_vqrshrn_n_v:
7525 Int =
7526 usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
7527 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
7528 1, true);
7529 case NEON::BI__builtin_neon_vqrshrun_n_v:
7530 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
7531 Ops, "vqrshrun_n", 1, true);
7532 case NEON::BI__builtin_neon_vqshrn_n_v:
7533 Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
7534 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
7535 1, true);
7536 case NEON::BI__builtin_neon_vqshrun_n_v:
7537 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
7538 Ops, "vqshrun_n", 1, true);
7539 case NEON::BI__builtin_neon_vrecpe_v:
7540 case NEON::BI__builtin_neon_vrecpeq_v:
7541 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
7542 Ops, "vrecpe");
7543 case NEON::BI__builtin_neon_vrshrn_n_v:
7544 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
7545 Ops, "vrshrn_n", 1, true);
7546 case NEON::BI__builtin_neon_vrsra_n_v:
7547 case NEON::BI__builtin_neon_vrsraq_n_v:
7548 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7549 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7550 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
7551 Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
7552 Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]});
7553 return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
7554 case NEON::BI__builtin_neon_vsri_n_v:
7555 case NEON::BI__builtin_neon_vsriq_n_v:
7556 rightShift = true;
7557 LLVM_FALLTHROUGH;
7558 case NEON::BI__builtin_neon_vsli_n_v:
7559 case NEON::BI__builtin_neon_vsliq_n_v:
7560 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
7561 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
7562 Ops, "vsli_n");
7563 case NEON::BI__builtin_neon_vsra_n_v:
7564 case NEON::BI__builtin_neon_vsraq_n_v:
7565 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7566 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
7567 return Builder.CreateAdd(Ops[0], Ops[1]);
7568 case NEON::BI__builtin_neon_vst1q_lane_v:
7569 // Handle 64-bit integer elements as a special case. Use a shuffle to get
7570 // a one-element vector and avoid poor code for i64 in the backend.
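    // e.g. (a sketch) vst1q_lane_u64(ptr, vec, lane) extracts the requested lane
    // as a <1 x i64> via shufflevector and stores it with @llvm.arm.neon.vst1.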
7571 if (VTy->getElementType()->isIntegerTy(64)) {
7572 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7573 Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
7574 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
7575 Ops[2] = getAlignmentValue32(PtrOp0);
7576 llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()};
7577 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
7578 Tys), Ops);
7579 }
7580 LLVM_FALLTHROUGH;
7581 case NEON::BI__builtin_neon_vst1_lane_v: {
7582 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7583 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
7584 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
7585 auto St = Builder.CreateStore(Ops[1], Builder.CreateBitCast(PtrOp0, Ty));
7586 return St;
7587 }
7588 case NEON::BI__builtin_neon_vtbl1_v:
7589 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
7590 Ops, "vtbl1");
7591 case NEON::BI__builtin_neon_vtbl2_v:
7592 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
7593 Ops, "vtbl2");
7594 case NEON::BI__builtin_neon_vtbl3_v:
7595 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
7596 Ops, "vtbl3");
7597 case NEON::BI__builtin_neon_vtbl4_v:
7598 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
7599 Ops, "vtbl4");
7600 case NEON::BI__builtin_neon_vtbx1_v:
7601 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
7602 Ops, "vtbx1");
7603 case NEON::BI__builtin_neon_vtbx2_v:
7604 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
7605 Ops, "vtbx2");
7606 case NEON::BI__builtin_neon_vtbx3_v:
7607 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
7608 Ops, "vtbx3");
7609 case NEON::BI__builtin_neon_vtbx4_v:
7610 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
7611 Ops, "vtbx4");
7612 }
7613 }
7614
7615 template<typename Integer>
7616 static Integer GetIntegerConstantValue(const Expr *E, ASTContext &Context) {
7617 return E->getIntegerConstantExpr(Context)->getExtValue();
7618 }
7619
7620 static llvm::Value *SignOrZeroExtend(CGBuilderTy &Builder, llvm::Value *V,
7621 llvm::Type *T, bool Unsigned) {
7622 // Helper function called by Tablegen-constructed ARM MVE builtin codegen,
7623 // which finds it convenient to specify signed/unsigned as a boolean flag.
7624 return Unsigned ? Builder.CreateZExt(V, T) : Builder.CreateSExt(V, T);
7625 }
7626
7627 static llvm::Value *MVEImmediateShr(CGBuilderTy &Builder, llvm::Value *V,
7628 uint32_t Shift, bool Unsigned) {
7629 // MVE helper function for integer shift right. This must handle signed vs
7630 // unsigned, and also deal specially with the case where the shift count is
7631 // equal to the lane size. In LLVM IR, an LShr with that parameter would be
7632 // undefined behavior, but in MVE it's legal, so we must convert it to code
7633 // that is not undefined in IR.
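  // e.g. a 16-bit lane shifted right by 16: unsigned yields an all-zero vector,
  // while signed is emitted as an arithmetic shift right by 15.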
7634 unsigned LaneBits = cast<llvm::VectorType>(V->getType())
7635 ->getElementType()
7636 ->getPrimitiveSizeInBits();
7637 if (Shift == LaneBits) {
7638 // An unsigned shift of the full lane size always generates zero, so we can
7639 // simply emit a zero vector. A signed shift of the full lane size does the
7640 // same thing as shifting by one bit fewer.
7641 if (Unsigned)
7642 return llvm::Constant::getNullValue(V->getType());
7643 else
7644 --Shift;
7645 }
7646 return Unsigned ? Builder.CreateLShr(V, Shift) : Builder.CreateAShr(V, Shift);
7647 }
7648
7649 static llvm::Value *ARMMVEVectorSplat(CGBuilderTy &Builder, llvm::Value *V) {
7650 // MVE-specific helper function for a vector splat, which infers the element
7651 // count of the output vector by knowing that MVE vectors are all 128 bits
7652 // wide.
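  // e.g. a 16-bit scalar becomes an <8 x i16> splat (128 / 16 == 8 lanes).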
7653 unsigned Elements = 128 / V->getType()->getPrimitiveSizeInBits();
7654 return Builder.CreateVectorSplat(Elements, V);
7655 }
7656
7657 static llvm::Value *ARMMVEVectorReinterpret(CGBuilderTy &Builder,
7658 CodeGenFunction *CGF,
7659 llvm::Value *V,
7660 llvm::Type *DestType) {
7661 // Convert one MVE vector type into another by reinterpreting its in-register
7662 // format.
7663 //
7664   // In little-endian mode this is identical to a bitcast (which reinterprets
7665   // the memory format). In big-endian mode they're not necessarily the same, because
7666 // the register and memory formats map to each other differently depending on
7667 // the lane size.
7668 //
7669 // We generate a bitcast whenever we can (if we're little-endian, or if the
7670 // lane sizes are the same anyway). Otherwise we fall back to an IR intrinsic
7671 // that performs the different kind of reinterpretation.
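  // e.g. <8 x i16> -> <16 x i8> on a big-endian target goes through the MVE
  // vreinterpretq intrinsic, while <4 x i32> -> <4 x float> is always a plain
  // bitcast since the lane sizes match.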
7672 if (CGF->getTarget().isBigEndian() &&
7673 V->getType()->getScalarSizeInBits() != DestType->getScalarSizeInBits()) {
7674 return Builder.CreateCall(
7675 CGF->CGM.getIntrinsic(Intrinsic::arm_mve_vreinterpretq,
7676 {DestType, V->getType()}),
7677 V);
7678 } else {
7679 return Builder.CreateBitCast(V, DestType);
7680 }
7681 }
7682
7683 static llvm::Value *VectorUnzip(CGBuilderTy &Builder, llvm::Value *V, bool Odd) {
7684 // Make a shufflevector that extracts every other element of a vector (evens
7685 // or odds, as desired).
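  // e.g. for an <8 x i16> input, Odd == false selects lanes {0, 2, 4, 6} and
  // Odd == true selects {1, 3, 5, 7}.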
7686 SmallVector<int, 16> Indices;
7687 unsigned InputElements =
7688 cast<llvm::FixedVectorType>(V->getType())->getNumElements();
7689 for (unsigned i = 0; i < InputElements; i += 2)
7690 Indices.push_back(i + Odd);
7691 return Builder.CreateShuffleVector(V, llvm::UndefValue::get(V->getType()),
7692 Indices);
7693 }
7694
7695 static llvm::Value *VectorZip(CGBuilderTy &Builder, llvm::Value *V0,
7696 llvm::Value *V1) {
7697 // Make a shufflevector that interleaves two vectors element by element.
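  // e.g. zipping two <4 x i32> vectors a and b yields an <8 x i32> with lanes
  // {a0, b0, a1, b1, a2, b2, a3, b3}.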
7698 assert(V0->getType() == V1->getType() && "Can't zip different vector types");
7699 SmallVector<int, 16> Indices;
7700 unsigned InputElements =
7701 cast<llvm::FixedVectorType>(V0->getType())->getNumElements();
7702 for (unsigned i = 0; i < InputElements; i++) {
7703 Indices.push_back(i);
7704 Indices.push_back(i + InputElements);
7705 }
7706 return Builder.CreateShuffleVector(V0, V1, Indices);
7707 }
7708
7709 template<unsigned HighBit, unsigned OtherBits>
7710 static llvm::Value *ARMMVEConstantSplat(CGBuilderTy &Builder, llvm::Type *VT) {
7711 // MVE-specific helper function to make a vector splat of a constant such as
7712 // UINT_MAX or INT_MIN, in which all bits below the highest one are equal.
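  // e.g. over 32-bit lanes: HighBit=1, OtherBits=0 splats INT_MIN (0x80000000);
  // HighBit=0, OtherBits=1 splats INT_MAX; setting both splats UINT_MAX.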
7713 llvm::Type *T = cast<llvm::VectorType>(VT)->getElementType();
7714 unsigned LaneBits = T->getPrimitiveSizeInBits();
7715 uint32_t Value = HighBit << (LaneBits - 1);
7716 if (OtherBits)
7717 Value |= (1UL << (LaneBits - 1)) - 1;
7718 llvm::Value *Lane = llvm::ConstantInt::get(T, Value);
7719 return ARMMVEVectorSplat(Builder, Lane);
7720 }
7721
7722 static llvm::Value *ARMMVEVectorElementReverse(CGBuilderTy &Builder,
7723 llvm::Value *V,
7724 unsigned ReverseWidth) {
7725 // MVE-specific helper function which reverses the elements of a
7726 // vector within every (ReverseWidth)-bit collection of lanes.
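  // e.g. with 8-bit lanes and ReverseWidth == 32 (Mask == 3), lanes
  // {0, 1, 2, 3, 4, ...} map to {3, 2, 1, 0, 7, ...}, matching a VREV32.8.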
7727 SmallVector<int, 16> Indices;
7728 unsigned LaneSize = V->getType()->getScalarSizeInBits();
7729 unsigned Elements = 128 / LaneSize;
7730 unsigned Mask = ReverseWidth / LaneSize - 1;
7731 for (unsigned i = 0; i < Elements; i++)
7732 Indices.push_back(i ^ Mask);
7733 return Builder.CreateShuffleVector(V, llvm::UndefValue::get(V->getType()),
7734 Indices);
7735 }
7736
7737 Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID,
7738 const CallExpr *E,
7739 ReturnValueSlot ReturnValue,
7740 llvm::Triple::ArchType Arch) {
7741 enum class CustomCodeGen { VLD24, VST24 } CustomCodeGenType;
7742 Intrinsic::ID IRIntr;
7743 unsigned NumVectors;
7744
7745 // Code autogenerated by Tablegen will handle all the simple builtins.
7746 switch (BuiltinID) {
7747 #include "clang/Basic/arm_mve_builtin_cg.inc"
7748
7749 // If we didn't match an MVE builtin id at all, go back to the
7750 // main EmitARMBuiltinExpr.
7751 default:
7752 return nullptr;
7753 }
7754
7755 // Anything that breaks from that switch is an MVE builtin that
7756 // needs handwritten code to generate.
7757
7758 switch (CustomCodeGenType) {
7759
7760 case CustomCodeGen::VLD24: {
7761 llvm::SmallVector<Value *, 4> Ops;
7762 llvm::SmallVector<llvm::Type *, 4> Tys;
7763
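// The vld2q/vld4q builtins return a struct wrapping an array of 2 or 4
// vectors (e.g. uint32x4x2_t lowers to { [2 x <4 x i32>] }), whereas the IR
// intrinsic returns the individual vectors as separate struct fields, so the
// result is repacked below.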
7764 auto MvecCType = E->getType();
7765 auto MvecLType = ConvertType(MvecCType);
7766 assert(MvecLType->isStructTy() &&
7767 "Return type for vld[24]q should be a struct");
7768 assert(MvecLType->getStructNumElements() == 1 &&
7769 "Return-type struct for vld[24]q should have one element");
7770 auto MvecLTypeInner = MvecLType->getStructElementType(0);
7771 assert(MvecLTypeInner->isArrayTy() &&
7772 "Return-type struct for vld[24]q should contain an array");
7773 assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
7774 "Array member of return-type struct vld[24]q has wrong length");
7775 auto VecLType = MvecLTypeInner->getArrayElementType();
7776
7777 Tys.push_back(VecLType);
7778
7779 auto Addr = E->getArg(0);
7780 Ops.push_back(EmitScalarExpr(Addr));
7781 Tys.push_back(ConvertType(Addr->getType()));
7782
7783 Function *F = CGM.getIntrinsic(IRIntr, makeArrayRef(Tys));
7784 Value *LoadResult = Builder.CreateCall(F, Ops);
7785 Value *MvecOut = UndefValue::get(MvecLType);
7786 for (unsigned i = 0; i < NumVectors; ++i) {
7787 Value *Vec = Builder.CreateExtractValue(LoadResult, i);
7788 MvecOut = Builder.CreateInsertValue(MvecOut, Vec, {0, i});
7789 }
7790
7791 if (ReturnValue.isNull())
7792 return MvecOut;
7793 else
7794 return Builder.CreateStore(MvecOut, ReturnValue.getValue());
7795 }
7796
7797 case CustomCodeGen::VST24: {
7798 llvm::SmallVector<Value *, 4> Ops;
7799 llvm::SmallVector<llvm::Type *, 4> Tys;
7800
7801 auto Addr = E->getArg(0);
7802 Ops.push_back(EmitScalarExpr(Addr));
7803 Tys.push_back(ConvertType(Addr->getType()));
7804
7805 auto MvecCType = E->getArg(1)->getType();
7806 auto MvecLType = ConvertType(MvecCType);
7807 assert(MvecLType->isStructTy() && "Data type for vst2q should be a struct");
7808 assert(MvecLType->getStructNumElements() == 1 &&
7809 "Data-type struct for vst2q should have one element");
7810 auto MvecLTypeInner = MvecLType->getStructElementType(0);
7811 assert(MvecLTypeInner->isArrayTy() &&
7812 "Data-type struct for vst2q should contain an array");
7813 assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
7814 "Array member of return-type struct vld[24]q has wrong length");
7815 auto VecLType = MvecLTypeInner->getArrayElementType();
7816
7817 Tys.push_back(VecLType);
7818
7819 AggValueSlot MvecSlot = CreateAggTemp(MvecCType);
7820 EmitAggExpr(E->getArg(1), MvecSlot);
7821 auto Mvec = Builder.CreateLoad(MvecSlot.getAddress());
7822 for (unsigned i = 0; i < NumVectors; i++)
7823 Ops.push_back(Builder.CreateExtractValue(Mvec, {0, i}));
7824
7825 Function *F = CGM.getIntrinsic(IRIntr, makeArrayRef(Tys));
7826 Value *ToReturn = nullptr;
7827 for (unsigned i = 0; i < NumVectors; i++) {
7828 Ops.push_back(llvm::ConstantInt::get(Int32Ty, i));
7829 ToReturn = Builder.CreateCall(F, Ops);
7830 Ops.pop_back();
7831 }
7832 return ToReturn;
7833 }
7834 }
7835 llvm_unreachable("unknown custom codegen type.");
7836 }
7837
7838 Value *CodeGenFunction::EmitARMCDEBuiltinExpr(unsigned BuiltinID,
7839 const CallExpr *E,
7840 ReturnValueSlot ReturnValue,
7841 llvm::Triple::ArchType Arch) {
7842 switch (BuiltinID) {
7843 default:
7844 return nullptr;
7845 #include "clang/Basic/arm_cde_builtin_cg.inc"
7846 }
7847 }
7848
7849 static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
7850 const CallExpr *E,
7851 SmallVectorImpl<Value *> &Ops,
7852 llvm::Triple::ArchType Arch) {
7853 unsigned int Int = 0;
7854 const char *s = nullptr;
7855
7856 switch (BuiltinID) {
7857 default:
7858 return nullptr;
7859 case NEON::BI__builtin_neon_vtbl1_v:
7860 case NEON::BI__builtin_neon_vqtbl1_v:
7861 case NEON::BI__builtin_neon_vqtbl1q_v:
7862 case NEON::BI__builtin_neon_vtbl2_v:
7863 case NEON::BI__builtin_neon_vqtbl2_v:
7864 case NEON::BI__builtin_neon_vqtbl2q_v:
7865 case NEON::BI__builtin_neon_vtbl3_v:
7866 case NEON::BI__builtin_neon_vqtbl3_v:
7867 case NEON::BI__builtin_neon_vqtbl3q_v:
7868 case NEON::BI__builtin_neon_vtbl4_v:
7869 case NEON::BI__builtin_neon_vqtbl4_v:
7870 case NEON::BI__builtin_neon_vqtbl4q_v:
7871 break;
7872 case NEON::BI__builtin_neon_vtbx1_v:
7873 case NEON::BI__builtin_neon_vqtbx1_v:
7874 case NEON::BI__builtin_neon_vqtbx1q_v:
7875 case NEON::BI__builtin_neon_vtbx2_v:
7876 case NEON::BI__builtin_neon_vqtbx2_v:
7877 case NEON::BI__builtin_neon_vqtbx2q_v:
7878 case NEON::BI__builtin_neon_vtbx3_v:
7879 case NEON::BI__builtin_neon_vqtbx3_v:
7880 case NEON::BI__builtin_neon_vqtbx3q_v:
7881 case NEON::BI__builtin_neon_vtbx4_v:
7882 case NEON::BI__builtin_neon_vqtbx4_v:
7883 case NEON::BI__builtin_neon_vqtbx4q_v:
7884 break;
7885 }
7886
7887 assert(E->getNumArgs() >= 3);
7888
7889 // Get the last argument, which specifies the vector type.
7890 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
7891 Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(CGF.getContext());
7892 if (!Result)
7893 return nullptr;
7894
7895 // Determine the type of this overloaded NEON intrinsic.
7896 NeonTypeFlags Type = Result->getZExtValue();
7897 llvm::FixedVectorType *Ty = GetNeonType(&CGF, Type);
7898 if (!Ty)
7899 return nullptr;
7900
7901 CodeGen::CGBuilderTy &Builder = CGF.Builder;
7902
7903 // AArch64 scalar builtins are not overloaded; they do not have an extra
7904 // argument that specifies the vector type, so each case is handled individually.
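// For example, vtbl2 (two 64-bit table registers) is emitted as a single
// llvm.aarch64.neon.tbl1 call; packTBLDVectorList (defined earlier in this
// file) first concatenates the two 64-bit halves into one 128-bit table.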
7905 switch (BuiltinID) {
7906 case NEON::BI__builtin_neon_vtbl1_v: {
7907 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 1), nullptr,
7908 Ops[1], Ty, Intrinsic::aarch64_neon_tbl1,
7909 "vtbl1");
7910 }
7911 case NEON::BI__builtin_neon_vtbl2_v: {
7912 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 2), nullptr,
7913 Ops[2], Ty, Intrinsic::aarch64_neon_tbl1,
7914 "vtbl1");
7915 }
7916 case NEON::BI__builtin_neon_vtbl3_v: {
7917 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 3), nullptr,
7918 Ops[3], Ty, Intrinsic::aarch64_neon_tbl2,
7919 "vtbl2");
7920 }
7921 case NEON::BI__builtin_neon_vtbl4_v: {
7922 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 4), nullptr,
7923 Ops[4], Ty, Intrinsic::aarch64_neon_tbl2,
7924 "vtbl2");
7925 }
7926 case NEON::BI__builtin_neon_vtbx1_v: {
7927 Value *TblRes =
7928 packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 1), nullptr, Ops[2],
7929 Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1");
7930
7931 llvm::Constant *EightV = ConstantInt::get(Ty, 8);
7932 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
7933 CmpRes = Builder.CreateSExt(CmpRes, Ty);
7934
7935 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
7936 Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
7937 return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
7938 }
7939 case NEON::BI__builtin_neon_vtbx2_v: {
7940 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 2), Ops[0],
7941 Ops[3], Ty, Intrinsic::aarch64_neon_tbx1,
7942 "vtbx1");
7943 }
7944 case NEON::BI__builtin_neon_vtbx3_v: {
7945 Value *TblRes =
7946 packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 3), nullptr, Ops[4],
7947 Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2");
7948
7949 llvm::Constant *TwentyFourV = ConstantInt::get(Ty, 24);
7950 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
7951 TwentyFourV);
7952 CmpRes = Builder.CreateSExt(CmpRes, Ty);
7953
7954 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
7955 Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
7956 return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
7957 }
7958 case NEON::BI__builtin_neon_vtbx4_v: {
7959 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 4), Ops[0],
7960 Ops[5], Ty, Intrinsic::aarch64_neon_tbx2,
7961 "vtbx2");
7962 }
7963 case NEON::BI__builtin_neon_vqtbl1_v:
7964 case NEON::BI__builtin_neon_vqtbl1q_v:
7965 Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break;
7966 case NEON::BI__builtin_neon_vqtbl2_v:
7967 case NEON::BI__builtin_neon_vqtbl2q_v:
7968 Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break;
7969 case NEON::BI__builtin_neon_vqtbl3_v:
7970 case NEON::BI__builtin_neon_vqtbl3q_v:
7971 Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break;
7972 case NEON::BI__builtin_neon_vqtbl4_v:
7973 case NEON::BI__builtin_neon_vqtbl4q_v:
7974 Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break;
7975 case NEON::BI__builtin_neon_vqtbx1_v:
7976 case NEON::BI__builtin_neon_vqtbx1q_v:
7977 Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break;
7978 case NEON::BI__builtin_neon_vqtbx2_v:
7979 case NEON::BI__builtin_neon_vqtbx2q_v:
7980 Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break;
7981 case NEON::BI__builtin_neon_vqtbx3_v:
7982 case NEON::BI__builtin_neon_vqtbx3q_v:
7983 Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break;
7984 case NEON::BI__builtin_neon_vqtbx4_v:
7985 case NEON::BI__builtin_neon_vqtbx4q_v:
7986 Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break;
7988 }
7989
7990 if (!Int)
7991 return nullptr;
7992
7993 Function *F = CGF.CGM.getIntrinsic(Int, Ty);
7994 return CGF.EmitNeonCall(F, Ops, s);
7995 }
7996
7997 Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
7998 auto *VTy = llvm::FixedVectorType::get(Int16Ty, 4);
7999 Op = Builder.CreateBitCast(Op, Int16Ty);
8000 Value *V = UndefValue::get(VTy);
8001 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
8002 Op = Builder.CreateInsertElement(V, Op, CI);
8003 return Op;
8004 }
8005
8006 /// SVEBuiltinMemEltTy - Returns the memory element type for this memory
8007 /// access builtin. Only required if it can't be inferred from the base pointer
8008 /// operand.
8009 llvm::Type *CodeGenFunction::SVEBuiltinMemEltTy(SVETypeFlags TypeFlags) {
8010 switch (TypeFlags.getMemEltType()) {
8011 case SVETypeFlags::MemEltTyDefault:
8012 return getEltType(TypeFlags);
8013 case SVETypeFlags::MemEltTyInt8:
8014 return Builder.getInt8Ty();
8015 case SVETypeFlags::MemEltTyInt16:
8016 return Builder.getInt16Ty();
8017 case SVETypeFlags::MemEltTyInt32:
8018 return Builder.getInt32Ty();
8019 case SVETypeFlags::MemEltTyInt64:
8020 return Builder.getInt64Ty();
8021 }
8022 llvm_unreachable("Unknown MemEltType");
8023 }
8024
8025 llvm::Type *CodeGenFunction::getEltType(SVETypeFlags TypeFlags) {
8026 switch (TypeFlags.getEltType()) {
8027 default:
8028 llvm_unreachable("Invalid SVETypeFlag!");
8029
8030 case SVETypeFlags::EltTyInt8:
8031 return Builder.getInt8Ty();
8032 case SVETypeFlags::EltTyInt16:
8033 return Builder.getInt16Ty();
8034 case SVETypeFlags::EltTyInt32:
8035 return Builder.getInt32Ty();
8036 case SVETypeFlags::EltTyInt64:
8037 return Builder.getInt64Ty();
8038
8039 case SVETypeFlags::EltTyFloat16:
8040 return Builder.getHalfTy();
8041 case SVETypeFlags::EltTyFloat32:
8042 return Builder.getFloatTy();
8043 case SVETypeFlags::EltTyFloat64:
8044 return Builder.getDoubleTy();
8045
8046 case SVETypeFlags::EltTyBFloat16:
8047 return Builder.getBFloatTy();
8048
8049 case SVETypeFlags::EltTyBool8:
8050 case SVETypeFlags::EltTyBool16:
8051 case SVETypeFlags::EltTyBool32:
8052 case SVETypeFlags::EltTyBool64:
8053 return Builder.getInt1Ty();
8054 }
8055 }
8056
8057 // Return the llvm predicate vector type corresponding to the specified element
8058 // TypeFlags.
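// For example, 64-bit elements (EltTyInt64, EltTyFloat64, EltTyBool64) all map
// to <vscale x 2 x i1>, while 8-bit elements map to <vscale x 16 x i1>.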
8059 llvm::ScalableVectorType *
8060 CodeGenFunction::getSVEPredType(SVETypeFlags TypeFlags) {
8061 switch (TypeFlags.getEltType()) {
8062 default: llvm_unreachable("Unhandled SVETypeFlag!");
8063
8064 case SVETypeFlags::EltTyInt8:
8065 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
8066 case SVETypeFlags::EltTyInt16:
8067 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
8068 case SVETypeFlags::EltTyInt32:
8069 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
8070 case SVETypeFlags::EltTyInt64:
8071 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
8072
8073 case SVETypeFlags::EltTyBFloat16:
8074 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
8075 case SVETypeFlags::EltTyFloat16:
8076 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
8077 case SVETypeFlags::EltTyFloat32:
8078 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
8079 case SVETypeFlags::EltTyFloat64:
8080 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
8081
8082 case SVETypeFlags::EltTyBool8:
8083 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
8084 case SVETypeFlags::EltTyBool16:
8085 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
8086 case SVETypeFlags::EltTyBool32:
8087 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
8088 case SVETypeFlags::EltTyBool64:
8089 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
8090 }
8091 }
8092
8093 // Return the llvm vector type corresponding to the specified element TypeFlags.
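// For example, EltTyFloat16 maps to <vscale x 8 x half> and EltTyBool32 maps
// to <vscale x 4 x i1>.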
8094 llvm::ScalableVectorType *
8095 CodeGenFunction::getSVEType(const SVETypeFlags &TypeFlags) {
8096 switch (TypeFlags.getEltType()) {
8097 default:
8098 llvm_unreachable("Invalid SVETypeFlag!");
8099
8100 case SVETypeFlags::EltTyInt8:
8101 return llvm::ScalableVectorType::get(Builder.getInt8Ty(), 16);
8102 case SVETypeFlags::EltTyInt16:
8103 return llvm::ScalableVectorType::get(Builder.getInt16Ty(), 8);
8104 case SVETypeFlags::EltTyInt32:
8105 return llvm::ScalableVectorType::get(Builder.getInt32Ty(), 4);
8106 case SVETypeFlags::EltTyInt64:
8107 return llvm::ScalableVectorType::get(Builder.getInt64Ty(), 2);
8108
8109 case SVETypeFlags::EltTyFloat16:
8110 return llvm::ScalableVectorType::get(Builder.getHalfTy(), 8);
8111 case SVETypeFlags::EltTyBFloat16:
8112 return llvm::ScalableVectorType::get(Builder.getBFloatTy(), 8);
8113 case SVETypeFlags::EltTyFloat32:
8114 return llvm::ScalableVectorType::get(Builder.getFloatTy(), 4);
8115 case SVETypeFlags::EltTyFloat64:
8116 return llvm::ScalableVectorType::get(Builder.getDoubleTy(), 2);
8117
8118 case SVETypeFlags::EltTyBool8:
8119 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
8120 case SVETypeFlags::EltTyBool16:
8121 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
8122 case SVETypeFlags::EltTyBool32:
8123 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
8124 case SVETypeFlags::EltTyBool64:
8125 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
8126 }
8127 }
8128
8129 llvm::Value *CodeGenFunction::EmitSVEAllTruePred(SVETypeFlags TypeFlags) {
8130 Function *Ptrue =
8131 CGM.getIntrinsic(Intrinsic::aarch64_sve_ptrue, getSVEPredType(TypeFlags));
8132 return Builder.CreateCall(Ptrue, {Builder.getInt32(/*SV_ALL*/ 31)});
8133 }
8134
8135 constexpr unsigned SVEBitsPerBlock = 128;
8136
8137 static llvm::ScalableVectorType *getSVEVectorForElementType(llvm::Type *EltTy) {
8138 unsigned NumElts = SVEBitsPerBlock / EltTy->getScalarSizeInBits();
8139 return llvm::ScalableVectorType::get(EltTy, NumElts);
8140 }
8141
8142 // Reinterpret the input predicate so that it can be used to correctly isolate
8143 // the elements of the specified datatype.
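// For example, an svbool_t predicate (<vscale x 16 x i1>) that guards a
// <vscale x 2 x i64> operation is narrowed to <vscale x 2 x i1> via
// llvm.aarch64.sve.convert.from.svbool.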
8144 Value *CodeGenFunction::EmitSVEPredicateCast(Value *Pred,
8145 llvm::ScalableVectorType *VTy) {
8146 auto *RTy = llvm::VectorType::get(IntegerType::get(getLLVMContext(), 1), VTy);
8147 if (Pred->getType() == RTy)
8148 return Pred;
8149
8150 unsigned IntID;
8151 llvm::Type *IntrinsicTy;
8152 switch (VTy->getMinNumElements()) {
8153 default:
8154 llvm_unreachable("unsupported element count!");
8155 case 2:
8156 case 4:
8157 case 8:
8158 IntID = Intrinsic::aarch64_sve_convert_from_svbool;
8159 IntrinsicTy = RTy;
8160 break;
8161 case 16:
8162 IntID = Intrinsic::aarch64_sve_convert_to_svbool;
8163 IntrinsicTy = Pred->getType();
8164 break;
8165 }
8166
8167 Function *F = CGM.getIntrinsic(IntID, IntrinsicTy);
8168 Value *C = Builder.CreateCall(F, Pred);
8169 assert(C->getType() == RTy && "Unexpected return type!");
8170 return C;
8171 }
8172
8173 Value *CodeGenFunction::EmitSVEGatherLoad(SVETypeFlags TypeFlags,
8174 SmallVectorImpl<Value *> &Ops,
8175 unsigned IntID) {
8176 auto *ResultTy = getSVEType(TypeFlags);
8177 auto *OverloadedTy =
8178 llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), ResultTy);
8179
8180 // At the ACLE level there's only one predicate type, svbool_t, which is
8181 // mapped to <n x 16 x i1>. However, this might be incompatible with the
8182 // actual type being loaded. For example, when loading doubles (64-bit
8183 // elements) the predicate should be <n x 2 x i1> instead. At the IR level
8184 // the predicate type and the loaded data type must match; cast accordingly.
8185 Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);
8186
8187 Function *F = nullptr;
8188 if (Ops[1]->getType()->isVectorTy())
8189 // This is the "vector base, scalar offset" case. In order to uniquely
8190 // map this built-in to an LLVM IR intrinsic, we need both the return type
8191 // and the type of the vector base.
8192 F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[1]->getType()});
8193 else
8194 // This is the "scalar base, vector offset" case. The type of the offset
8195 // is encoded in the name of the intrinsic. We only need to specify the
8196 // return type in order to uniquely map this built-in to an LLVM IR
8197 // intrinsic.
8198 F = CGM.getIntrinsic(IntID, OverloadedTy);
8199
8200 // Pass 0 when the offset is missing. This can only be applied when using
8201 // the "vector base" addressing mode for which ACLE allows no offset. The
8202 // corresponding LLVM IR always requires an offset.
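// (For example, svld1_gather_u64base_s64(pg, bases) has no offset operand at
// the C level, so a constant zero offset is appended here.)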
8203 if (Ops.size() == 2) {
8204 assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
8205 Ops.push_back(ConstantInt::get(Int64Ty, 0));
8206 }
8207
8208 // For "vector base, scalar index" scale the index so that it becomes a
8209 // scalar offset.
8210 if (!TypeFlags.isByteIndexed() && Ops[1]->getType()->isVectorTy()) {
8211 unsigned BytesPerElt =
8212 OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
8213 Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
8214 Ops[2] = Builder.CreateMul(Ops[2], Scale);
8215 }
8216
8217 Value *Call = Builder.CreateCall(F, Ops);
8218
8219 // The following sext/zext is only needed when ResultTy != OverloadedTy. In
8220 // other cases it's folded into a nop.
8221 return TypeFlags.isZExtReturn() ? Builder.CreateZExt(Call, ResultTy)
8222 : Builder.CreateSExt(Call, ResultTy);
8223 }
8224
8225 Value *CodeGenFunction::EmitSVEScatterStore(SVETypeFlags TypeFlags,
8226 SmallVectorImpl<Value *> &Ops,
8227 unsigned IntID) {
8228 auto *SrcDataTy = getSVEType(TypeFlags);
8229 auto *OverloadedTy =
8230 llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), SrcDataTy);
8231
8232 // In ACLE the source data is passed in the last argument, whereas in LLVM IR
8233 // it's the first argument. Move it accordingly.
8234 Ops.insert(Ops.begin(), Ops.pop_back_val());
8235
8236 Function *F = nullptr;
8237 if (Ops[2]->getType()->isVectorTy())
8238 // This is the "vector base, scalar offset" case. In order to uniquely
8239 // map this built-in to an LLVM IR intrinsic, we need both the type of
8240 // the data being stored and the type of the vector base.
8241 F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[2]->getType()});
8242 else
8243 // This is the "scalar base, vector offset" case. The type of the offset
8244 // is encoded in the name of the intrinsic. We only need to specify the
8245 // type of the data being stored in order to uniquely map this built-in
8246 // to an LLVM IR intrinsic.
8247 F = CGM.getIntrinsic(IntID, OverloadedTy);
8248
8249 // Pass 0 when the offset is missing. This can only be applied when using
8250 // the "vector base" addressing mode for which ACLE allows no offset. The
8251 // corresponding LLVM IR always requires an offset.
8252 if (Ops.size() == 3) {
8253 assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
8254 Ops.push_back(ConstantInt::get(Int64Ty, 0));
8255 }
8256
8257 // Truncation is needed when SrcDataTy != OverloadedTy. In other cases it's
8258 // folded into a nop.
8259 Ops[0] = Builder.CreateTrunc(Ops[0], OverloadedTy);
8260
8261 // At the ACLE level there's only one predicate type, svbool_t, which is
8262 // mapped to <n x 16 x i1>. However, this might be incompatible with the
8263 // actual type being stored. For example, when storing doubles (64-bit
8264 // elements) the predicate should be <n x 2 x i1> instead. At the IR level
8265 // the predicate type and the stored data type must match; cast accordingly.
8266 Ops[1] = EmitSVEPredicateCast(Ops[1], OverloadedTy);
8267
8268 // For "vector base, scalar index" scale the index so that it becomes a
8269 // scalar offset.
8270 if (!TypeFlags.isByteIndexed() && Ops[2]->getType()->isVectorTy()) {
8271 unsigned BytesPerElt =
8272 OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
8273 Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
8274 Ops[3] = Builder.CreateMul(Ops[3], Scale);
8275 }
8276
8277 return Builder.CreateCall(F, Ops);
8278 }
8279
8280 Value *CodeGenFunction::EmitSVEGatherPrefetch(SVETypeFlags TypeFlags,
8281 SmallVectorImpl<Value *> &Ops,
8282 unsigned IntID) {
8283 // The gather prefetches are overloaded on the vector input - this can either
8284 // be the vector of base addresses or vector of offsets.
8285 auto *OverloadedTy = dyn_cast<llvm::ScalableVectorType>(Ops[1]->getType());
8286 if (!OverloadedTy)
8287 OverloadedTy = cast<llvm::ScalableVectorType>(Ops[2]->getType());
8288
8289 // Cast the predicate from svbool_t to the right number of elements.
8290 Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);
8291
8292 // vector + imm addressing modes
8293 if (Ops[1]->getType()->isVectorTy()) {
8294 if (Ops.size() == 3) {
8295 // Pass 0 for 'vector+imm' when the index is omitted.
8296 Ops.push_back(ConstantInt::get(Int64Ty, 0));
8297
8298 // The sv_prfop is the last operand in the builtin and IR intrinsic.
8299 std::swap(Ops[2], Ops[3]);
8300 } else {
8301 // Index needs to be passed as scaled offset.
8302 llvm::Type *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
8303 unsigned BytesPerElt = MemEltTy->getPrimitiveSizeInBits() / 8;
8304 Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
8305 Ops[2] = Builder.CreateMul(Ops[2], Scale);
8306 }
8307 }
8308
8309 Function *F = CGM.getIntrinsic(IntID, OverloadedTy);
8310 return Builder.CreateCall(F, Ops);
8311 }
8312
8313 Value *CodeGenFunction::EmitSVEStructLoad(SVETypeFlags TypeFlags,
8314 SmallVectorImpl<Value*> &Ops,
8315 unsigned IntID) {
8316 llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
8317 auto VecPtrTy = llvm::PointerType::getUnqual(VTy);
8318 auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType());
8319
8320 unsigned N;
8321 switch (IntID) {
8322 case Intrinsic::aarch64_sve_ld2:
8323 N = 2;
8324 break;
8325 case Intrinsic::aarch64_sve_ld3:
8326 N = 3;
8327 break;
8328 case Intrinsic::aarch64_sve_ld4:
8329 N = 4;
8330 break;
8331 default:
8332 llvm_unreachable("unknown intrinsic!");
8333 }
8334 auto RetTy = llvm::VectorType::get(VTy->getElementType(),
8335 VTy->getElementCount() * N);
8336
8337 Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
8338 Value *BasePtr= Builder.CreateBitCast(Ops[1], VecPtrTy);
8339 Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0);
8340 BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset);
8341 BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy);
8342
8343 Function *F = CGM.getIntrinsic(IntID, {RetTy, Predicate->getType()});
8344 return Builder.CreateCall(F, { Predicate, BasePtr });
8345 }
8346
8347 Value *CodeGenFunction::EmitSVEStructStore(SVETypeFlags TypeFlags,
8348 SmallVectorImpl<Value*> &Ops,
8349 unsigned IntID) {
8350 llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
8351 auto VecPtrTy = llvm::PointerType::getUnqual(VTy);
8352 auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType());
8353
8354 unsigned N;
8355 switch (IntID) {
8356 case Intrinsic::aarch64_sve_st2:
8357 N = 2;
8358 break;
8359 case Intrinsic::aarch64_sve_st3:
8360 N = 3;
8361 break;
8362 case Intrinsic::aarch64_sve_st4:
8363 N = 4;
8364 break;
8365 default:
8366 llvm_unreachable("unknown intrinsic!");
8367 }
8368 auto TupleTy =
8369 llvm::VectorType::get(VTy->getElementType(), VTy->getElementCount() * N);
8370
8371 Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
8372 Value *BasePtr = Builder.CreateBitCast(Ops[1], VecPtrTy);
8373 Value *Offset = Ops.size() > 3 ? Ops[2] : Builder.getInt32(0);
8374 Value *Val = Ops.back();
8375 BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset);
8376 BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy);
8377
8378 // The llvm.aarch64.sve.st2/3/4 intrinsics take legal part vectors, so we
8379 // need to break up the tuple vector.
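// For example, an svint32x2_t argument arrives here as a single
// <vscale x 8 x i32> tuple value and is split into two <vscale x 4 x i32>
// parts with llvm.aarch64.sve.tuple.get.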
8380 SmallVector<llvm::Value*, 5> Operands;
8381 Function *FExtr =
8382 CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy});
8383 for (unsigned I = 0; I < N; ++I)
8384 Operands.push_back(Builder.CreateCall(FExtr, {Val, Builder.getInt32(I)}));
8385 Operands.append({Predicate, BasePtr});
8386
8387 Function *F = CGM.getIntrinsic(IntID, { VTy });
8388 return Builder.CreateCall(F, Operands);
8389 }
8390
8391 // SVE2's svpmullb and svpmullt builtins are similar to the svpmullb_pair and
8392 // svpmullt_pair intrinsics, with the exception that their results are bitcast
8393 // to a wider type.
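// For example, svpmullb_u64 is emitted as llvm.aarch64.sve.pmullb.pair on
// <vscale x 4 x i32> operands, and the result is then reinterpreted as
// <vscale x 2 x i64>.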
8394 Value *CodeGenFunction::EmitSVEPMull(SVETypeFlags TypeFlags,
8395 SmallVectorImpl<Value *> &Ops,
8396 unsigned BuiltinID) {
8397 // Splat scalar operand to vector (intrinsics with _n infix)
8398 if (TypeFlags.hasSplatOperand()) {
8399 unsigned OpNo = TypeFlags.getSplatOperand();
8400 Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
8401 }
8402
8403 // The pair-wise function has a narrower overloaded type.
8404 Function *F = CGM.getIntrinsic(BuiltinID, Ops[0]->getType());
8405 Value *Call = Builder.CreateCall(F, {Ops[0], Ops[1]});
8406
8407 // Now bitcast to the wider result type.
8408 llvm::ScalableVectorType *Ty = getSVEType(TypeFlags);
8409 return EmitSVEReinterpret(Call, Ty);
8410 }
8411
8412 Value *CodeGenFunction::EmitSVEMovl(SVETypeFlags TypeFlags,
8413 ArrayRef<Value *> Ops, unsigned BuiltinID) {
8414 llvm::Type *OverloadedTy = getSVEType(TypeFlags);
8415 Function *F = CGM.getIntrinsic(BuiltinID, OverloadedTy);
8416 return Builder.CreateCall(F, {Ops[0], Builder.getInt32(0)});
8417 }
8418
8419 Value *CodeGenFunction::EmitSVEPrefetchLoad(SVETypeFlags TypeFlags,
8420 SmallVectorImpl<Value *> &Ops,
8421 unsigned BuiltinID) {
8422 auto *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
8423 auto *VectorTy = getSVEVectorForElementType(MemEltTy);
8424 auto *MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
8425
8426 Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
8427 Value *BasePtr = Ops[1];
8428
8429 // Apply the index operand if it is not omitted.
8430 if (Ops.size() > 3) {
8431 BasePtr = Builder.CreateBitCast(BasePtr, MemoryTy->getPointerTo());
8432 BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]);
8433 }
8434
8435 // Prefetch intrinsics always expect an i8*
8436 BasePtr = Builder.CreateBitCast(BasePtr, llvm::PointerType::getUnqual(Int8Ty));
8437 Value *PrfOp = Ops.back();
8438
8439 Function *F = CGM.getIntrinsic(BuiltinID, Predicate->getType());
8440 return Builder.CreateCall(F, {Predicate, BasePtr, PrfOp});
8441 }
8442
8443 Value *CodeGenFunction::EmitSVEMaskedLoad(const CallExpr *E,
8444 llvm::Type *ReturnTy,
8445 SmallVectorImpl<Value *> &Ops,
8446 unsigned BuiltinID,
8447 bool IsZExtReturn) {
8448 QualType LangPTy = E->getArg(1)->getType();
8449 llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
8450 LangPTy->getAs<PointerType>()->getPointeeType());
8451
8452 // The vector type that is returned may be different from the
8453 // eventual type loaded from memory.
8454 auto VectorTy = cast<llvm::ScalableVectorType>(ReturnTy);
8455 auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
8456
8457 Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
8458 Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo());
8459 Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0);
8460 BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset);
8461
8462 BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo());
8463 Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
8464 Value *Load = Builder.CreateCall(F, {Predicate, BasePtr});
8465
8466 return IsZExtReturn ? Builder.CreateZExt(Load, VectorTy)
8467 : Builder.CreateSExt(Load, VectorTy);
8468 }
8469
8470 Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E,
8471 SmallVectorImpl<Value *> &Ops,
8472 unsigned BuiltinID) {
8473 QualType LangPTy = E->getArg(1)->getType();
8474 llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
8475 LangPTy->getAs<PointerType>()->getPointeeType());
8476
8477 // The vector type that is stored may be different from the
8478 // eventual type stored to memory.
8479 auto VectorTy = cast<llvm::ScalableVectorType>(Ops.back()->getType());
8480 auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
8481
8482 Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
8483 Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo());
8484 Value *Offset = Ops.size() == 4 ? Ops[2] : Builder.getInt32(0);
8485 BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset);
8486
8487 // Last value is always the data
8488 llvm::Value *Val = Builder.CreateTrunc(Ops.back(), MemoryTy);
8489
8490 BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo());
8491 Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
8492 return Builder.CreateCall(F, {Val, Predicate, BasePtr});
8493 }
8494
8495 // Limit the usage of scalable llvm IR generated by the ACLE by using the
8496 // sve dup.x intrinsic instead of IRBuilder::CreateVectorSplat.
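// For example, splatting an i32 scalar emits a call to
// llvm.aarch64.sve.dup.x.nxv4i32 rather than an insertelement/shufflevector
// splat sequence.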
8497 Value *CodeGenFunction::EmitSVEDupX(Value *Scalar, llvm::Type *Ty) {
8498 auto F = CGM.getIntrinsic(Intrinsic::aarch64_sve_dup_x, Ty);
8499 return Builder.CreateCall(F, Scalar);
8500 }
8501
8502 Value *CodeGenFunction::EmitSVEDupX(Value *Scalar) {
8503 return EmitSVEDupX(Scalar, getSVEVectorForElementType(Scalar->getType()));
8504 }
8505
8506 Value *CodeGenFunction::EmitSVEReinterpret(Value *Val, llvm::Type *Ty) {
8507 // FIXME: For big endian this needs an additional REV, or needs a separate
8508 // intrinsic that is code-generated as a no-op, because the LLVM bitcast
8509 // instruction is defined as 'bitwise' equivalent from memory point of
8510 // view (when storing/reloading), whereas the svreinterpret builtin
8511 // implements bitwise equivalent cast from register point of view.
8512 // LLVM CodeGen for a bitcast must add an explicit REV for big-endian.
8513 return Builder.CreateBitCast(Val, Ty);
8514 }
8515
8516 static void InsertExplicitZeroOperand(CGBuilderTy &Builder, llvm::Type *Ty,
8517 SmallVectorImpl<Value *> &Ops) {
8518 auto *SplatZero = Constant::getNullValue(Ty);
8519 Ops.insert(Ops.begin(), SplatZero);
8520 }
8521
8522 static void InsertExplicitUndefOperand(CGBuilderTy &Builder, llvm::Type *Ty,
8523 SmallVectorImpl<Value *> &Ops) {
8524 auto *SplatUndef = UndefValue::get(Ty);
8525 Ops.insert(Ops.begin(), SplatUndef);
8526 }
8527
8528 SmallVector<llvm::Type *, 2> CodeGenFunction::getSVEOverloadTypes(
8529 SVETypeFlags TypeFlags, llvm::Type *ResultType, ArrayRef<Value *> Ops) {
8530 if (TypeFlags.isOverloadNone())
8531 return {};
8532
8533 llvm::Type *DefaultType = getSVEType(TypeFlags);
8534
8535 if (TypeFlags.isOverloadWhile())
8536 return {DefaultType, Ops[1]->getType()};
8537
8538 if (TypeFlags.isOverloadWhileRW())
8539 return {getSVEPredType(TypeFlags), Ops[0]->getType()};
8540
8541 if (TypeFlags.isOverloadCvt() || TypeFlags.isTupleSet())
8542 return {Ops[0]->getType(), Ops.back()->getType()};
8543
8544 if (TypeFlags.isTupleCreate() || TypeFlags.isTupleGet())
8545 return {ResultType, Ops[0]->getType()};
8546
8547 assert(TypeFlags.isOverloadDefault() && "Unexpected value for overloads");
8548 return {DefaultType};
8549 }
8550
8551 Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
8552 const CallExpr *E) {
8553 // Find out if any arguments are required to be integer constant expressions.
8554 unsigned ICEArguments = 0;
8555 ASTContext::GetBuiltinTypeError Error;
8556 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
8557 assert(Error == ASTContext::GE_None && "Should not codegen an error");
8558
8559 llvm::Type *Ty = ConvertType(E->getType());
8560 if (BuiltinID >= SVE::BI__builtin_sve_reinterpret_s8_s8 &&
8561 BuiltinID <= SVE::BI__builtin_sve_reinterpret_f64_f64) {
8562 Value *Val = EmitScalarExpr(E->getArg(0));
8563 return EmitSVEReinterpret(Val, Ty);
8564 }
8565
8566 llvm::SmallVector<Value *, 4> Ops;
8567 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
8568 if ((ICEArguments & (1 << i)) == 0)
8569 Ops.push_back(EmitScalarExpr(E->getArg(i)));
8570 else {
8571 // If this is required to be a constant, constant fold it so that we know
8572 // that the generated intrinsic gets a ConstantInt.
8573 Optional<llvm::APSInt> Result =
8574 E->getArg(i)->getIntegerConstantExpr(getContext());
8575 assert(Result && "Expected argument to be a constant");
8576
8577 // Immediates for SVE llvm intrinsics are always 32-bit. We can safely
8578 // truncate because the immediate has been range checked and no valid
8579 // immediate requires more than a handful of bits.
8580 *Result = Result->extOrTrunc(32);
8581 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), *Result));
8582 }
8583 }
8584
8585 auto *Builtin = findARMVectorIntrinsicInMap(AArch64SVEIntrinsicMap, BuiltinID,
8586 AArch64SVEIntrinsicsProvenSorted);
8587 SVETypeFlags TypeFlags(Builtin->TypeModifier);
8588 if (TypeFlags.isLoad())
8589 return EmitSVEMaskedLoad(E, Ty, Ops, Builtin->LLVMIntrinsic,
8590 TypeFlags.isZExtReturn());
8591 else if (TypeFlags.isStore())
8592 return EmitSVEMaskedStore(E, Ops, Builtin->LLVMIntrinsic);
8593 else if (TypeFlags.isGatherLoad())
8594 return EmitSVEGatherLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8595 else if (TypeFlags.isScatterStore())
8596 return EmitSVEScatterStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8597 else if (TypeFlags.isPrefetch())
8598 return EmitSVEPrefetchLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8599 else if (TypeFlags.isGatherPrefetch())
8600 return EmitSVEGatherPrefetch(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8601 else if (TypeFlags.isStructLoad())
8602 return EmitSVEStructLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8603 else if (TypeFlags.isStructStore())
8604 return EmitSVEStructStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8605 else if (TypeFlags.isUndef())
8606 return UndefValue::get(Ty);
8607 else if (Builtin->LLVMIntrinsic != 0) {
8608 if (TypeFlags.getMergeType() == SVETypeFlags::MergeZeroExp)
8609 InsertExplicitZeroOperand(Builder, Ty, Ops);
8610
8611 if (TypeFlags.getMergeType() == SVETypeFlags::MergeAnyExp)
8612 InsertExplicitUndefOperand(Builder, Ty, Ops);
8613
8614 // Some ACLE builtins leave out the argument to specify the predicate
8615 // pattern, which is expected to be expanded to an SV_ALL pattern.
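// For example, svcntb(), which takes no arguments at the C level, becomes a
// call to llvm.aarch64.sve.cntb with an explicit SV_ALL (31) pattern operand.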
8616 if (TypeFlags.isAppendSVALL())
8617 Ops.push_back(Builder.getInt32(/*SV_ALL*/ 31));
8618 if (TypeFlags.isInsertOp1SVALL())
8619 Ops.insert(&Ops[1], Builder.getInt32(/*SV_ALL*/ 31));
8620
8621 // Predicates must match the main datatype.
8622 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
8623 if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType()))
8624 if (PredTy->getElementType()->isIntegerTy(1))
8625 Ops[i] = EmitSVEPredicateCast(Ops[i], getSVEType(TypeFlags));
8626
8627 // Splat scalar operand to vector (intrinsics with _n infix)
8628 if (TypeFlags.hasSplatOperand()) {
8629 unsigned OpNo = TypeFlags.getSplatOperand();
8630 Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
8631 }
8632
8633 if (TypeFlags.isReverseCompare())
8634 std::swap(Ops[1], Ops[2]);
8635
8636 if (TypeFlags.isReverseUSDOT())
8637 std::swap(Ops[1], Ops[2]);
8638
8639 // Predicated intrinsics with _z suffix need a select w/ zeroinitializer.
8640 if (TypeFlags.getMergeType() == SVETypeFlags::MergeZero) {
8641 llvm::Type *OpndTy = Ops[1]->getType();
8642 auto *SplatZero = Constant::getNullValue(OpndTy);
8643 Function *Sel = CGM.getIntrinsic(Intrinsic::aarch64_sve_sel, OpndTy);
8644 Ops[1] = Builder.CreateCall(Sel, {Ops[0], Ops[1], SplatZero});
8645 }
8646
8647 Function *F = CGM.getIntrinsic(Builtin->LLVMIntrinsic,
8648 getSVEOverloadTypes(TypeFlags, Ty, Ops));
8649 Value *Call = Builder.CreateCall(F, Ops);
8650
8651 // Predicate results must be converted to svbool_t.
8652 if (auto PredTy = dyn_cast<llvm::VectorType>(Call->getType()))
8653 if (PredTy->getScalarType()->isIntegerTy(1))
8654 Call = EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
8655
8656 return Call;
8657 }
8658
8659 switch (BuiltinID) {
8660 default:
8661 return nullptr;
8662
8663 case SVE::BI__builtin_sve_svmov_b_z: {
8664 // svmov_b_z(pg, op) <=> svand_b_z(pg, op, op)
8665 SVETypeFlags TypeFlags(Builtin->TypeModifier);
8666 llvm::Type* OverloadedTy = getSVEType(TypeFlags);
8667 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_and_z, OverloadedTy);
8668 return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[1]});
8669 }
8670
8671 case SVE::BI__builtin_sve_svnot_b_z: {
8672 // svnot_b_z(pg, op) <=> sveor_b_z(pg, op, pg)
8673 SVETypeFlags TypeFlags(Builtin->TypeModifier);
8674 llvm::Type* OverloadedTy = getSVEType(TypeFlags);
8675 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_eor_z, OverloadedTy);
8676 return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[0]});
8677 }
8678
8679 case SVE::BI__builtin_sve_svmovlb_u16:
8680 case SVE::BI__builtin_sve_svmovlb_u32:
8681 case SVE::BI__builtin_sve_svmovlb_u64:
8682 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllb);
8683
8684 case SVE::BI__builtin_sve_svmovlb_s16:
8685 case SVE::BI__builtin_sve_svmovlb_s32:
8686 case SVE::BI__builtin_sve_svmovlb_s64:
8687 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllb);
8688
8689 case SVE::BI__builtin_sve_svmovlt_u16:
8690 case SVE::BI__builtin_sve_svmovlt_u32:
8691 case SVE::BI__builtin_sve_svmovlt_u64:
8692 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllt);
8693
8694 case SVE::BI__builtin_sve_svmovlt_s16:
8695 case SVE::BI__builtin_sve_svmovlt_s32:
8696 case SVE::BI__builtin_sve_svmovlt_s64:
8697 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllt);
8698
8699 case SVE::BI__builtin_sve_svpmullt_u16:
8700 case SVE::BI__builtin_sve_svpmullt_u64:
8701 case SVE::BI__builtin_sve_svpmullt_n_u16:
8702 case SVE::BI__builtin_sve_svpmullt_n_u64:
8703 return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullt_pair);
8704
8705 case SVE::BI__builtin_sve_svpmullb_u16:
8706 case SVE::BI__builtin_sve_svpmullb_u64:
8707 case SVE::BI__builtin_sve_svpmullb_n_u16:
8708 case SVE::BI__builtin_sve_svpmullb_n_u64:
8709 return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullb_pair);
8710
8711 case SVE::BI__builtin_sve_svdup_n_b8:
8712 case SVE::BI__builtin_sve_svdup_n_b16:
8713 case SVE::BI__builtin_sve_svdup_n_b32:
8714 case SVE::BI__builtin_sve_svdup_n_b64: {
8715 Value *CmpNE =
8716 Builder.CreateICmpNE(Ops[0], Constant::getNullValue(Ops[0]->getType()));
8717 llvm::ScalableVectorType *OverloadedTy = getSVEType(TypeFlags);
8718 Value *Dup = EmitSVEDupX(CmpNE, OverloadedTy);
8719 return EmitSVEPredicateCast(Dup, cast<llvm::ScalableVectorType>(Ty));
8720 }
8721
8722 case SVE::BI__builtin_sve_svdupq_n_b8:
8723 case SVE::BI__builtin_sve_svdupq_n_b16:
8724 case SVE::BI__builtin_sve_svdupq_n_b32:
8725 case SVE::BI__builtin_sve_svdupq_n_b64:
8726 case SVE::BI__builtin_sve_svdupq_n_u8:
8727 case SVE::BI__builtin_sve_svdupq_n_s8:
8728 case SVE::BI__builtin_sve_svdupq_n_u64:
8729 case SVE::BI__builtin_sve_svdupq_n_f64:
8730 case SVE::BI__builtin_sve_svdupq_n_s64:
8731 case SVE::BI__builtin_sve_svdupq_n_u16:
8732 case SVE::BI__builtin_sve_svdupq_n_f16:
8733 case SVE::BI__builtin_sve_svdupq_n_bf16:
8734 case SVE::BI__builtin_sve_svdupq_n_s16:
8735 case SVE::BI__builtin_sve_svdupq_n_u32:
8736 case SVE::BI__builtin_sve_svdupq_n_f32:
8737 case SVE::BI__builtin_sve_svdupq_n_s32: {
8738 // These builtins are implemented by storing each element to an array and using
8739 // ld1rq to materialize a vector.
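// For example, svdupq_n_s32(a, b, c, d) stores the four values to a
// 16-byte-aligned stack slot and uses the llvm.aarch64.sve.ld1rq intrinsic to
// replicate that 128-bit block across the whole vector.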
8740 unsigned NumOpnds = Ops.size();
8741
8742 bool IsBoolTy =
8743 cast<llvm::VectorType>(Ty)->getElementType()->isIntegerTy(1);
8744
8745 // For svdupq_n_b* the element type is an integer of width 128/numelts,
8746 // so that the compare can use the width that is natural for the expected
8747 // number of predicate lanes.
8748 llvm::Type *EltTy = Ops[0]->getType();
8749 if (IsBoolTy)
8750 EltTy = IntegerType::get(getLLVMContext(), SVEBitsPerBlock / NumOpnds);
8751
8752 Address Alloca = CreateTempAlloca(llvm::ArrayType::get(EltTy, NumOpnds),
8753 CharUnits::fromQuantity(16));
8754 for (unsigned I = 0; I < NumOpnds; ++I)
8755 Builder.CreateDefaultAlignedStore(
8756 IsBoolTy ? Builder.CreateZExt(Ops[I], EltTy) : Ops[I],
8757 Builder.CreateGEP(Alloca.getPointer(),
8758 {Builder.getInt64(0), Builder.getInt64(I)}));
8759
8760 SVETypeFlags TypeFlags(Builtin->TypeModifier);
8761 Value *Pred = EmitSVEAllTruePred(TypeFlags);
8762
8763 llvm::Type *OverloadedTy = getSVEVectorForElementType(EltTy);
8764 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_ld1rq, OverloadedTy);
8765 Value *Alloca0 = Builder.CreateGEP(
8766 Alloca.getPointer(), {Builder.getInt64(0), Builder.getInt64(0)});
8767 Value *LD1RQ = Builder.CreateCall(F, {Pred, Alloca0});
8768
8769 if (!IsBoolTy)
8770 return LD1RQ;
8771
8772 // For svdupq_n_b* we need to add an additional 'cmpne' with '0'.
8773 F = CGM.getIntrinsic(NumOpnds == 2 ? Intrinsic::aarch64_sve_cmpne
8774 : Intrinsic::aarch64_sve_cmpne_wide,
8775 OverloadedTy);
8776 Value *Call =
8777 Builder.CreateCall(F, {Pred, LD1RQ, EmitSVEDupX(Builder.getInt64(0))});
8778 return EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
8779 }
8780
8781 case SVE::BI__builtin_sve_svpfalse_b:
8782 return ConstantInt::getFalse(Ty);
8783
8784 case SVE::BI__builtin_sve_svlen_bf16:
8785 case SVE::BI__builtin_sve_svlen_f16:
8786 case SVE::BI__builtin_sve_svlen_f32:
8787 case SVE::BI__builtin_sve_svlen_f64:
8788 case SVE::BI__builtin_sve_svlen_s8:
8789 case SVE::BI__builtin_sve_svlen_s16:
8790 case SVE::BI__builtin_sve_svlen_s32:
8791 case SVE::BI__builtin_sve_svlen_s64:
8792 case SVE::BI__builtin_sve_svlen_u8:
8793 case SVE::BI__builtin_sve_svlen_u16:
8794 case SVE::BI__builtin_sve_svlen_u32:
8795 case SVE::BI__builtin_sve_svlen_u64: {
8796 SVETypeFlags TF(Builtin->TypeModifier);
8797 auto VTy = cast<llvm::VectorType>(getSVEType(TF));
8798 auto *NumEls =
8799 llvm::ConstantInt::get(Ty, VTy->getElementCount().getKnownMinValue());
8800
8801 Function *F = CGM.getIntrinsic(Intrinsic::vscale, Ty);
8802 return Builder.CreateMul(NumEls, Builder.CreateCall(F));
8803 }
8804
8805 case SVE::BI__builtin_sve_svtbl2_u8:
8806 case SVE::BI__builtin_sve_svtbl2_s8:
8807 case SVE::BI__builtin_sve_svtbl2_u16:
8808 case SVE::BI__builtin_sve_svtbl2_s16:
8809 case SVE::BI__builtin_sve_svtbl2_u32:
8810 case SVE::BI__builtin_sve_svtbl2_s32:
8811 case SVE::BI__builtin_sve_svtbl2_u64:
8812 case SVE::BI__builtin_sve_svtbl2_s64:
8813 case SVE::BI__builtin_sve_svtbl2_f16:
8814 case SVE::BI__builtin_sve_svtbl2_bf16:
8815 case SVE::BI__builtin_sve_svtbl2_f32:
8816 case SVE::BI__builtin_sve_svtbl2_f64: {
8817 SVETypeFlags TF(Builtin->TypeModifier);
8818 auto VTy = cast<llvm::VectorType>(getSVEType(TF));
8819 auto TupleTy = llvm::VectorType::getDoubleElementsVectorType(VTy);
8820 Function *FExtr =
8821 CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy});
8822 Value *V0 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(0)});
8823 Value *V1 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(1)});
8824 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_tbl2, VTy);
8825 return Builder.CreateCall(F, {V0, V1, Ops[1]});
8826 }
8827 }
8828
8829 // Should not happen: every case in the switch above returns.
8830 return nullptr;
8831 }
8832
8833 Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
8834 const CallExpr *E,
8835 llvm::Triple::ArchType Arch) {
8836 if (BuiltinID >= AArch64::FirstSVEBuiltin &&
8837 BuiltinID <= AArch64::LastSVEBuiltin)
8838 return EmitAArch64SVEBuiltinExpr(BuiltinID, E);
8839
8840 unsigned HintID = static_cast<unsigned>(-1);
8841 switch (BuiltinID) {
8842 default: break;
8843 case AArch64::BI__builtin_arm_nop:
8844 HintID = 0;
8845 break;
8846 case AArch64::BI__builtin_arm_yield:
8847 case AArch64::BI__yield:
8848 HintID = 1;
8849 break;
8850 case AArch64::BI__builtin_arm_wfe:
8851 case AArch64::BI__wfe:
8852 HintID = 2;
8853 break;
8854 case AArch64::BI__builtin_arm_wfi:
8855 case AArch64::BI__wfi:
8856 HintID = 3;
8857 break;
8858 case AArch64::BI__builtin_arm_sev:
8859 case AArch64::BI__sev:
8860 HintID = 4;
8861 break;
8862 case AArch64::BI__builtin_arm_sevl:
8863 case AArch64::BI__sevl:
8864 HintID = 5;
8865 break;
8866 }
8867
8868 if (HintID != static_cast<unsigned>(-1)) {
8869 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint);
8870 return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
8871 }
8872
8873 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
8874 Value *Address = EmitScalarExpr(E->getArg(0));
8875 Value *RW = EmitScalarExpr(E->getArg(1));
8876 Value *CacheLevel = EmitScalarExpr(E->getArg(2));
8877 Value *RetentionPolicy = EmitScalarExpr(E->getArg(3));
8878 Value *IsData = EmitScalarExpr(E->getArg(4));
8879
8880 Value *Locality = nullptr;
8881 if (cast<llvm::ConstantInt>(RetentionPolicy)->isZero()) {
8882 // Temporal fetch; convert the cache level to a locality hint.
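// Cache level N is converted to llvm.prefetch locality 3 - N, so cache
// level 0 becomes locality 3 (the highest temporal locality).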
8883 Locality = llvm::ConstantInt::get(Int32Ty,
8884 -cast<llvm::ConstantInt>(CacheLevel)->getValue() + 3);
8885 } else {
8886 // Streaming fetch.
8887 Locality = llvm::ConstantInt::get(Int32Ty, 0);
8888 }
8889
8890 // FIXME: We need an AArch64-specific LLVM intrinsic if we want to specify
8891 // PLDL3STRM or PLDL2STRM.
8892 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
8893 return Builder.CreateCall(F, {Address, RW, Locality, IsData});
8894 }
8895
8896 if (BuiltinID == AArch64::BI__builtin_arm_rbit) {
8897 assert((getContext().getTypeSize(E->getType()) == 32) &&
8898 "rbit of unusual size!");
8899 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
8900 return Builder.CreateCall(
8901 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
8902 }
8903 if (BuiltinID == AArch64::BI__builtin_arm_rbit64) {
8904 assert((getContext().getTypeSize(E->getType()) == 64) &&
8905 "rbit of unusual size!");
8906 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
8907 return Builder.CreateCall(
8908 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
8909 }
8910
8911 if (BuiltinID == AArch64::BI__builtin_arm_cls) {
8912 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
8913 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls), Arg,
8914 "cls");
8915 }
8916 if (BuiltinID == AArch64::BI__builtin_arm_cls64) {
8917 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
8918 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls64), Arg,
8919 "cls");
8920 }
8921
8922 if (BuiltinID == AArch64::BI__builtin_arm_jcvt) {
8923 assert((getContext().getTypeSize(E->getType()) == 32) &&
8924 "__jcvt of unusual size!");
8925 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
8926 return Builder.CreateCall(
8927 CGM.getIntrinsic(Intrinsic::aarch64_fjcvtzs), Arg);
8928 }
8929
8930 if (BuiltinID == AArch64::BI__clear_cache) {
8931 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
8932 const FunctionDecl *FD = E->getDirectCallee();
8933 Value *Ops[2];
8934 for (unsigned i = 0; i < 2; i++)
8935 Ops[i] = EmitScalarExpr(E->getArg(i));
8936 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
8937 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
8938 StringRef Name = FD->getName();
8939 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
8940 }
8941
8942 if ((BuiltinID == AArch64::BI__builtin_arm_ldrex ||
8943 BuiltinID == AArch64::BI__builtin_arm_ldaex) &&
8944 getContext().getTypeSize(E->getType()) == 128) {
8945 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
8946 ? Intrinsic::aarch64_ldaxp
8947 : Intrinsic::aarch64_ldxp);
8948
8949 Value *LdPtr = EmitScalarExpr(E->getArg(0));
8950 Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
8951 "ldxp");
8952
8953 Value *Val0 = Builder.CreateExtractValue(Val, 1);
8954 Value *Val1 = Builder.CreateExtractValue(Val, 0);
8955 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
8956 Val0 = Builder.CreateZExt(Val0, Int128Ty);
8957 Val1 = Builder.CreateZExt(Val1, Int128Ty);
8958
8959 Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64);
8960 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
8961 Val = Builder.CreateOr(Val, Val1);
8962 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
8963 } else if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
8964 BuiltinID == AArch64::BI__builtin_arm_ldaex) {
8965 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
8966
8967 QualType Ty = E->getType();
8968 llvm::Type *RealResTy = ConvertType(Ty);
8969 llvm::Type *PtrTy = llvm::IntegerType::get(
8970 getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
8971 LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
8972
8973 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
8974 ? Intrinsic::aarch64_ldaxr
8975 : Intrinsic::aarch64_ldxr,
8976 PtrTy);
8977 Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
8978
8979 if (RealResTy->isPointerTy())
8980 return Builder.CreateIntToPtr(Val, RealResTy);
8981
8982 llvm::Type *IntResTy = llvm::IntegerType::get(
8983 getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
8984 Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
8985 return Builder.CreateBitCast(Val, RealResTy);
8986 }
8987
8988 if ((BuiltinID == AArch64::BI__builtin_arm_strex ||
8989 BuiltinID == AArch64::BI__builtin_arm_stlex) &&
8990 getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
8991 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
8992 ? Intrinsic::aarch64_stlxp
8993 : Intrinsic::aarch64_stxp);
8994 llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty);
8995
8996 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
8997 EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true);
8998
8999 Tmp = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy));
9000 llvm::Value *Val = Builder.CreateLoad(Tmp);
9001
9002 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
9003 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
9004 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)),
9005 Int8PtrTy);
9006 return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp");
9007 }
9008
9009 if (BuiltinID == AArch64::BI__builtin_arm_strex ||
9010 BuiltinID == AArch64::BI__builtin_arm_stlex) {
9011 Value *StoreVal = EmitScalarExpr(E->getArg(0));
9012 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
9013
9014 QualType Ty = E->getArg(0)->getType();
9015 llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
9016 getContext().getTypeSize(Ty));
9017 StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
9018
9019 if (StoreVal->getType()->isPointerTy())
9020 StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
9021 else {
9022 llvm::Type *IntTy = llvm::IntegerType::get(
9023 getLLVMContext(),
9024 CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
9025 StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
9026 StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
9027 }
9028
9029 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
9030 ? Intrinsic::aarch64_stlxr
9031 : Intrinsic::aarch64_stxr,
9032 StoreAddr->getType());
9033 return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
9034 }
9035
9036 if (BuiltinID == AArch64::BI__getReg) {
9037 Expr::EvalResult Result;
9038 if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
9039 llvm_unreachable("Sema will ensure that the parameter is constant");
9040
9041 llvm::APSInt Value = Result.Val.getInt();
9042 LLVMContext &Context = CGM.getLLVMContext();
9043 std::string Reg = Value == 31 ? "sp" : "x" + Value.toString(10);
9044
9045 llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Reg)};
9046 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
9047 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
9048
9049 llvm::Function *F =
9050 CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
9051 return Builder.CreateCall(F, Metadata);
9052 }
9053
9054 if (BuiltinID == AArch64::BI__builtin_arm_clrex) {
9055 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
9056 return Builder.CreateCall(F);
9057 }
9058
9059 if (BuiltinID == AArch64::BI_ReadWriteBarrier)
9060 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
9061 llvm::SyncScope::SingleThread);
9062
9063 // CRC32
9064 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
9065 switch (BuiltinID) {
9066 case AArch64::BI__builtin_arm_crc32b:
9067 CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
9068 case AArch64::BI__builtin_arm_crc32cb:
9069 CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
9070 case AArch64::BI__builtin_arm_crc32h:
9071 CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
9072 case AArch64::BI__builtin_arm_crc32ch:
9073 CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
9074 case AArch64::BI__builtin_arm_crc32w:
9075 CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
9076 case AArch64::BI__builtin_arm_crc32cw:
9077 CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
9078 case AArch64::BI__builtin_arm_crc32d:
9079 CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
9080 case AArch64::BI__builtin_arm_crc32cd:
9081 CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
9082 }
9083
9084 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
9085 Value *Arg0 = EmitScalarExpr(E->getArg(0));
9086 Value *Arg1 = EmitScalarExpr(E->getArg(1));
9087 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
9088
9089 llvm::Type *DataTy = F->getFunctionType()->getParamType(1);
9090 Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy);
9091
9092 return Builder.CreateCall(F, {Arg0, Arg1});
9093 }
9094
9095 // Memory Tagging Extensions (MTE) Intrinsics
9096 Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic;
9097 switch (BuiltinID) {
9098 case AArch64::BI__builtin_arm_irg:
9099 MTEIntrinsicID = Intrinsic::aarch64_irg; break;
9100 case AArch64::BI__builtin_arm_addg:
9101 MTEIntrinsicID = Intrinsic::aarch64_addg; break;
9102 case AArch64::BI__builtin_arm_gmi:
9103 MTEIntrinsicID = Intrinsic::aarch64_gmi; break;
9104 case AArch64::BI__builtin_arm_ldg:
9105 MTEIntrinsicID = Intrinsic::aarch64_ldg; break;
9106 case AArch64::BI__builtin_arm_stg:
9107 MTEIntrinsicID = Intrinsic::aarch64_stg; break;
9108 case AArch64::BI__builtin_arm_subp:
9109 MTEIntrinsicID = Intrinsic::aarch64_subp; break;
9110 }
9111
9112 if (MTEIntrinsicID != Intrinsic::not_intrinsic) {
9113 llvm::Type *T = ConvertType(E->getType());
9114
9115 if (MTEIntrinsicID == Intrinsic::aarch64_irg) {
9116 Value *Pointer = EmitScalarExpr(E->getArg(0));
9117 Value *Mask = EmitScalarExpr(E->getArg(1));
9118
9119 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
9120 Mask = Builder.CreateZExt(Mask, Int64Ty);
9121 Value *RV = Builder.CreateCall(
9122 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, Mask});
9123 return Builder.CreatePointerCast(RV, T);
9124 }
9125 if (MTEIntrinsicID == Intrinsic::aarch64_addg) {
9126 Value *Pointer = EmitScalarExpr(E->getArg(0));
9127 Value *TagOffset = EmitScalarExpr(E->getArg(1));
9128
9129 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
9130 TagOffset = Builder.CreateZExt(TagOffset, Int64Ty);
9131 Value *RV = Builder.CreateCall(
9132 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, TagOffset});
9133 return Builder.CreatePointerCast(RV, T);
9134 }
9135 if (MTEIntrinsicID == Intrinsic::aarch64_gmi) {
9136 Value *Pointer = EmitScalarExpr(E->getArg(0));
9137 Value *ExcludedMask = EmitScalarExpr(E->getArg(1));
9138
9139 ExcludedMask = Builder.CreateZExt(ExcludedMask, Int64Ty);
9140 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
9141 return Builder.CreateCall(
9142 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, ExcludedMask});
9143 }
9144 // Although it is possible to supply a different return
9145 // address (first arg) to this intrinsic, for now we set
9146 // the returned address to be the same as the input address.
9147 if (MTEIntrinsicID == Intrinsic::aarch64_ldg) {
9148 Value *TagAddress = EmitScalarExpr(E->getArg(0));
9149 TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
9150 Value *RV = Builder.CreateCall(
9151 CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
9152 return Builder.CreatePointerCast(RV, T);
9153 }
9154 // Although it is possible to supply a different tag to set
9155 // (as the first arg) to this intrinsic, for now we use the tag
9156 // already present in the input address arg (the common use case).
9157 if (MTEIntrinsicID == Intrinsic::aarch64_stg) {
9158 Value *TagAddress = EmitScalarExpr(E->getArg(0));
9159 TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
9160 return Builder.CreateCall(
9161 CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
9162 }
9163 if (MTEIntrinsicID == Intrinsic::aarch64_subp) {
9164 Value *PointerA = EmitScalarExpr(E->getArg(0));
9165 Value *PointerB = EmitScalarExpr(E->getArg(1));
9166 PointerA = Builder.CreatePointerCast(PointerA, Int8PtrTy);
9167 PointerB = Builder.CreatePointerCast(PointerB, Int8PtrTy);
9168 return Builder.CreateCall(
9169 CGM.getIntrinsic(MTEIntrinsicID), {PointerA, PointerB});
9170 }
9171 }
9172
9173 if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
9174 BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
9175 BuiltinID == AArch64::BI__builtin_arm_rsrp ||
9176 BuiltinID == AArch64::BI__builtin_arm_wsr ||
9177 BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
9178 BuiltinID == AArch64::BI__builtin_arm_wsrp) {
9179
9180 SpecialRegisterAccessKind AccessKind = Write;
9181 if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
9182 BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
9183 BuiltinID == AArch64::BI__builtin_arm_rsrp)
9184 AccessKind = VolatileRead;
9185
9186 bool IsPointerBuiltin = BuiltinID == AArch64::BI__builtin_arm_rsrp ||
9187 BuiltinID == AArch64::BI__builtin_arm_wsrp;
9188
9189 bool Is64Bit = BuiltinID != AArch64::BI__builtin_arm_rsr &&
9190 BuiltinID != AArch64::BI__builtin_arm_wsr;
9191
9192 llvm::Type *ValueType;
9193 llvm::Type *RegisterType = Int64Ty;
9194 if (IsPointerBuiltin) {
9195 ValueType = VoidPtrTy;
9196 } else if (Is64Bit) {
9197 ValueType = Int64Ty;
9198 } else {
9199 ValueType = Int32Ty;
9200 }
9201
9202 return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
9203 AccessKind);
9204 }
9205
9206 if (BuiltinID == AArch64::BI_ReadStatusReg ||
9207 BuiltinID == AArch64::BI_WriteStatusReg) {
9208 LLVMContext &Context = CGM.getLLVMContext();
9209
9210 unsigned SysReg =
9211 E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue();
9212
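// Decode the MSVC system-register encoding into the "op0:op1:CRn:CRm:op2"
// string form understood by the read_register/write_register intrinsics.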
9213 std::string SysRegStr;
9214 llvm::raw_string_ostream(SysRegStr) <<
9215 ((1 << 1) | ((SysReg >> 14) & 1)) << ":" <<
9216 ((SysReg >> 11) & 7) << ":" <<
9217 ((SysReg >> 7) & 15) << ":" <<
9218 ((SysReg >> 3) & 15) << ":" <<
9219 ( SysReg & 7);
9220
9221 llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysRegStr) };
9222 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
9223 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
9224
9225 llvm::Type *RegisterType = Int64Ty;
9226 llvm::Type *Types[] = { RegisterType };
9227
9228 if (BuiltinID == AArch64::BI_ReadStatusReg) {
9229 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
9230
9231 return Builder.CreateCall(F, Metadata);
9232 }
9233
9234 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
9235 llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1));
9236
9237 return Builder.CreateCall(F, { Metadata, ArgValue });
9238 }
9239
9240 if (BuiltinID == AArch64::BI_AddressOfReturnAddress) {
9241 llvm::Function *F =
9242 CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
9243 return Builder.CreateCall(F);
9244 }
9245
9246 if (BuiltinID == AArch64::BI__builtin_sponentry) {
9247 llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry, AllocaInt8PtrTy);
9248 return Builder.CreateCall(F);
9249 }
9250
9251 // Handle MSVC intrinsics before argument evaluation to prevent double
9252 // evaluation.
9253 if (Optional<MSVCIntrin> MsvcIntId = translateAarch64ToMsvcIntrin(BuiltinID))
9254 return EmitMSVCBuiltinExpr(*MsvcIntId, E);
9255
9256 // Find out if any arguments are required to be integer constant
9257 // expressions.
9258 unsigned ICEArguments = 0;
9259 ASTContext::GetBuiltinTypeError Error;
9260 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
9261 assert(Error == ASTContext::GE_None && "Should not codegen an error");
9262
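// Gather all operands except the last. For the overloaded NEON builtins the
// final argument is the type-flag immediate (read further below); the scalar
// builtins handled in the switch push their remaining operands themselves.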
9263 llvm::SmallVector<Value*, 4> Ops;
9264 Address PtrOp0 = Address::invalid();
9265 for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
9266 if (i == 0) {
9267 switch (BuiltinID) {
9268 case NEON::BI__builtin_neon_vld1_v:
9269 case NEON::BI__builtin_neon_vld1q_v:
9270 case NEON::BI__builtin_neon_vld1_dup_v:
9271 case NEON::BI__builtin_neon_vld1q_dup_v:
9272 case NEON::BI__builtin_neon_vld1_lane_v:
9273 case NEON::BI__builtin_neon_vld1q_lane_v:
9274 case NEON::BI__builtin_neon_vst1_v:
9275 case NEON::BI__builtin_neon_vst1q_v:
9276 case NEON::BI__builtin_neon_vst1_lane_v:
9277 case NEON::BI__builtin_neon_vst1q_lane_v:
9278 // Get the alignment for the argument in addition to the value;
9279 // we'll use it later.
9280 PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
9281 Ops.push_back(PtrOp0.getPointer());
9282 continue;
9283 }
9284 }
9285 if ((ICEArguments & (1 << i)) == 0) {
9286 Ops.push_back(EmitScalarExpr(E->getArg(i)));
9287 } else {
9288 // If this is required to be a constant, constant fold it so that we know
9289 // that the generated intrinsic gets a ConstantInt.
9290 Ops.push_back(llvm::ConstantInt::get(
9291 getLLVMContext(),
9292 *E->getArg(i)->getIntegerConstantExpr(getContext())));
9293 }
9294 }
9295
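// Next, try the scalar (SISD) intrinsics that can be emitted through the
// common NEON-SISD path.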
9296 auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap);
9297 const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
9298 SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);
9299
9300 if (Builtin) {
9301 Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1)));
9302 Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E);
9303 assert(Result && "SISD intrinsic should have been handled");
9304 return Result;
9305 }
9306
9307 const Expr *Arg = E->getArg(E->getNumArgs()-1);
9308 NeonTypeFlags Type(0);
9309 if (Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(getContext()))
9310 // Determine the type of this overloaded NEON intrinsic.
9311 Type = NeonTypeFlags(Result->getZExtValue());
9312
9313 bool usgn = Type.isUnsigned();
9314 bool quad = Type.isQuad();
9315
9316 // Handle non-overloaded intrinsics first.
9317 switch (BuiltinID) {
9318 default: break;
9319 case NEON::BI__builtin_neon_vabsh_f16:
9320 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9321 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, HalfTy), Ops, "vabs");
9322 case NEON::BI__builtin_neon_vldrq_p128: {
9323 llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
9324 llvm::Type *Int128PTy = llvm::PointerType::get(Int128Ty, 0);
9325 Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy);
9326 return Builder.CreateAlignedLoad(Int128Ty, Ptr,
9327 CharUnits::fromQuantity(16));
9328 }
9329 case NEON::BI__builtin_neon_vstrq_p128: {
9330 llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
9331 Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy);
9332 return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr);
9333 }
9334 case NEON::BI__builtin_neon_vcvts_f32_u32:
9335 case NEON::BI__builtin_neon_vcvtd_f64_u64:
9336 usgn = true;
9337 LLVM_FALLTHROUGH;
9338 case NEON::BI__builtin_neon_vcvts_f32_s32:
9339 case NEON::BI__builtin_neon_vcvtd_f64_s64: {
9340 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9341 bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
9342 llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
9343 llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
9344 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
9345 if (usgn)
9346 return Builder.CreateUIToFP(Ops[0], FTy);
9347 return Builder.CreateSIToFP(Ops[0], FTy);
9348 }
9349 case NEON::BI__builtin_neon_vcvth_f16_u16:
9350 case NEON::BI__builtin_neon_vcvth_f16_u32:
9351 case NEON::BI__builtin_neon_vcvth_f16_u64:
9352 usgn = true;
9353 LLVM_FALLTHROUGH;
9354 case NEON::BI__builtin_neon_vcvth_f16_s16:
9355 case NEON::BI__builtin_neon_vcvth_f16_s32:
9356 case NEON::BI__builtin_neon_vcvth_f16_s64: {
9357 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9358 llvm::Type *FTy = HalfTy;
9359 llvm::Type *InTy;
9360 if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64)
9361 InTy = Int64Ty;
9362 else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32)
9363 InTy = Int32Ty;
9364 else
9365 InTy = Int16Ty;
9366 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
9367 if (usgn)
9368 return Builder.CreateUIToFP(Ops[0], FTy);
9369 return Builder.CreateSIToFP(Ops[0], FTy);
9370 }
9371 case NEON::BI__builtin_neon_vcvtah_u16_f16:
9372 case NEON::BI__builtin_neon_vcvtmh_u16_f16:
9373 case NEON::BI__builtin_neon_vcvtnh_u16_f16:
9374 case NEON::BI__builtin_neon_vcvtph_u16_f16:
9375 case NEON::BI__builtin_neon_vcvth_u16_f16:
9376 case NEON::BI__builtin_neon_vcvtah_s16_f16:
9377 case NEON::BI__builtin_neon_vcvtmh_s16_f16:
9378 case NEON::BI__builtin_neon_vcvtnh_s16_f16:
9379 case NEON::BI__builtin_neon_vcvtph_s16_f16:
9380 case NEON::BI__builtin_neon_vcvth_s16_f16: {
9381 unsigned Int;
9382 llvm::Type* InTy = Int32Ty;
9383 llvm::Type* FTy = HalfTy;
9384 llvm::Type *Tys[2] = {InTy, FTy};
9385 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9386 switch (BuiltinID) {
9387 default: llvm_unreachable("missing builtin ID in switch!");
9388 case NEON::BI__builtin_neon_vcvtah_u16_f16:
9389 Int = Intrinsic::aarch64_neon_fcvtau; break;
9390 case NEON::BI__builtin_neon_vcvtmh_u16_f16:
9391 Int = Intrinsic::aarch64_neon_fcvtmu; break;
9392 case NEON::BI__builtin_neon_vcvtnh_u16_f16:
9393 Int = Intrinsic::aarch64_neon_fcvtnu; break;
9394 case NEON::BI__builtin_neon_vcvtph_u16_f16:
9395 Int = Intrinsic::aarch64_neon_fcvtpu; break;
9396 case NEON::BI__builtin_neon_vcvth_u16_f16:
9397 Int = Intrinsic::aarch64_neon_fcvtzu; break;
9398 case NEON::BI__builtin_neon_vcvtah_s16_f16:
9399 Int = Intrinsic::aarch64_neon_fcvtas; break;
9400 case NEON::BI__builtin_neon_vcvtmh_s16_f16:
9401 Int = Intrinsic::aarch64_neon_fcvtms; break;
9402 case NEON::BI__builtin_neon_vcvtnh_s16_f16:
9403 Int = Intrinsic::aarch64_neon_fcvtns; break;
9404 case NEON::BI__builtin_neon_vcvtph_s16_f16:
9405 Int = Intrinsic::aarch64_neon_fcvtps; break;
9406 case NEON::BI__builtin_neon_vcvth_s16_f16:
9407 Int = Intrinsic::aarch64_neon_fcvtzs; break;
9408 }
9409 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvt");
9410 return Builder.CreateTrunc(Ops[0], Int16Ty);
9411 }
9412 case NEON::BI__builtin_neon_vcaleh_f16:
9413 case NEON::BI__builtin_neon_vcalth_f16:
9414 case NEON::BI__builtin_neon_vcageh_f16:
9415 case NEON::BI__builtin_neon_vcagth_f16: {
9416 unsigned Int;
9417 llvm::Type* InTy = Int32Ty;
9418 llvm::Type* FTy = HalfTy;
9419 llvm::Type *Tys[2] = {InTy, FTy};
9420 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9421 switch (BuiltinID) {
9422 default: llvm_unreachable("missing builtin ID in switch!");
9423 case NEON::BI__builtin_neon_vcageh_f16:
9424 Int = Intrinsic::aarch64_neon_facge; break;
9425 case NEON::BI__builtin_neon_vcagth_f16:
9426 Int = Intrinsic::aarch64_neon_facgt; break;
9427 case NEON::BI__builtin_neon_vcaleh_f16:
9428 Int = Intrinsic::aarch64_neon_facge; std::swap(Ops[0], Ops[1]); break;
9429 case NEON::BI__builtin_neon_vcalth_f16:
9430 Int = Intrinsic::aarch64_neon_facgt; std::swap(Ops[0], Ops[1]); break;
9431 }
9432 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "facg");
9433 return Builder.CreateTrunc(Ops[0], Int16Ty);
9434 }
9435 case NEON::BI__builtin_neon_vcvth_n_s16_f16:
9436 case NEON::BI__builtin_neon_vcvth_n_u16_f16: {
9437 unsigned Int;
9438 llvm::Type* InTy = Int32Ty;
9439 llvm::Type* FTy = HalfTy;
9440 llvm::Type *Tys[2] = {InTy, FTy};
9441 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9442 switch (BuiltinID) {
9443 default: llvm_unreachable("missing builtin ID in switch!");
9444 case NEON::BI__builtin_neon_vcvth_n_s16_f16:
9445 Int = Intrinsic::aarch64_neon_vcvtfp2fxs; break;
9446 case NEON::BI__builtin_neon_vcvth_n_u16_f16:
9447 Int = Intrinsic::aarch64_neon_vcvtfp2fxu; break;
9448 }
9449 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
9450 return Builder.CreateTrunc(Ops[0], Int16Ty);
9451 }
9452 case NEON::BI__builtin_neon_vcvth_n_f16_s16:
9453 case NEON::BI__builtin_neon_vcvth_n_f16_u16: {
9454 unsigned Int;
9455 llvm::Type* FTy = HalfTy;
9456 llvm::Type* InTy = Int32Ty;
9457 llvm::Type *Tys[2] = {FTy, InTy};
9458 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9459 switch (BuiltinID) {
9460 default: llvm_unreachable("missing builtin ID in switch!");
9461 case NEON::BI__builtin_neon_vcvth_n_f16_s16:
9462 Int = Intrinsic::aarch64_neon_vcvtfxs2fp;
9463 Ops[0] = Builder.CreateSExt(Ops[0], InTy, "sext");
9464 break;
9465 case NEON::BI__builtin_neon_vcvth_n_f16_u16:
9466 Int = Intrinsic::aarch64_neon_vcvtfxu2fp;
9467 Ops[0] = Builder.CreateZExt(Ops[0], InTy);
9468 break;
9469 }
9470 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
9471 }
9472 case NEON::BI__builtin_neon_vpaddd_s64: {
9473 auto *Ty = llvm::FixedVectorType::get(Int64Ty, 2);
9474 Value *Vec = EmitScalarExpr(E->getArg(0));
9475 // The vector is v2i64, so make sure it's bitcast to that.
9476 Vec = Builder.CreateBitCast(Vec, Ty, "v2i64");
9477 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
9478 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
9479 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
9480 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
9481 // Pairwise addition of a v2i64 into a scalar i64.
9482 return Builder.CreateAdd(Op0, Op1, "vpaddd");
9483 }
9484 case NEON::BI__builtin_neon_vpaddd_f64: {
9485 auto *Ty = llvm::FixedVectorType::get(DoubleTy, 2);
9486 Value *Vec = EmitScalarExpr(E->getArg(0));
9487 // The vector is v2f64, so make sure it's bitcast to that.
9488 Vec = Builder.CreateBitCast(Vec, Ty, "v2f64");
9489 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
9490 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
9491 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
9492 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
9493 // Pairwise addition of a v2f64 into a scalar f64.
9494 return Builder.CreateFAdd(Op0, Op1, "vpaddd");
9495 }
9496 case NEON::BI__builtin_neon_vpadds_f32: {
9497 auto *Ty = llvm::FixedVectorType::get(FloatTy, 2);
9498 Value *Vec = EmitScalarExpr(E->getArg(0));
9499 // The vector is v2f32, so make sure it's bitcast to that.
9500 Vec = Builder.CreateBitCast(Vec, Ty, "v2f32");
9501 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
9502 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
9503 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
9504 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
9505 // Pairwise addition of a v2f32 into a scalar f32.
9506 return Builder.CreateFAdd(Op0, Op1, "vpaddd");
9507 }
9508 case NEON::BI__builtin_neon_vceqzd_s64:
9509 case NEON::BI__builtin_neon_vceqzd_f64:
9510 case NEON::BI__builtin_neon_vceqzs_f32:
9511 case NEON::BI__builtin_neon_vceqzh_f16:
9512 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9513 return EmitAArch64CompareBuiltinExpr(
9514 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9515 ICmpInst::FCMP_OEQ, ICmpInst::ICMP_EQ, "vceqz");
9516 case NEON::BI__builtin_neon_vcgezd_s64:
9517 case NEON::BI__builtin_neon_vcgezd_f64:
9518 case NEON::BI__builtin_neon_vcgezs_f32:
9519 case NEON::BI__builtin_neon_vcgezh_f16:
9520 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9521 return EmitAArch64CompareBuiltinExpr(
9522 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9523 ICmpInst::FCMP_OGE, ICmpInst::ICMP_SGE, "vcgez");
9524 case NEON::BI__builtin_neon_vclezd_s64:
9525 case NEON::BI__builtin_neon_vclezd_f64:
9526 case NEON::BI__builtin_neon_vclezs_f32:
9527 case NEON::BI__builtin_neon_vclezh_f16:
9528 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9529 return EmitAArch64CompareBuiltinExpr(
9530 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9531 ICmpInst::FCMP_OLE, ICmpInst::ICMP_SLE, "vclez");
9532 case NEON::BI__builtin_neon_vcgtzd_s64:
9533 case NEON::BI__builtin_neon_vcgtzd_f64:
9534 case NEON::BI__builtin_neon_vcgtzs_f32:
9535 case NEON::BI__builtin_neon_vcgtzh_f16:
9536 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9537 return EmitAArch64CompareBuiltinExpr(
9538 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9539 ICmpInst::FCMP_OGT, ICmpInst::ICMP_SGT, "vcgtz");
9540 case NEON::BI__builtin_neon_vcltzd_s64:
9541 case NEON::BI__builtin_neon_vcltzd_f64:
9542 case NEON::BI__builtin_neon_vcltzs_f32:
9543 case NEON::BI__builtin_neon_vcltzh_f16:
9544 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9545 return EmitAArch64CompareBuiltinExpr(
9546 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9547 ICmpInst::FCMP_OLT, ICmpInst::ICMP_SLT, "vcltz");
9548
9549 case NEON::BI__builtin_neon_vceqzd_u64: {
9550 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9551 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
9552 Ops[0] =
9553 Builder.CreateICmpEQ(Ops[0], llvm::Constant::getNullValue(Int64Ty));
9554 return Builder.CreateSExt(Ops[0], Int64Ty, "vceqzd");
9555 }
9556 case NEON::BI__builtin_neon_vceqd_f64:
9557 case NEON::BI__builtin_neon_vcled_f64:
9558 case NEON::BI__builtin_neon_vcltd_f64:
9559 case NEON::BI__builtin_neon_vcged_f64:
9560 case NEON::BI__builtin_neon_vcgtd_f64: {
9561 llvm::CmpInst::Predicate P;
9562 switch (BuiltinID) {
9563 default: llvm_unreachable("missing builtin ID in switch!");
9564 case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break;
9565 case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break;
9566 case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break;
9567 case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break;
9568 case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break;
9569 }
9570 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9571 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
9572 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
9573 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
9574 return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
9575 }
9576 case NEON::BI__builtin_neon_vceqs_f32:
9577 case NEON::BI__builtin_neon_vcles_f32:
9578 case NEON::BI__builtin_neon_vclts_f32:
9579 case NEON::BI__builtin_neon_vcges_f32:
9580 case NEON::BI__builtin_neon_vcgts_f32: {
9581 llvm::CmpInst::Predicate P;
9582 switch (BuiltinID) {
9583 default: llvm_unreachable("missing builtin ID in switch!");
9584 case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break;
9585 case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break;
9586 case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break;
9587 case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break;
9588 case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break;
9589 }
9590 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9591 Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
9592 Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy);
9593 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
9594 return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd");
9595 }
9596 case NEON::BI__builtin_neon_vceqh_f16:
9597 case NEON::BI__builtin_neon_vcleh_f16:
9598 case NEON::BI__builtin_neon_vclth_f16:
9599 case NEON::BI__builtin_neon_vcgeh_f16:
9600 case NEON::BI__builtin_neon_vcgth_f16: {
9601 llvm::CmpInst::Predicate P;
9602 switch (BuiltinID) {
9603 default: llvm_unreachable("missing builtin ID in switch!");
9604 case NEON::BI__builtin_neon_vceqh_f16: P = llvm::FCmpInst::FCMP_OEQ; break;
9605 case NEON::BI__builtin_neon_vcleh_f16: P = llvm::FCmpInst::FCMP_OLE; break;
9606 case NEON::BI__builtin_neon_vclth_f16: P = llvm::FCmpInst::FCMP_OLT; break;
9607 case NEON::BI__builtin_neon_vcgeh_f16: P = llvm::FCmpInst::FCMP_OGE; break;
9608 case NEON::BI__builtin_neon_vcgth_f16: P = llvm::FCmpInst::FCMP_OGT; break;
9609 }
9610 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9611 Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
9612 Ops[1] = Builder.CreateBitCast(Ops[1], HalfTy);
9613 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
9614 return Builder.CreateSExt(Ops[0], Int16Ty, "vcmpd");
9615 }
9616 case NEON::BI__builtin_neon_vceqd_s64:
9617 case NEON::BI__builtin_neon_vceqd_u64:
9618 case NEON::BI__builtin_neon_vcgtd_s64:
9619 case NEON::BI__builtin_neon_vcgtd_u64:
9620 case NEON::BI__builtin_neon_vcltd_s64:
9621 case NEON::BI__builtin_neon_vcltd_u64:
9622 case NEON::BI__builtin_neon_vcged_u64:
9623 case NEON::BI__builtin_neon_vcged_s64:
9624 case NEON::BI__builtin_neon_vcled_u64:
9625 case NEON::BI__builtin_neon_vcled_s64: {
9626 llvm::CmpInst::Predicate P;
9627 switch (BuiltinID) {
9628 default: llvm_unreachable("missing builtin ID in switch!");
9629 case NEON::BI__builtin_neon_vceqd_s64:
9630 case NEON::BI__builtin_neon_vceqd_u64: P = llvm::ICmpInst::ICMP_EQ; break;
9631 case NEON::BI__builtin_neon_vcgtd_s64: P = llvm::ICmpInst::ICMP_SGT; break;
9632 case NEON::BI__builtin_neon_vcgtd_u64: P = llvm::ICmpInst::ICMP_UGT; break;
9633 case NEON::BI__builtin_neon_vcltd_s64: P = llvm::ICmpInst::ICMP_SLT; break;
9634 case NEON::BI__builtin_neon_vcltd_u64: P = llvm::ICmpInst::ICMP_ULT; break;
9635 case NEON::BI__builtin_neon_vcged_u64: P = llvm::ICmpInst::ICMP_UGE; break;
9636 case NEON::BI__builtin_neon_vcged_s64: P = llvm::ICmpInst::ICMP_SGE; break;
9637 case NEON::BI__builtin_neon_vcled_u64: P = llvm::ICmpInst::ICMP_ULE; break;
9638 case NEON::BI__builtin_neon_vcled_s64: P = llvm::ICmpInst::ICMP_SLE; break;
9639 }
9640 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9641 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
9642 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
9643 Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]);
9644 return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd");
9645 }
9646 case NEON::BI__builtin_neon_vtstd_s64:
9647 case NEON::BI__builtin_neon_vtstd_u64: {
9648 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9649 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
9650 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
9651 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
9652 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
9653 llvm::Constant::getNullValue(Int64Ty));
9654 return Builder.CreateSExt(Ops[0], Int64Ty, "vtstd");
9655 }
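// vset_lane: insert the scalar operand into the requested lane of the vector
// operand. The f64 variants below additionally bitcast the vector first.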
9656 case NEON::BI__builtin_neon_vset_lane_i8:
9657 case NEON::BI__builtin_neon_vset_lane_i16:
9658 case NEON::BI__builtin_neon_vset_lane_i32:
9659 case NEON::BI__builtin_neon_vset_lane_i64:
9660 case NEON::BI__builtin_neon_vset_lane_bf16:
9661 case NEON::BI__builtin_neon_vset_lane_f32:
9662 case NEON::BI__builtin_neon_vsetq_lane_i8:
9663 case NEON::BI__builtin_neon_vsetq_lane_i16:
9664 case NEON::BI__builtin_neon_vsetq_lane_i32:
9665 case NEON::BI__builtin_neon_vsetq_lane_i64:
9666 case NEON::BI__builtin_neon_vsetq_lane_bf16:
9667 case NEON::BI__builtin_neon_vsetq_lane_f32:
9668 Ops.push_back(EmitScalarExpr(E->getArg(2)));
9669 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
9670 case NEON::BI__builtin_neon_vset_lane_f64:
9671 // The vector type needs a cast for the v1f64 variant.
9672 Ops[1] =
9673 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 1));
9674 Ops.push_back(EmitScalarExpr(E->getArg(2)));
9675 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
9676 case NEON::BI__builtin_neon_vsetq_lane_f64:
9677 // The vector type needs a cast for the v2f64 variant.
9678 Ops[1] =
9679 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 2));
9680 Ops.push_back(EmitScalarExpr(E->getArg(2)));
9681 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
9682
9683 case NEON::BI__builtin_neon_vget_lane_i8:
9684 case NEON::BI__builtin_neon_vdupb_lane_i8:
9685 Ops[0] =
9686 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 8));
9687 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9688 "vget_lane");
9689 case NEON::BI__builtin_neon_vgetq_lane_i8:
9690 case NEON::BI__builtin_neon_vdupb_laneq_i8:
9691 Ops[0] =
9692 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 16));
9693 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9694 "vgetq_lane");
9695 case NEON::BI__builtin_neon_vget_lane_i16:
9696 case NEON::BI__builtin_neon_vduph_lane_i16:
9697 Ops[0] =
9698 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 4));
9699 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9700 "vget_lane");
9701 case NEON::BI__builtin_neon_vgetq_lane_i16:
9702 case NEON::BI__builtin_neon_vduph_laneq_i16:
9703 Ops[0] =
9704 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 8));
9705 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9706 "vgetq_lane");
9707 case NEON::BI__builtin_neon_vget_lane_i32:
9708 case NEON::BI__builtin_neon_vdups_lane_i32:
9709 Ops[0] =
9710 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 2));
9711 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9712 "vget_lane");
9713 case NEON::BI__builtin_neon_vdups_lane_f32:
9714 Ops[0] =
9715 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
9716 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9717 "vdups_lane");
9718 case NEON::BI__builtin_neon_vgetq_lane_i32:
9719 case NEON::BI__builtin_neon_vdups_laneq_i32:
9720 Ops[0] =
9721 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
9722 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9723 "vgetq_lane");
9724 case NEON::BI__builtin_neon_vget_lane_i64:
9725 case NEON::BI__builtin_neon_vdupd_lane_i64:
9726 Ops[0] =
9727 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 1));
9728 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9729 "vget_lane");
9730 case NEON::BI__builtin_neon_vdupd_lane_f64:
9731 Ops[0] =
9732 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
9733 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9734 "vdupd_lane");
9735 case NEON::BI__builtin_neon_vgetq_lane_i64:
9736 case NEON::BI__builtin_neon_vdupd_laneq_i64:
9737 Ops[0] =
9738 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
9739 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9740 "vgetq_lane");
9741 case NEON::BI__builtin_neon_vget_lane_f32:
9742 Ops[0] =
9743 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
9744 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9745 "vget_lane");
9746 case NEON::BI__builtin_neon_vget_lane_f64:
9747 Ops[0] =
9748 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
9749 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9750 "vget_lane");
9751 case NEON::BI__builtin_neon_vgetq_lane_f32:
9752 case NEON::BI__builtin_neon_vdups_laneq_f32:
9753 Ops[0] =
9754 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 4));
9755 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9756 "vgetq_lane");
9757 case NEON::BI__builtin_neon_vgetq_lane_f64:
9758 case NEON::BI__builtin_neon_vdupd_laneq_f64:
9759 Ops[0] =
9760 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 2));
9761 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9762 "vgetq_lane");
9763 case NEON::BI__builtin_neon_vaddh_f16:
9764 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9765 return Builder.CreateFAdd(Ops[0], Ops[1], "vaddh");
9766 case NEON::BI__builtin_neon_vsubh_f16:
9767 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9768 return Builder.CreateFSub(Ops[0], Ops[1], "vsubh");
9769 case NEON::BI__builtin_neon_vmulh_f16:
9770 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9771 return Builder.CreateFMul(Ops[0], Ops[1], "vmulh");
9772 case NEON::BI__builtin_neon_vdivh_f16:
9773 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9774 return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh");
9775 case NEON::BI__builtin_neon_vfmah_f16:
9776 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
9777 return emitCallMaybeConstrainedFPBuiltin(
9778 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
9779 {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
9780 case NEON::BI__builtin_neon_vfmsh_f16: {
9781 // FIXME: This should be an fneg instruction:
9782 Value *Zero = llvm::ConstantFP::getZeroValueForNegation(HalfTy);
9783 Value* Sub = Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh");
9784
9785 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
9786 return emitCallMaybeConstrainedFPBuiltin(
9787 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
9788 {Sub, EmitScalarExpr(E->getArg(2)), Ops[0]});
9789 }
9790 case NEON::BI__builtin_neon_vaddd_s64:
9791 case NEON::BI__builtin_neon_vaddd_u64:
9792 return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd");
9793 case NEON::BI__builtin_neon_vsubd_s64:
9794 case NEON::BI__builtin_neon_vsubd_u64:
9795 return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd");
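// Scalar saturating doubling multiply-accumulate: widen the 16-bit scalars to
// vectors, form the saturating doubling multiply-long, extract lane 0, then
// do a saturating add or subtract into the accumulator.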
9796 case NEON::BI__builtin_neon_vqdmlalh_s16:
9797 case NEON::BI__builtin_neon_vqdmlslh_s16: {
9798 SmallVector<Value *, 2> ProductOps;
9799 ProductOps.push_back(vectorWrapScalar16(Ops[1]));
9800 ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
9801 auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
9802 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
9803 ProductOps, "vqdmlXl");
9804 Constant *CI = ConstantInt::get(SizeTy, 0);
9805 Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
9806
9807 unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16
9808 ? Intrinsic::aarch64_neon_sqadd
9809 : Intrinsic::aarch64_neon_sqsub;
9810 return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl");
9811 }
9812 case NEON::BI__builtin_neon_vqshlud_n_s64: {
9813 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9814 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
9815 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty),
9816 Ops, "vqshlu_n");
9817 }
9818 case NEON::BI__builtin_neon_vqshld_n_u64:
9819 case NEON::BI__builtin_neon_vqshld_n_s64: {
9820 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64
9821 ? Intrinsic::aarch64_neon_uqshl
9822 : Intrinsic::aarch64_neon_sqshl;
9823 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9824 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
9825 return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n");
9826 }
9827 case NEON::BI__builtin_neon_vrshrd_n_u64:
9828 case NEON::BI__builtin_neon_vrshrd_n_s64: {
9829 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64
9830 ? Intrinsic::aarch64_neon_urshl
9831 : Intrinsic::aarch64_neon_srshl;
9832 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9833 int SV = cast<ConstantInt>(Ops[1])->getSExtValue();
9834 Ops[1] = ConstantInt::get(Int64Ty, -SV);
9835 return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n");
9836 }
9837 case NEON::BI__builtin_neon_vrsrad_n_u64:
9838 case NEON::BI__builtin_neon_vrsrad_n_s64: {
9839 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64
9840 ? Intrinsic::aarch64_neon_urshl
9841 : Intrinsic::aarch64_neon_srshl;
9842 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
9843 Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2))));
9844 Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty),
9845 {Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)});
9846 return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty));
9847 }
9848 case NEON::BI__builtin_neon_vshld_n_s64:
9849 case NEON::BI__builtin_neon_vshld_n_u64: {
9850 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
9851 return Builder.CreateShl(
9852 Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n");
9853 }
9854 case NEON::BI__builtin_neon_vshrd_n_s64: {
9855 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
9856 return Builder.CreateAShr(
9857 Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
9858 Amt->getZExtValue())),
9859 "shrd_n");
9860 }
9861 case NEON::BI__builtin_neon_vshrd_n_u64: {
9862 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
9863 uint64_t ShiftAmt = Amt->getZExtValue();
9864 // Right-shifting an unsigned value by its size yields 0.
9865 if (ShiftAmt == 64)
9866 return ConstantInt::get(Int64Ty, 0);
9867 return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt),
9868 "shrd_n");
9869 }
9870 case NEON::BI__builtin_neon_vsrad_n_s64: {
9871 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
9872 Ops[1] = Builder.CreateAShr(
9873 Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
9874 Amt->getZExtValue())),
9875 "shrd_n");
9876 return Builder.CreateAdd(Ops[0], Ops[1]);
9877 }
9878 case NEON::BI__builtin_neon_vsrad_n_u64: {
9879 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
9880 uint64_t ShiftAmt = Amt->getZExtValue();
9881 // Right-shifting an unsigned value by its size yields 0.
9882 // As Op + 0 = Op, return Ops[0] directly.
9883 if (ShiftAmt == 64)
9884 return Ops[0];
9885 Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt),
9886 "shrd_n");
9887 return Builder.CreateAdd(Ops[0], Ops[1]);
9888 }
9889 case NEON::BI__builtin_neon_vqdmlalh_lane_s16:
9890 case NEON::BI__builtin_neon_vqdmlalh_laneq_s16:
9891 case NEON::BI__builtin_neon_vqdmlslh_lane_s16:
9892 case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: {
9893 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
9894 "lane");
9895 SmallVector<Value *, 2> ProductOps;
9896 ProductOps.push_back(vectorWrapScalar16(Ops[1]));
9897 ProductOps.push_back(vectorWrapScalar16(Ops[2]));
9898 auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
9899 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
9900 ProductOps, "vqdmlXl");
9901 Constant *CI = ConstantInt::get(SizeTy, 0);
9902 Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
9903 Ops.pop_back();
9904
9905 unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 ||
9906 BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16)
9907 ? Intrinsic::aarch64_neon_sqadd
9908 : Intrinsic::aarch64_neon_sqsub;
9909 return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl");
9910 }
9911 case NEON::BI__builtin_neon_vqdmlals_s32:
9912 case NEON::BI__builtin_neon_vqdmlsls_s32: {
9913 SmallVector<Value *, 2> ProductOps;
9914 ProductOps.push_back(Ops[1]);
9915 ProductOps.push_back(EmitScalarExpr(E->getArg(2)));
9916 Ops[1] =
9917 EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
9918 ProductOps, "vqdmlXl");
9919
9920 unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32
9921 ? Intrinsic::aarch64_neon_sqadd
9922 : Intrinsic::aarch64_neon_sqsub;
9923 return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl");
9924 }
9925 case NEON::BI__builtin_neon_vqdmlals_lane_s32:
9926 case NEON::BI__builtin_neon_vqdmlals_laneq_s32:
9927 case NEON::BI__builtin_neon_vqdmlsls_lane_s32:
9928 case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: {
9929 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
9930 "lane");
9931 SmallVector<Value *, 2> ProductOps;
9932 ProductOps.push_back(Ops[1]);
9933 ProductOps.push_back(Ops[2]);
9934 Ops[1] =
9935 EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
9936 ProductOps, "vqdmlXl");
9937 Ops.pop_back();
9938
9939 unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 ||
9940 BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32)
9941 ? Intrinsic::aarch64_neon_sqadd
9942 : Intrinsic::aarch64_neon_sqsub;
9943 return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
9944 }
9945 case NEON::BI__builtin_neon_vget_lane_bf16:
9946 case NEON::BI__builtin_neon_vduph_lane_bf16:
9947 case NEON::BI__builtin_neon_vduph_lane_f16: {
9948 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9949 "vget_lane");
9950 }
9951 case NEON::BI__builtin_neon_vgetq_lane_bf16:
9952 case NEON::BI__builtin_neon_vduph_laneq_bf16:
9953 case NEON::BI__builtin_neon_vduph_laneq_f16: {
9954 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9955 "vgetq_lane");
9956 }
9957
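// _InterlockedAdd returns the value after the addition, whereas the atomicrmw
// yields the value before it, so add Arg1 to the result.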
9958 case AArch64::BI_InterlockedAdd: {
9959 Value *Arg0 = EmitScalarExpr(E->getArg(0));
9960 Value *Arg1 = EmitScalarExpr(E->getArg(1));
9961 AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
9962 AtomicRMWInst::Add, Arg0, Arg1,
9963 llvm::AtomicOrdering::SequentiallyConsistent);
9964 return Builder.CreateAdd(RMWI, Arg1);
9965 }
9966 }
9967
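// The remaining builtins are overloaded NEON operations; rebuild the vector
// type from the type-flag immediate and bail out if it does not describe a
// usable vector type.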
9968 llvm::FixedVectorType *VTy = GetNeonType(this, Type);
9969 llvm::Type *Ty = VTy;
9970 if (!Ty)
9971 return nullptr;
9972
9973 // Not all intrinsics handled by the common case work for AArch64 yet, so only
9974 // defer to common code if it's been added to our special map.
9975 Builtin = findARMVectorIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
9976 AArch64SIMDIntrinsicsProvenSorted);
9977
9978 if (Builtin)
9979 return EmitCommonNeonBuiltinExpr(
9980 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
9981 Builtin->NameHint, Builtin->TypeModifier, E, Ops,
9982 /*never use addresses*/ Address::invalid(), Address::invalid(), Arch);
9983
9984 if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch))
9985 return V;
9986
9987 unsigned Int;
9988 switch (BuiltinID) {
9989 default: return nullptr;
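// vbsl: bitwise select, (Ops[0] & Ops[1]) | (~Ops[0] & Ops[2]), computed on
// the integer view of the vectors.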
9990 case NEON::BI__builtin_neon_vbsl_v:
9991 case NEON::BI__builtin_neon_vbslq_v: {
9992 llvm::Type *BitTy = llvm::VectorType::getInteger(VTy);
9993 Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl");
9994 Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl");
9995 Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl");
9996
9997 Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl");
9998 Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl");
9999 Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl");
10000 return Builder.CreateBitCast(Ops[0], Ty);
10001 }
10002 case NEON::BI__builtin_neon_vfma_lane_v:
10003 case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types
10004 // The ARM builtins (and instructions) have the addend as the first
10005 // operand, but the 'fma' intrinsics have it last. Swap it around here.
10006 Value *Addend = Ops[0];
10007 Value *Multiplicand = Ops[1];
10008 Value *LaneSource = Ops[2];
10009 Ops[0] = Multiplicand;
10010 Ops[1] = LaneSource;
10011 Ops[2] = Addend;
10012
10013 // Now adjust things to handle the lane access.
10014 auto *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v
10015 ? llvm::FixedVectorType::get(VTy->getElementType(),
10016 VTy->getNumElements() / 2)
10017 : VTy;
10018 llvm::Constant *cst = cast<Constant>(Ops[3]);
10019 Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(), cst);
10020 Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy);
10021 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");
10022
10023 Ops.pop_back();
10024 Int = Builder.getIsFPConstrained() ? Intrinsic::experimental_constrained_fma
10025 : Intrinsic::fma;
10026 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
10027 }
10028 case NEON::BI__builtin_neon_vfma_laneq_v: {
10029 auto *VTy = cast<llvm::FixedVectorType>(Ty);
10030 // v1f64 fma should be mapped to Neon scalar f64 fma
10031 if (VTy && VTy->getElementType() == DoubleTy) {
10032 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
10033 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
10034 llvm::FixedVectorType *VTy =
10035 GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, true));
10036 Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
10037 Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
10038 Value *Result;
10039 Result = emitCallMaybeConstrainedFPBuiltin(
10040 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma,
10041 DoubleTy, {Ops[1], Ops[2], Ops[0]});
10042 return Builder.CreateBitCast(Result, Ty);
10043 }
10044 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10045 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10046
10047 auto *STy = llvm::FixedVectorType::get(VTy->getElementType(),
10048 VTy->getNumElements() * 2);
10049 Ops[2] = Builder.CreateBitCast(Ops[2], STy);
10050 Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(),
10051 cast<ConstantInt>(Ops[3]));
10052 Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");
10053
10054 return emitCallMaybeConstrainedFPBuiltin(
10055 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
10056 {Ops[2], Ops[1], Ops[0]});
10057 }
10058 case NEON::BI__builtin_neon_vfmaq_laneq_v: {
10059 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10060 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10061
10062 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
10063 Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
10064 return emitCallMaybeConstrainedFPBuiltin(
10065 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
10066 {Ops[2], Ops[1], Ops[0]});
10067 }
10068 case NEON::BI__builtin_neon_vfmah_lane_f16:
10069 case NEON::BI__builtin_neon_vfmas_lane_f32:
10070 case NEON::BI__builtin_neon_vfmah_laneq_f16:
10071 case NEON::BI__builtin_neon_vfmas_laneq_f32:
10072 case NEON::BI__builtin_neon_vfmad_lane_f64:
10073 case NEON::BI__builtin_neon_vfmad_laneq_f64: {
10074 Ops.push_back(EmitScalarExpr(E->getArg(3)));
10075 llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
10076 Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
10077 return emitCallMaybeConstrainedFPBuiltin(
10078 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
10079 {Ops[1], Ops[2], Ops[0]});
10080 }
10081 case NEON::BI__builtin_neon_vmull_v:
10082 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10083 Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull;
10084 if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull;
10085 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
10086 case NEON::BI__builtin_neon_vmax_v:
10087 case NEON::BI__builtin_neon_vmaxq_v:
10088 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10089 Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax;
10090 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax;
10091 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
10092 case NEON::BI__builtin_neon_vmaxh_f16: {
10093 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10094 Int = Intrinsic::aarch64_neon_fmax;
10095 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmax");
10096 }
10097 case NEON::BI__builtin_neon_vmin_v:
10098 case NEON::BI__builtin_neon_vminq_v:
10099 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10100 Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin;
10101 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin;
10102 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
10103 case NEON::BI__builtin_neon_vminh_f16: {
10104 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10105 Int = Intrinsic::aarch64_neon_fmin;
10106 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmin");
10107 }
10108 case NEON::BI__builtin_neon_vabd_v:
10109 case NEON::BI__builtin_neon_vabdq_v:
10110 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10111 Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd;
10112 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd;
10113 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
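// vpadal: pairwise add-long the second operand and accumulate the result into
// the first operand.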
10114 case NEON::BI__builtin_neon_vpadal_v:
10115 case NEON::BI__builtin_neon_vpadalq_v: {
10116 unsigned ArgElts = VTy->getNumElements();
10117 llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
10118 unsigned BitWidth = EltTy->getBitWidth();
10119 auto *ArgTy = llvm::FixedVectorType::get(
10120 llvm::IntegerType::get(getLLVMContext(), BitWidth / 2), 2 * ArgElts);
10121 llvm::Type* Tys[2] = { VTy, ArgTy };
10122 Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
10123 SmallVector<llvm::Value*, 1> TmpOps;
10124 TmpOps.push_back(Ops[1]);
10125 Function *F = CGM.getIntrinsic(Int, Tys);
10126 llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal");
10127 llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType());
10128 return Builder.CreateAdd(tmp, addend);
10129 }
10130 case NEON::BI__builtin_neon_vpmin_v:
10131 case NEON::BI__builtin_neon_vpminq_v:
10132 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10133 Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp;
10134 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp;
10135 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
10136 case NEON::BI__builtin_neon_vpmax_v:
10137 case NEON::BI__builtin_neon_vpmaxq_v:
10138 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10139 Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp;
10140 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp;
10141 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
10142 case NEON::BI__builtin_neon_vminnm_v:
10143 case NEON::BI__builtin_neon_vminnmq_v:
10144 Int = Intrinsic::aarch64_neon_fminnm;
10145 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
10146 case NEON::BI__builtin_neon_vminnmh_f16:
10147 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10148 Int = Intrinsic::aarch64_neon_fminnm;
10149 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vminnm");
10150 case NEON::BI__builtin_neon_vmaxnm_v:
10151 case NEON::BI__builtin_neon_vmaxnmq_v:
10152 Int = Intrinsic::aarch64_neon_fmaxnm;
10153 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
10154 case NEON::BI__builtin_neon_vmaxnmh_f16:
10155 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10156 Int = Intrinsic::aarch64_neon_fmaxnm;
10157 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmaxnm");
10158 case NEON::BI__builtin_neon_vrecpss_f32: {
10159 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10160 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy),
10161 Ops, "vrecps");
10162 }
10163 case NEON::BI__builtin_neon_vrecpsd_f64:
10164 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10165 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy),
10166 Ops, "vrecps");
10167 case NEON::BI__builtin_neon_vrecpsh_f16:
10168 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10169 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy),
10170 Ops, "vrecps");
10171 case NEON::BI__builtin_neon_vqshrun_n_v:
10172 Int = Intrinsic::aarch64_neon_sqshrun;
10173 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
10174 case NEON::BI__builtin_neon_vqrshrun_n_v:
10175 Int = Intrinsic::aarch64_neon_sqrshrun;
10176 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
10177 case NEON::BI__builtin_neon_vqshrn_n_v:
10178 Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
10179 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
10180 case NEON::BI__builtin_neon_vrshrn_n_v:
10181 Int = Intrinsic::aarch64_neon_rshrn;
10182 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
10183 case NEON::BI__builtin_neon_vqrshrn_n_v:
10184 Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
10185 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
10186 case NEON::BI__builtin_neon_vrndah_f16: {
10187 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10188 Int = Builder.getIsFPConstrained()
10189 ? Intrinsic::experimental_constrained_round
10190 : Intrinsic::round;
10191 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda");
10192 }
10193 case NEON::BI__builtin_neon_vrnda_v:
10194 case NEON::BI__builtin_neon_vrndaq_v: {
10195 Int = Builder.getIsFPConstrained()
10196 ? Intrinsic::experimental_constrained_round
10197 : Intrinsic::round;
10198 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
10199 }
10200 case NEON::BI__builtin_neon_vrndih_f16: {
10201 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10202 Int = Builder.getIsFPConstrained()
10203 ? Intrinsic::experimental_constrained_nearbyint
10204 : Intrinsic::nearbyint;
10205 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi");
10206 }
10207 case NEON::BI__builtin_neon_vrndmh_f16: {
10208 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10209 Int = Builder.getIsFPConstrained()
10210 ? Intrinsic::experimental_constrained_floor
10211 : Intrinsic::floor;
10212 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm");
10213 }
10214 case NEON::BI__builtin_neon_vrndm_v:
10215 case NEON::BI__builtin_neon_vrndmq_v: {
10216 Int = Builder.getIsFPConstrained()
10217 ? Intrinsic::experimental_constrained_floor
10218 : Intrinsic::floor;
10219 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
10220 }
10221 case NEON::BI__builtin_neon_vrndnh_f16: {
10222 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10223 Int = Intrinsic::aarch64_neon_frintn;
10224 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndn");
10225 }
10226 case NEON::BI__builtin_neon_vrndn_v:
10227 case NEON::BI__builtin_neon_vrndnq_v: {
10228 Int = Intrinsic::aarch64_neon_frintn;
10229 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
10230 }
10231 case NEON::BI__builtin_neon_vrndns_f32: {
10232 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10233 Int = Intrinsic::aarch64_neon_frintn;
10234 return EmitNeonCall(CGM.getIntrinsic(Int, FloatTy), Ops, "vrndn");
10235 }
10236 case NEON::BI__builtin_neon_vrndph_f16: {
10237 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10238 Int = Builder.getIsFPConstrained()
10239 ? Intrinsic::experimental_constrained_ceil
10240 : Intrinsic::ceil;
10241 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp");
10242 }
10243 case NEON::BI__builtin_neon_vrndp_v:
10244 case NEON::BI__builtin_neon_vrndpq_v: {
10245 Int = Builder.getIsFPConstrained()
10246 ? Intrinsic::experimental_constrained_ceil
10247 : Intrinsic::ceil;
10248 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
10249 }
10250 case NEON::BI__builtin_neon_vrndxh_f16: {
10251 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10252 Int = Builder.getIsFPConstrained()
10253 ? Intrinsic::experimental_constrained_rint
10254 : Intrinsic::rint;
10255 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx");
10256 }
10257 case NEON::BI__builtin_neon_vrndx_v:
10258 case NEON::BI__builtin_neon_vrndxq_v: {
10259 Int = Builder.getIsFPConstrained()
10260 ? Intrinsic::experimental_constrained_rint
10261 : Intrinsic::rint;
10262 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
10263 }
10264 case NEON::BI__builtin_neon_vrndh_f16: {
10265 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10266 Int = Builder.getIsFPConstrained()
10267 ? Intrinsic::experimental_constrained_trunc
10268 : Intrinsic::trunc;
10269 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
10270 }
10271 case NEON::BI__builtin_neon_vrnd_v:
10272 case NEON::BI__builtin_neon_vrndq_v: {
10273 Int = Builder.getIsFPConstrained()
10274 ? Intrinsic::experimental_constrained_trunc
10275 : Intrinsic::trunc;
10276 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
10277 }
10278 case NEON::BI__builtin_neon_vcvt_f64_v:
10279 case NEON::BI__builtin_neon_vcvtq_f64_v:
10280 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10281 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
10282 return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
10283 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
10284 case NEON::BI__builtin_neon_vcvt_f64_f32: {
10285 assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
10286 "unexpected vcvt_f64_f32 builtin");
10287 NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
10288 Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
10289
10290 return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
10291 }
10292 case NEON::BI__builtin_neon_vcvt_f32_f64: {
10293 assert(Type.getEltType() == NeonTypeFlags::Float32 &&
10294 "unexpected vcvt_f32_f64 builtin");
10295 NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
10296 Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
10297
10298 return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
10299 }
10300 case NEON::BI__builtin_neon_vcvt_s32_v:
10301 case NEON::BI__builtin_neon_vcvt_u32_v:
10302 case NEON::BI__builtin_neon_vcvt_s64_v:
10303 case NEON::BI__builtin_neon_vcvt_u64_v:
10304 case NEON::BI__builtin_neon_vcvt_s16_v:
10305 case NEON::BI__builtin_neon_vcvt_u16_v:
10306 case NEON::BI__builtin_neon_vcvtq_s32_v:
10307 case NEON::BI__builtin_neon_vcvtq_u32_v:
10308 case NEON::BI__builtin_neon_vcvtq_s64_v:
10309 case NEON::BI__builtin_neon_vcvtq_u64_v:
10310 case NEON::BI__builtin_neon_vcvtq_s16_v:
10311 case NEON::BI__builtin_neon_vcvtq_u16_v: {
10312 Int =
10313 usgn ? Intrinsic::aarch64_neon_fcvtzu : Intrinsic::aarch64_neon_fcvtzs;
10314 llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)};
10315 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtz");
10316 }
10317 case NEON::BI__builtin_neon_vcvta_s16_v:
10318 case NEON::BI__builtin_neon_vcvta_u16_v:
10319 case NEON::BI__builtin_neon_vcvta_s32_v:
10320 case NEON::BI__builtin_neon_vcvtaq_s16_v:
10321 case NEON::BI__builtin_neon_vcvtaq_s32_v:
10322 case NEON::BI__builtin_neon_vcvta_u32_v:
10323 case NEON::BI__builtin_neon_vcvtaq_u16_v:
10324 case NEON::BI__builtin_neon_vcvtaq_u32_v:
10325 case NEON::BI__builtin_neon_vcvta_s64_v:
10326 case NEON::BI__builtin_neon_vcvtaq_s64_v:
10327 case NEON::BI__builtin_neon_vcvta_u64_v:
10328 case NEON::BI__builtin_neon_vcvtaq_u64_v: {
10329 Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
10330 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10331 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
10332 }
10333 case NEON::BI__builtin_neon_vcvtm_s16_v:
10334 case NEON::BI__builtin_neon_vcvtm_s32_v:
10335 case NEON::BI__builtin_neon_vcvtmq_s16_v:
10336 case NEON::BI__builtin_neon_vcvtmq_s32_v:
10337 case NEON::BI__builtin_neon_vcvtm_u16_v:
10338 case NEON::BI__builtin_neon_vcvtm_u32_v:
10339 case NEON::BI__builtin_neon_vcvtmq_u16_v:
10340 case NEON::BI__builtin_neon_vcvtmq_u32_v:
10341 case NEON::BI__builtin_neon_vcvtm_s64_v:
10342 case NEON::BI__builtin_neon_vcvtmq_s64_v:
10343 case NEON::BI__builtin_neon_vcvtm_u64_v:
10344 case NEON::BI__builtin_neon_vcvtmq_u64_v: {
10345 Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
10346 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10347 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
10348 }
10349 case NEON::BI__builtin_neon_vcvtn_s16_v:
10350 case NEON::BI__builtin_neon_vcvtn_s32_v:
10351 case NEON::BI__builtin_neon_vcvtnq_s16_v:
10352 case NEON::BI__builtin_neon_vcvtnq_s32_v:
10353 case NEON::BI__builtin_neon_vcvtn_u16_v:
10354 case NEON::BI__builtin_neon_vcvtn_u32_v:
10355 case NEON::BI__builtin_neon_vcvtnq_u16_v:
10356 case NEON::BI__builtin_neon_vcvtnq_u32_v:
10357 case NEON::BI__builtin_neon_vcvtn_s64_v:
10358 case NEON::BI__builtin_neon_vcvtnq_s64_v:
10359 case NEON::BI__builtin_neon_vcvtn_u64_v:
10360 case NEON::BI__builtin_neon_vcvtnq_u64_v: {
10361 Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
10362 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10363 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
10364 }
10365 case NEON::BI__builtin_neon_vcvtp_s16_v:
10366 case NEON::BI__builtin_neon_vcvtp_s32_v:
10367 case NEON::BI__builtin_neon_vcvtpq_s16_v:
10368 case NEON::BI__builtin_neon_vcvtpq_s32_v:
10369 case NEON::BI__builtin_neon_vcvtp_u16_v:
10370 case NEON::BI__builtin_neon_vcvtp_u32_v:
10371 case NEON::BI__builtin_neon_vcvtpq_u16_v:
10372 case NEON::BI__builtin_neon_vcvtpq_u32_v:
10373 case NEON::BI__builtin_neon_vcvtp_s64_v:
10374 case NEON::BI__builtin_neon_vcvtpq_s64_v:
10375 case NEON::BI__builtin_neon_vcvtp_u64_v:
10376 case NEON::BI__builtin_neon_vcvtpq_u64_v: {
10377 Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
10378 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10379 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
10380 }
10381 case NEON::BI__builtin_neon_vmulx_v:
10382 case NEON::BI__builtin_neon_vmulxq_v: {
10383 Int = Intrinsic::aarch64_neon_fmulx;
10384 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
10385 }
10386 case NEON::BI__builtin_neon_vmulxh_lane_f16:
10387 case NEON::BI__builtin_neon_vmulxh_laneq_f16: {
10388 // vmulx_lane should be mapped to Neon scalar mulx after
10389 // extracting the scalar element
10390 Ops.push_back(EmitScalarExpr(E->getArg(2)));
10391 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
10392 Ops.pop_back();
10393 Int = Intrinsic::aarch64_neon_fmulx;
10394 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmulx");
10395 }
10396 case NEON::BI__builtin_neon_vmul_lane_v:
10397 case NEON::BI__builtin_neon_vmul_laneq_v: {
10398 // v1f64 vmul_lane should be mapped to Neon scalar mul lane
10399 bool Quad = false;
10400 if (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v)
10401 Quad = true;
10402 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
10403 llvm::FixedVectorType *VTy =
10404 GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
10405 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
10406 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
10407 Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
10408 return Builder.CreateBitCast(Result, Ty);
10409 }
10410 case NEON::BI__builtin_neon_vnegd_s64:
10411 return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
10412 case NEON::BI__builtin_neon_vnegh_f16:
10413 return Builder.CreateFNeg(EmitScalarExpr(E->getArg(0)), "vnegh");
10414 case NEON::BI__builtin_neon_vpmaxnm_v:
10415 case NEON::BI__builtin_neon_vpmaxnmq_v: {
10416 Int = Intrinsic::aarch64_neon_fmaxnmp;
10417 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
10418 }
10419 case NEON::BI__builtin_neon_vpminnm_v:
10420 case NEON::BI__builtin_neon_vpminnmq_v: {
10421 Int = Intrinsic::aarch64_neon_fminnmp;
10422 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
10423 }
10424 case NEON::BI__builtin_neon_vsqrth_f16: {
10425 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10426 Int = Builder.getIsFPConstrained()
10427 ? Intrinsic::experimental_constrained_sqrt
10428 : Intrinsic::sqrt;
10429 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt");
10430 }
10431 case NEON::BI__builtin_neon_vsqrt_v:
10432 case NEON::BI__builtin_neon_vsqrtq_v: {
10433 Int = Builder.getIsFPConstrained()
10434 ? Intrinsic::experimental_constrained_sqrt
10435 : Intrinsic::sqrt;
10436 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10437 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
10438 }
10439 case NEON::BI__builtin_neon_vrbit_v:
10440 case NEON::BI__builtin_neon_vrbitq_v: {
10441 Int = Intrinsic::aarch64_neon_rbit;
10442 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
10443 }
10444 case NEON::BI__builtin_neon_vaddv_u8:
10445 // FIXME: These are handled by the AArch64 scalar code.
10446 usgn = true;
10447 LLVM_FALLTHROUGH;
10448 case NEON::BI__builtin_neon_vaddv_s8: {
10449 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
10450 Ty = Int32Ty;
10451 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10452 llvm::Type *Tys[2] = { Ty, VTy };
10453 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10454 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
10455 return Builder.CreateTrunc(Ops[0], Int8Ty);
10456 }
10457 case NEON::BI__builtin_neon_vaddv_u16:
10458 usgn = true;
10459 LLVM_FALLTHROUGH;
10460 case NEON::BI__builtin_neon_vaddv_s16: {
10461 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
10462 Ty = Int32Ty;
10463 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10464 llvm::Type *Tys[2] = { Ty, VTy };
10465 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10466 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
10467 return Builder.CreateTrunc(Ops[0], Int16Ty);
10468 }
10469 case NEON::BI__builtin_neon_vaddvq_u8:
10470 usgn = true;
10471 LLVM_FALLTHROUGH;
10472 case NEON::BI__builtin_neon_vaddvq_s8: {
10473 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
10474 Ty = Int32Ty;
10475 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10476 llvm::Type *Tys[2] = { Ty, VTy };
10477 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10478 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
10479 return Builder.CreateTrunc(Ops[0], Int8Ty);
10480 }
10481 case NEON::BI__builtin_neon_vaddvq_u16:
10482 usgn = true;
10483 LLVM_FALLTHROUGH;
10484 case NEON::BI__builtin_neon_vaddvq_s16: {
10485 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
10486 Ty = Int32Ty;
10487 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10488 llvm::Type *Tys[2] = { Ty, VTy };
10489 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10490 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
10491 return Builder.CreateTrunc(Ops[0], Int16Ty);
10492 }
10493 case NEON::BI__builtin_neon_vmaxv_u8: {
10494 Int = Intrinsic::aarch64_neon_umaxv;
10495 Ty = Int32Ty;
10496 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10497 llvm::Type *Tys[2] = { Ty, VTy };
10498 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10499 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10500 return Builder.CreateTrunc(Ops[0], Int8Ty);
10501 }
10502 case NEON::BI__builtin_neon_vmaxv_u16: {
10503 Int = Intrinsic::aarch64_neon_umaxv;
10504 Ty = Int32Ty;
10505 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10506 llvm::Type *Tys[2] = { Ty, VTy };
10507 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10508 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10509 return Builder.CreateTrunc(Ops[0], Int16Ty);
10510 }
10511 case NEON::BI__builtin_neon_vmaxvq_u8: {
10512 Int = Intrinsic::aarch64_neon_umaxv;
10513 Ty = Int32Ty;
10514 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10515 llvm::Type *Tys[2] = { Ty, VTy };
10516 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10517 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10518 return Builder.CreateTrunc(Ops[0], Int8Ty);
10519 }
10520 case NEON::BI__builtin_neon_vmaxvq_u16: {
10521 Int = Intrinsic::aarch64_neon_umaxv;
10522 Ty = Int32Ty;
10523 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10524 llvm::Type *Tys[2] = { Ty, VTy };
10525 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10526 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10527 return Builder.CreateTrunc(Ops[0], Int16Ty);
10528 }
10529 case NEON::BI__builtin_neon_vmaxv_s8: {
10530 Int = Intrinsic::aarch64_neon_smaxv;
10531 Ty = Int32Ty;
10532 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10533 llvm::Type *Tys[2] = { Ty, VTy };
10534 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10535 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10536 return Builder.CreateTrunc(Ops[0], Int8Ty);
10537 }
10538 case NEON::BI__builtin_neon_vmaxv_s16: {
10539 Int = Intrinsic::aarch64_neon_smaxv;
10540 Ty = Int32Ty;
10541 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10542 llvm::Type *Tys[2] = { Ty, VTy };
10543 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10544 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10545 return Builder.CreateTrunc(Ops[0], Int16Ty);
10546 }
10547 case NEON::BI__builtin_neon_vmaxvq_s8: {
10548 Int = Intrinsic::aarch64_neon_smaxv;
10549 Ty = Int32Ty;
10550 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10551 llvm::Type *Tys[2] = { Ty, VTy };
10552 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10553 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10554 return Builder.CreateTrunc(Ops[0], Int8Ty);
10555 }
10556 case NEON::BI__builtin_neon_vmaxvq_s16: {
10557 Int = Intrinsic::aarch64_neon_smaxv;
10558 Ty = Int32Ty;
10559 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10560 llvm::Type *Tys[2] = { Ty, VTy };
10561 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10562 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10563 return Builder.CreateTrunc(Ops[0], Int16Ty);
10564 }
10565 case NEON::BI__builtin_neon_vmaxv_f16: {
10566 Int = Intrinsic::aarch64_neon_fmaxv;
10567 Ty = HalfTy;
10568 VTy = llvm::FixedVectorType::get(HalfTy, 4);
10569 llvm::Type *Tys[2] = { Ty, VTy };
10570 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10571 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10572 return Builder.CreateTrunc(Ops[0], HalfTy);
10573 }
10574 case NEON::BI__builtin_neon_vmaxvq_f16: {
10575 Int = Intrinsic::aarch64_neon_fmaxv;
10576 Ty = HalfTy;
10577 VTy = llvm::FixedVectorType::get(HalfTy, 8);
10578 llvm::Type *Tys[2] = { Ty, VTy };
10579 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10580 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10581 return Builder.CreateTrunc(Ops[0], HalfTy);
10582 }
10583 case NEON::BI__builtin_neon_vminv_u8: {
10584 Int = Intrinsic::aarch64_neon_uminv;
10585 Ty = Int32Ty;
10586 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10587 llvm::Type *Tys[2] = { Ty, VTy };
10588 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10589 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10590 return Builder.CreateTrunc(Ops[0], Int8Ty);
10591 }
10592 case NEON::BI__builtin_neon_vminv_u16: {
10593 Int = Intrinsic::aarch64_neon_uminv;
10594 Ty = Int32Ty;
10595 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10596 llvm::Type *Tys[2] = { Ty, VTy };
10597 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10598 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10599 return Builder.CreateTrunc(Ops[0], Int16Ty);
10600 }
10601 case NEON::BI__builtin_neon_vminvq_u8: {
10602 Int = Intrinsic::aarch64_neon_uminv;
10603 Ty = Int32Ty;
10604 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10605 llvm::Type *Tys[2] = { Ty, VTy };
10606 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10607 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10608 return Builder.CreateTrunc(Ops[0], Int8Ty);
10609 }
10610 case NEON::BI__builtin_neon_vminvq_u16: {
10611 Int = Intrinsic::aarch64_neon_uminv;
10612 Ty = Int32Ty;
10613 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10614 llvm::Type *Tys[2] = { Ty, VTy };
10615 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10616 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10617 return Builder.CreateTrunc(Ops[0], Int16Ty);
10618 }
10619 case NEON::BI__builtin_neon_vminv_s8: {
10620 Int = Intrinsic::aarch64_neon_sminv;
10621 Ty = Int32Ty;
10622 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10623 llvm::Type *Tys[2] = { Ty, VTy };
10624 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10625 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10626 return Builder.CreateTrunc(Ops[0], Int8Ty);
10627 }
10628 case NEON::BI__builtin_neon_vminv_s16: {
10629 Int = Intrinsic::aarch64_neon_sminv;
10630 Ty = Int32Ty;
10631 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10632 llvm::Type *Tys[2] = { Ty, VTy };
10633 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10634 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10635 return Builder.CreateTrunc(Ops[0], Int16Ty);
10636 }
10637 case NEON::BI__builtin_neon_vminvq_s8: {
10638 Int = Intrinsic::aarch64_neon_sminv;
10639 Ty = Int32Ty;
10640 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10641 llvm::Type *Tys[2] = { Ty, VTy };
10642 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10643 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10644 return Builder.CreateTrunc(Ops[0], Int8Ty);
10645 }
10646 case NEON::BI__builtin_neon_vminvq_s16: {
10647 Int = Intrinsic::aarch64_neon_sminv;
10648 Ty = Int32Ty;
10649 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10650 llvm::Type *Tys[2] = { Ty, VTy };
10651 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10652 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10653 return Builder.CreateTrunc(Ops[0], Int16Ty);
10654 }
10655 case NEON::BI__builtin_neon_vminv_f16: {
10656 Int = Intrinsic::aarch64_neon_fminv;
10657 Ty = HalfTy;
10658 VTy = llvm::FixedVectorType::get(HalfTy, 4);
10659 llvm::Type *Tys[2] = { Ty, VTy };
10660 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10661 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10662 return Builder.CreateTrunc(Ops[0], HalfTy);
10663 }
10664 case NEON::BI__builtin_neon_vminvq_f16: {
10665 Int = Intrinsic::aarch64_neon_fminv;
10666 Ty = HalfTy;
10667 VTy = llvm::FixedVectorType::get(HalfTy, 8);
10668 llvm::Type *Tys[2] = { Ty, VTy };
10669 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10670 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10671 return Builder.CreateTrunc(Ops[0], HalfTy);
10672 }
10673 case NEON::BI__builtin_neon_vmaxnmv_f16: {
10674 Int = Intrinsic::aarch64_neon_fmaxnmv;
10675 Ty = HalfTy;
10676 VTy = llvm::FixedVectorType::get(HalfTy, 4);
10677 llvm::Type *Tys[2] = { Ty, VTy };
10678 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10679 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
10680 return Builder.CreateTrunc(Ops[0], HalfTy);
10681 }
10682 case NEON::BI__builtin_neon_vmaxnmvq_f16: {
10683 Int = Intrinsic::aarch64_neon_fmaxnmv;
10684 Ty = HalfTy;
10685 VTy = llvm::FixedVectorType::get(HalfTy, 8);
10686 llvm::Type *Tys[2] = { Ty, VTy };
10687 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10688 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
10689 return Builder.CreateTrunc(Ops[0], HalfTy);
10690 }
10691 case NEON::BI__builtin_neon_vminnmv_f16: {
10692 Int = Intrinsic::aarch64_neon_fminnmv;
10693 Ty = HalfTy;
10694 VTy = llvm::FixedVectorType::get(HalfTy, 4);
10695 llvm::Type *Tys[2] = { Ty, VTy };
10696 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10697 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
10698 return Builder.CreateTrunc(Ops[0], HalfTy);
10699 }
10700 case NEON::BI__builtin_neon_vminnmvq_f16: {
10701 Int = Intrinsic::aarch64_neon_fminnmv;
10702 Ty = HalfTy;
10703 VTy = llvm::FixedVectorType::get(HalfTy, 8);
10704 llvm::Type *Tys[2] = { Ty, VTy };
10705 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10706 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
10707 return Builder.CreateTrunc(Ops[0], HalfTy);
10708 }
10709 case NEON::BI__builtin_neon_vmul_n_f64: {
10710 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
10711 Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy);
10712 return Builder.CreateFMul(Ops[0], RHS);
10713 }
10714 case NEON::BI__builtin_neon_vaddlv_u8: {
10715 Int = Intrinsic::aarch64_neon_uaddlv;
10716 Ty = Int32Ty;
10717 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10718 llvm::Type *Tys[2] = { Ty, VTy };
10719 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10720 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10721 return Builder.CreateTrunc(Ops[0], Int16Ty);
10722 }
10723 case NEON::BI__builtin_neon_vaddlv_u16: {
10724 Int = Intrinsic::aarch64_neon_uaddlv;
10725 Ty = Int32Ty;
10726 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10727 llvm::Type *Tys[2] = { Ty, VTy };
10728 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10729 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10730 }
10731 case NEON::BI__builtin_neon_vaddlvq_u8: {
10732 Int = Intrinsic::aarch64_neon_uaddlv;
10733 Ty = Int32Ty;
10734 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10735 llvm::Type *Tys[2] = { Ty, VTy };
10736 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10737 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10738 return Builder.CreateTrunc(Ops[0], Int16Ty);
10739 }
10740 case NEON::BI__builtin_neon_vaddlvq_u16: {
10741 Int = Intrinsic::aarch64_neon_uaddlv;
10742 Ty = Int32Ty;
10743 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10744 llvm::Type *Tys[2] = { Ty, VTy };
10745 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10746 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10747 }
10748 case NEON::BI__builtin_neon_vaddlv_s8: {
10749 Int = Intrinsic::aarch64_neon_saddlv;
10750 Ty = Int32Ty;
10751 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10752 llvm::Type *Tys[2] = { Ty, VTy };
10753 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10754 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10755 return Builder.CreateTrunc(Ops[0], Int16Ty);
10756 }
10757 case NEON::BI__builtin_neon_vaddlv_s16: {
10758 Int = Intrinsic::aarch64_neon_saddlv;
10759 Ty = Int32Ty;
10760 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10761 llvm::Type *Tys[2] = { Ty, VTy };
10762 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10763 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10764 }
10765 case NEON::BI__builtin_neon_vaddlvq_s8: {
10766 Int = Intrinsic::aarch64_neon_saddlv;
10767 Ty = Int32Ty;
10768 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10769 llvm::Type *Tys[2] = { Ty, VTy };
10770 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10771 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10772 return Builder.CreateTrunc(Ops[0], Int16Ty);
10773 }
10774 case NEON::BI__builtin_neon_vaddlvq_s16: {
10775 Int = Intrinsic::aarch64_neon_saddlv;
10776 Ty = Int32Ty;
10777 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10778 llvm::Type *Tys[2] = { Ty, VTy };
10779 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10780 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10781 }
10782 case NEON::BI__builtin_neon_vsri_n_v:
10783 case NEON::BI__builtin_neon_vsriq_n_v: {
10784 Int = Intrinsic::aarch64_neon_vsri;
10785 llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
10786 return EmitNeonCall(Intrin, Ops, "vsri_n");
10787 }
10788 case NEON::BI__builtin_neon_vsli_n_v:
10789 case NEON::BI__builtin_neon_vsliq_n_v: {
10790 Int = Intrinsic::aarch64_neon_vsli;
10791 llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
10792 return EmitNeonCall(Intrin, Ops, "vsli_n");
10793 }
10794 case NEON::BI__builtin_neon_vsra_n_v:
10795 case NEON::BI__builtin_neon_vsraq_n_v:
10796 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10797 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
10798 return Builder.CreateAdd(Ops[0], Ops[1]);
10799 case NEON::BI__builtin_neon_vrsra_n_v:
10800 case NEON::BI__builtin_neon_vrsraq_n_v: {
10801 Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
10802 SmallVector<llvm::Value*,2> TmpOps;
10803 TmpOps.push_back(Ops[1]);
10804 TmpOps.push_back(Ops[2]);
10805 Function* F = CGM.getIntrinsic(Int, Ty);
10806 llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true);
10807 Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
10808 return Builder.CreateAdd(Ops[0], tmp);
10809 }
10810 case NEON::BI__builtin_neon_vld1_v:
10811 case NEON::BI__builtin_neon_vld1q_v: {
10812 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
10813 return Builder.CreateAlignedLoad(VTy, Ops[0], PtrOp0.getAlignment());
10814 }
10815 case NEON::BI__builtin_neon_vst1_v:
10816 case NEON::BI__builtin_neon_vst1q_v:
10817 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
10818 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
10819 return Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
10820 case NEON::BI__builtin_neon_vld1_lane_v:
10821 case NEON::BI__builtin_neon_vld1q_lane_v: {
10822 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10823 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
10824 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10825 Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
10826 PtrOp0.getAlignment());
10827 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
10828 }
10829 case NEON::BI__builtin_neon_vld1_dup_v:
10830 case NEON::BI__builtin_neon_vld1q_dup_v: {
10831 Value *V = UndefValue::get(Ty);
10832 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
10833 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10834 Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
10835 PtrOp0.getAlignment());
10836 llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
10837 Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
10838 return EmitNeonSplat(Ops[0], CI);
10839 }
10840 case NEON::BI__builtin_neon_vst1_lane_v:
10841 case NEON::BI__builtin_neon_vst1q_lane_v:
10842 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10843 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
10844 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
10845 return Builder.CreateAlignedStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty),
10846 PtrOp0.getAlignment());
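  // vld2/vld3/vld4 (and the dup/lane variants below) call the matching
  // aarch64.neon.ldN intrinsic, which returns a struct of N vectors, and
  // store that struct through the tuple pointer passed in Ops[0].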
10847 case NEON::BI__builtin_neon_vld2_v:
10848 case NEON::BI__builtin_neon_vld2q_v: {
10849 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
10850 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
10851 llvm::Type *Tys[2] = { VTy, PTy };
10852 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
10853 Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
10854 Ops[0] = Builder.CreateBitCast(Ops[0],
10855 llvm::PointerType::getUnqual(Ops[1]->getType()));
10856 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10857 }
10858 case NEON::BI__builtin_neon_vld3_v:
10859 case NEON::BI__builtin_neon_vld3q_v: {
10860 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
10861 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
10862 llvm::Type *Tys[2] = { VTy, PTy };
10863 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
10864 Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
10865 Ops[0] = Builder.CreateBitCast(Ops[0],
10866 llvm::PointerType::getUnqual(Ops[1]->getType()));
10867 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10868 }
10869 case NEON::BI__builtin_neon_vld4_v:
10870 case NEON::BI__builtin_neon_vld4q_v: {
10871 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
10872 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
10873 llvm::Type *Tys[2] = { VTy, PTy };
10874 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
10875 Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
10876 Ops[0] = Builder.CreateBitCast(Ops[0],
10877 llvm::PointerType::getUnqual(Ops[1]->getType()));
10878 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10879 }
10880 case NEON::BI__builtin_neon_vld2_dup_v:
10881 case NEON::BI__builtin_neon_vld2q_dup_v: {
10882 llvm::Type *PTy =
10883 llvm::PointerType::getUnqual(VTy->getElementType());
10884 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
10885 llvm::Type *Tys[2] = { VTy, PTy };
10886 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
10887 Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
10888 Ops[0] = Builder.CreateBitCast(Ops[0],
10889 llvm::PointerType::getUnqual(Ops[1]->getType()));
10890 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10891 }
10892 case NEON::BI__builtin_neon_vld3_dup_v:
10893 case NEON::BI__builtin_neon_vld3q_dup_v: {
10894 llvm::Type *PTy =
10895 llvm::PointerType::getUnqual(VTy->getElementType());
10896 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
10897 llvm::Type *Tys[2] = { VTy, PTy };
10898 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
10899 Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
10900 Ops[0] = Builder.CreateBitCast(Ops[0],
10901 llvm::PointerType::getUnqual(Ops[1]->getType()));
10902 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10903 }
10904 case NEON::BI__builtin_neon_vld4_dup_v:
10905 case NEON::BI__builtin_neon_vld4q_dup_v: {
10906 llvm::Type *PTy =
10907 llvm::PointerType::getUnqual(VTy->getElementType());
10908 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
10909 llvm::Type *Tys[2] = { VTy, PTy };
10910 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
10911 Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
10912 Ops[0] = Builder.CreateBitCast(Ops[0],
10913 llvm::PointerType::getUnqual(Ops[1]->getType()));
10914 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10915 }
10916 case NEON::BI__builtin_neon_vld2_lane_v:
10917 case NEON::BI__builtin_neon_vld2q_lane_v: {
10918 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
10919 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys);
10920 std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
10921 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10922 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
10923 Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
10924 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
10925 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
10926 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10927 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10928 }
10929 case NEON::BI__builtin_neon_vld3_lane_v:
10930 case NEON::BI__builtin_neon_vld3q_lane_v: {
10931 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
10932 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys);
10933 std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
10934 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10935 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
10936 Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
10937 Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
10938 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
10939 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
10940 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10941 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10942 }
10943 case NEON::BI__builtin_neon_vld4_lane_v:
10944 case NEON::BI__builtin_neon_vld4q_lane_v: {
10945 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
10946 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys);
10947 std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
10948 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10949 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
10950 Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
10951 Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
10952 Ops[5] = Builder.CreateZExt(Ops[5], Int64Ty);
10953 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
10954 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
10955 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10956 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10957 }
10958 case NEON::BI__builtin_neon_vst2_v:
10959 case NEON::BI__builtin_neon_vst2q_v: {
10960 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
10961 llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
10962 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys),
10963 Ops, "");
10964 }
10965 case NEON::BI__builtin_neon_vst2_lane_v:
10966 case NEON::BI__builtin_neon_vst2q_lane_v: {
10967 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
10968 Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
10969 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
10970 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
10971 Ops, "");
10972 }
10973 case NEON::BI__builtin_neon_vst3_v:
10974 case NEON::BI__builtin_neon_vst3q_v: {
10975 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
10976 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
10977 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys),
10978 Ops, "");
10979 }
10980 case NEON::BI__builtin_neon_vst3_lane_v:
10981 case NEON::BI__builtin_neon_vst3q_lane_v: {
10982 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
10983 Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
10984 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
10985 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
10986 Ops, "");
10987 }
10988 case NEON::BI__builtin_neon_vst4_v:
10989 case NEON::BI__builtin_neon_vst4q_v: {
10990 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
10991 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
10992 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys),
10993 Ops, "");
10994 }
10995 case NEON::BI__builtin_neon_vst4_lane_v:
10996 case NEON::BI__builtin_neon_vst4q_lane_v: {
10997 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
10998 Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
10999 llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
11000 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
11001 Ops, "");
11002 }
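  // vtrn/vuzp/vzip produce two result vectors: each one is built with a
  // shufflevector over the two inputs and stored to consecutive slots of
  // the result pointer in Ops[0].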
11003 case NEON::BI__builtin_neon_vtrn_v:
11004 case NEON::BI__builtin_neon_vtrnq_v: {
11005 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
11006 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11007 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11008 Value *SV = nullptr;
11009
11010 for (unsigned vi = 0; vi != 2; ++vi) {
11011 SmallVector<int, 16> Indices;
11012 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
11013 Indices.push_back(i+vi);
11014 Indices.push_back(i+e+vi);
11015 }
11016 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
11017 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
11018 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
11019 }
11020 return SV;
11021 }
11022 case NEON::BI__builtin_neon_vuzp_v:
11023 case NEON::BI__builtin_neon_vuzpq_v: {
11024 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
11025 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11026 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11027 Value *SV = nullptr;
11028
11029 for (unsigned vi = 0; vi != 2; ++vi) {
11030 SmallVector<int, 16> Indices;
11031 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
11032 Indices.push_back(2*i+vi);
11033
11034 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
11035 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
11036 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
11037 }
11038 return SV;
11039 }
11040 case NEON::BI__builtin_neon_vzip_v:
11041 case NEON::BI__builtin_neon_vzipq_v: {
11042 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
11043 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11044 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11045 Value *SV = nullptr;
11046
11047 for (unsigned vi = 0; vi != 2; ++vi) {
11048 SmallVector<int, 16> Indices;
11049 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
11050 Indices.push_back((i + vi*e) >> 1);
11051 Indices.push_back(((i + vi*e) >> 1)+e);
11052 }
11053 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
11054 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
11055 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
11056 }
11057 return SV;
11058 }
11059 case NEON::BI__builtin_neon_vqtbl1q_v: {
11060 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty),
11061 Ops, "vtbl1");
11062 }
11063 case NEON::BI__builtin_neon_vqtbl2q_v: {
11064 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty),
11065 Ops, "vtbl2");
11066 }
11067 case NEON::BI__builtin_neon_vqtbl3q_v: {
11068 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty),
11069 Ops, "vtbl3");
11070 }
11071 case NEON::BI__builtin_neon_vqtbl4q_v: {
11072 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty),
11073 Ops, "vtbl4");
11074 }
11075 case NEON::BI__builtin_neon_vqtbx1q_v: {
11076 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty),
11077 Ops, "vtbx1");
11078 }
11079 case NEON::BI__builtin_neon_vqtbx2q_v: {
11080 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty),
11081 Ops, "vtbx2");
11082 }
11083 case NEON::BI__builtin_neon_vqtbx3q_v: {
11084 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty),
11085 Ops, "vtbx3");
11086 }
11087 case NEON::BI__builtin_neon_vqtbx4q_v: {
11088 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty),
11089 Ops, "vtbx4");
11090 }
11091 case NEON::BI__builtin_neon_vsqadd_v:
11092 case NEON::BI__builtin_neon_vsqaddq_v: {
11093 Int = Intrinsic::aarch64_neon_usqadd;
11094 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
11095 }
11096 case NEON::BI__builtin_neon_vuqadd_v:
11097 case NEON::BI__builtin_neon_vuqaddq_v: {
11098 Int = Intrinsic::aarch64_neon_suqadd;
11099 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
11100 }
11101 }
11102 }
11103
11104 Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
11105 const CallExpr *E) {
11106 assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
11107 BuiltinID == BPF::BI__builtin_btf_type_id ||
11108 BuiltinID == BPF::BI__builtin_preserve_type_info ||
11109 BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
11110 "unexpected BPF builtin");
11111
11112   // A sequence number, injected into IR builtin functions, to
11113   // prevent CSE given that the only difference between the functions
11114   // may just be the debuginfo metadata.
11115 static uint32_t BuiltinSeqNum;
11116
11117 switch (BuiltinID) {
11118 default:
11119 llvm_unreachable("Unexpected BPF builtin");
11120 case BPF::BI__builtin_preserve_field_info: {
11121 const Expr *Arg = E->getArg(0);
11122 bool IsBitField = Arg->IgnoreParens()->getObjectKind() == OK_BitField;
11123
11124 if (!getDebugInfo()) {
11125 CGM.Error(E->getExprLoc(),
11126 "using __builtin_preserve_field_info() without -g");
11127 return IsBitField ? EmitLValue(Arg).getBitFieldPointer()
11128 : EmitLValue(Arg).getPointer(*this);
11129 }
11130
11131 // Enable underlying preserve_*_access_index() generation.
11132 bool OldIsInPreservedAIRegion = IsInPreservedAIRegion;
11133 IsInPreservedAIRegion = true;
11134 Value *FieldAddr = IsBitField ? EmitLValue(Arg).getBitFieldPointer()
11135 : EmitLValue(Arg).getPointer(*this);
11136 IsInPreservedAIRegion = OldIsInPreservedAIRegion;
11137
11138 ConstantInt *C = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
11139 Value *InfoKind = ConstantInt::get(Int64Ty, C->getSExtValue());
11140
11141     // Build the IR for the preserve_field_info intrinsic.
11142 llvm::Function *FnGetFieldInfo = llvm::Intrinsic::getDeclaration(
11143 &CGM.getModule(), llvm::Intrinsic::bpf_preserve_field_info,
11144 {FieldAddr->getType()});
11145 return Builder.CreateCall(FnGetFieldInfo, {FieldAddr, InfoKind});
11146 }
11147 case BPF::BI__builtin_btf_type_id:
11148 case BPF::BI__builtin_preserve_type_info: {
11149 if (!getDebugInfo()) {
11150 CGM.Error(E->getExprLoc(), "using builtin function without -g");
11151 return nullptr;
11152 }
11153
11154 const Expr *Arg0 = E->getArg(0);
11155 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
11156 Arg0->getType(), Arg0->getExprLoc());
11157
11158 ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
11159 Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
11160 Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++);
11161
11162 llvm::Function *FnDecl;
11163 if (BuiltinID == BPF::BI__builtin_btf_type_id)
11164 FnDecl = llvm::Intrinsic::getDeclaration(
11165 &CGM.getModule(), llvm::Intrinsic::bpf_btf_type_id, {});
11166 else
11167 FnDecl = llvm::Intrinsic::getDeclaration(
11168 &CGM.getModule(), llvm::Intrinsic::bpf_preserve_type_info, {});
11169 CallInst *Fn = Builder.CreateCall(FnDecl, {SeqNumVal, FlagValue});
11170 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
11171 return Fn;
11172 }
11173 case BPF::BI__builtin_preserve_enum_value: {
11174 if (!getDebugInfo()) {
11175 CGM.Error(E->getExprLoc(), "using builtin function without -g");
11176 return nullptr;
11177 }
11178
11179 const Expr *Arg0 = E->getArg(0);
11180 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
11181 Arg0->getType(), Arg0->getExprLoc());
11182
11183 // Find enumerator
11184 const auto *UO = cast<UnaryOperator>(Arg0->IgnoreParens());
11185 const auto *CE = cast<CStyleCastExpr>(UO->getSubExpr());
11186 const auto *DR = cast<DeclRefExpr>(CE->getSubExpr());
11187 const auto *Enumerator = cast<EnumConstantDecl>(DR->getDecl());
11188
11189 auto &InitVal = Enumerator->getInitVal();
11190 std::string InitValStr;
11191 if (InitVal.isNegative() || InitVal > uint64_t(INT64_MAX))
11192 InitValStr = std::to_string(InitVal.getSExtValue());
11193 else
11194 InitValStr = std::to_string(InitVal.getZExtValue());
11195 std::string EnumStr = Enumerator->getNameAsString() + ":" + InitValStr;
11196 Value *EnumStrVal = Builder.CreateGlobalStringPtr(EnumStr);
11197
11198 ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
11199 Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
11200 Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++);
11201
11202 llvm::Function *IntrinsicFn = llvm::Intrinsic::getDeclaration(
11203 &CGM.getModule(), llvm::Intrinsic::bpf_preserve_enum_value, {});
11204 CallInst *Fn =
11205 Builder.CreateCall(IntrinsicFn, {SeqNumVal, EnumStrVal, FlagValue});
11206 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
11207 return Fn;
11208 }
11209 }
11210 }
11211
11212 llvm::Value *CodeGenFunction::
11213 BuildVector(ArrayRef<llvm::Value*> Ops) {
11214 assert((Ops.size() & (Ops.size() - 1)) == 0 &&
11215 "Not a power-of-two sized vector!");
11216 bool AllConstants = true;
11217 for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
11218 AllConstants &= isa<Constant>(Ops[i]);
11219
11220 // If this is a constant vector, create a ConstantVector.
11221 if (AllConstants) {
11222 SmallVector<llvm::Constant*, 16> CstOps;
11223 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
11224 CstOps.push_back(cast<Constant>(Ops[i]));
11225 return llvm::ConstantVector::get(CstOps);
11226 }
11227
11228 // Otherwise, insertelement the values to build the vector.
11229 Value *Result = llvm::UndefValue::get(
11230 llvm::FixedVectorType::get(Ops[0]->getType(), Ops.size()));
11231
11232 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
11233 Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));
11234
11235 return Result;
11236 }
11237
11238 // Convert the mask from an integer type to a vector of i1.
11239 static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask,
11240 unsigned NumElts) {
11241
11242 auto *MaskTy = llvm::FixedVectorType::get(
11243 CGF.Builder.getInt1Ty(),
11244 cast<IntegerType>(Mask->getType())->getBitWidth());
11245 Value *MaskVec = CGF.Builder.CreateBitCast(Mask, MaskTy);
11246
11247   // If we have fewer than 8 elements, then the starting mask was an i8 and
11248   // we need to extract down to the right number of elements.
11249 if (NumElts < 8) {
11250 int Indices[4];
11251 for (unsigned i = 0; i != NumElts; ++i)
11252 Indices[i] = i;
11253 MaskVec = CGF.Builder.CreateShuffleVector(MaskVec, MaskVec,
11254 makeArrayRef(Indices, NumElts),
11255 "extract");
11256 }
11257 return MaskVec;
11258 }
11259
11260 static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
11261 Align Alignment) {
11262   // Cast the pointer to the right type.
11263 Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
11264 llvm::PointerType::getUnqual(Ops[1]->getType()));
11265
11266 Value *MaskVec = getMaskVecValue(
11267 CGF, Ops[2],
11268 cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements());
11269
11270 return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Alignment, MaskVec);
11271 }
11272
11273 static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
11274 Align Alignment) {
11275   // Cast the pointer to the right type.
11276 Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
11277 llvm::PointerType::getUnqual(Ops[1]->getType()));
11278
11279 Value *MaskVec = getMaskVecValue(
11280 CGF, Ops[2],
11281 cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements());
11282
11283 return CGF.Builder.CreateMaskedLoad(Ptr, Alignment, MaskVec, Ops[1]);
11284 }
11285
11286 static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
11287 ArrayRef<Value *> Ops) {
11288 auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
11289 llvm::Type *PtrTy = ResultTy->getElementType();
11290
11291   // Cast the pointer to the element type.
11292 Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
11293 llvm::PointerType::getUnqual(PtrTy));
11294
11295 Value *MaskVec = getMaskVecValue(
11296 CGF, Ops[2], cast<FixedVectorType>(ResultTy)->getNumElements());
11297
11298 llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload,
11299 ResultTy);
11300 return CGF.Builder.CreateCall(F, { Ptr, MaskVec, Ops[1] });
11301 }
11302
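// Emit an in-register masked compress (vpcompress) or expand (vpexpand) by
// calling the corresponding AVX-512 mask intrinsic with the converted mask.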
11303 static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
11304 ArrayRef<Value *> Ops,
11305 bool IsCompress) {
11306 auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
11307
11308 Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
11309
11310 Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
11311 : Intrinsic::x86_avx512_mask_expand;
11312 llvm::Function *F = CGF.CGM.getIntrinsic(IID, ResultTy);
11313 return CGF.Builder.CreateCall(F, { Ops[0], Ops[1], MaskVec });
11314 }
11315
11316 static Value *EmitX86CompressStore(CodeGenFunction &CGF,
11317 ArrayRef<Value *> Ops) {
11318 auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
11319 llvm::Type *PtrTy = ResultTy->getElementType();
11320
11321   // Cast the pointer to the element type.
11322 Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
11323 llvm::PointerType::getUnqual(PtrTy));
11324
11325 Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
11326
11327 llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore,
11328 ResultTy);
11329 return CGF.Builder.CreateCall(F, { Ops[1], Ptr, MaskVec });
11330 }
11331
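// Emit a logic op on two integer masks (kand/kor/kxor-style builtins) by
// converting them to i1 vectors, applying the binary op, and casting back.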
11332 static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc,
11333 ArrayRef<Value *> Ops,
11334 bool InvertLHS = false) {
11335 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
11336 Value *LHS = getMaskVecValue(CGF, Ops[0], NumElts);
11337 Value *RHS = getMaskVecValue(CGF, Ops[1], NumElts);
11338
11339 if (InvertLHS)
11340 LHS = CGF.Builder.CreateNot(LHS);
11341
11342 return CGF.Builder.CreateBitCast(CGF.Builder.CreateBinOp(Opc, LHS, RHS),
11343 Ops[0]->getType());
11344 }
11345
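// Emit a funnel shift using the generic fshl/fshr intrinsics; a scalar shift
// amount is cast and splatted to the vector type first.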
11346 static Value *EmitX86FunnelShift(CodeGenFunction &CGF, Value *Op0, Value *Op1,
11347 Value *Amt, bool IsRight) {
11348 llvm::Type *Ty = Op0->getType();
11349
11350   // The amount may be a scalar immediate, in which case create a splat vector.
11351   // Funnel shift amounts are treated as modulo, and the types are all
11352   // power-of-2, so we only care about the lowest log2 bits anyway.
11353 if (Amt->getType() != Ty) {
11354 unsigned NumElts = cast<llvm::FixedVectorType>(Ty)->getNumElements();
11355 Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
11356 Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt);
11357 }
11358
11359 unsigned IID = IsRight ? Intrinsic::fshr : Intrinsic::fshl;
11360 Function *F = CGF.CGM.getIntrinsic(IID, Ty);
11361 return CGF.Builder.CreateCall(F, {Op0, Op1, Amt});
11362 }
11363
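// Emit an XOP vpcom/vpcomu comparison: the 3-bit immediate selects the
// predicate (6 is always-false, 7 is always-true) and the i1 compare result
// is sign-extended back to the operand type.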
11364 static Value *EmitX86vpcom(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
11365 bool IsSigned) {
11366 Value *Op0 = Ops[0];
11367 Value *Op1 = Ops[1];
11368 llvm::Type *Ty = Op0->getType();
11369 uint64_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
11370
11371 CmpInst::Predicate Pred;
11372 switch (Imm) {
11373 case 0x0:
11374 Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
11375 break;
11376 case 0x1:
11377 Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
11378 break;
11379 case 0x2:
11380 Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
11381 break;
11382 case 0x3:
11383 Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
11384 break;
11385 case 0x4:
11386 Pred = ICmpInst::ICMP_EQ;
11387 break;
11388 case 0x5:
11389 Pred = ICmpInst::ICMP_NE;
11390 break;
11391 case 0x6:
11392 return llvm::Constant::getNullValue(Ty); // FALSE
11393 case 0x7:
11394 return llvm::Constant::getAllOnesValue(Ty); // TRUE
11395 default:
11396 llvm_unreachable("Unexpected XOP vpcom/vpcomu predicate");
11397 }
11398
11399 Value *Cmp = CGF.Builder.CreateICmp(Pred, Op0, Op1);
11400 Value *Res = CGF.Builder.CreateSExt(Cmp, Ty);
11401 return Res;
11402 }
11403
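// Emit a masked vector select: convert the integer mask to an i1 vector and
// choose between Op0 and Op1 per element.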
11404 static Value *EmitX86Select(CodeGenFunction &CGF,
11405 Value *Mask, Value *Op0, Value *Op1) {
11406
11407   // If the mask is all ones, just return the first argument.
11408 if (const auto *C = dyn_cast<Constant>(Mask))
11409 if (C->isAllOnesValue())
11410 return Op0;
11411
11412 Mask = getMaskVecValue(
11413 CGF, Mask, cast<llvm::FixedVectorType>(Op0->getType())->getNumElements());
11414
11415 return CGF.Builder.CreateSelect(Mask, Op0, Op1);
11416 }
11417
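// Emit a scalar masked select: only bit 0 of the mask is used to choose
// between Op0 and Op1.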
11418 static Value *EmitX86ScalarSelect(CodeGenFunction &CGF,
11419 Value *Mask, Value *Op0, Value *Op1) {
11420   // If the mask is all ones, just return the first argument.
11421 if (const auto *C = dyn_cast<Constant>(Mask))
11422 if (C->isAllOnesValue())
11423 return Op0;
11424
11425 auto *MaskTy = llvm::FixedVectorType::get(
11426 CGF.Builder.getInt1Ty(), Mask->getType()->getIntegerBitWidth());
11427 Mask = CGF.Builder.CreateBitCast(Mask, MaskTy);
11428 Mask = CGF.Builder.CreateExtractElement(Mask, (uint64_t)0);
11429 return CGF.Builder.CreateSelect(Mask, Op0, Op1);
11430 }
11431
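// Combine a compare result with an optional incoming mask and pack it back
// into an integer mask; results narrower than 8 elements are padded with
// zero lanes up to 8 bits.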
11432 static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp,
11433 unsigned NumElts, Value *MaskIn) {
11434 if (MaskIn) {
11435 const auto *C = dyn_cast<Constant>(MaskIn);
11436 if (!C || !C->isAllOnesValue())
11437 Cmp = CGF.Builder.CreateAnd(Cmp, getMaskVecValue(CGF, MaskIn, NumElts));
11438 }
11439
11440 if (NumElts < 8) {
11441 int Indices[8];
11442 for (unsigned i = 0; i != NumElts; ++i)
11443 Indices[i] = i;
11444 for (unsigned i = NumElts; i != 8; ++i)
11445 Indices[i] = i % NumElts + NumElts;
11446 Cmp = CGF.Builder.CreateShuffleVector(
11447 Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices);
11448 }
11449
11450 return CGF.Builder.CreateBitCast(Cmp,
11451 IntegerType::get(CGF.getLLVMContext(),
11452 std::max(NumElts, 8U)));
11453 }
11454
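// Emit an AVX-512 integer compare-to-mask: CC 3 and 7 are always-false and
// always-true, the other values map to ICmp predicates, and an optional
// fourth operand supplies an incoming mask that is ANDed into the result.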
11455 static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
11456 bool Signed, ArrayRef<Value *> Ops) {
11457 assert((Ops.size() == 2 || Ops.size() == 4) &&
11458 "Unexpected number of arguments");
11459 unsigned NumElts =
11460 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
11461 Value *Cmp;
11462
11463 if (CC == 3) {
11464 Cmp = Constant::getNullValue(
11465 llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts));
11466 } else if (CC == 7) {
11467 Cmp = Constant::getAllOnesValue(
11468 llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts));
11469 } else {
11470 ICmpInst::Predicate Pred;
11471 switch (CC) {
11472 default: llvm_unreachable("Unknown condition code");
11473 case 0: Pred = ICmpInst::ICMP_EQ; break;
11474 case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
11475 case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
11476 case 4: Pred = ICmpInst::ICMP_NE; break;
11477 case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
11478 case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
11479 }
11480 Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
11481 }
11482
11483 Value *MaskIn = nullptr;
11484 if (Ops.size() == 4)
11485 MaskIn = Ops[3];
11486
11487 return EmitX86MaskedCompareResult(CGF, Cmp, NumElts, MaskIn);
11488 }
11489
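// Convert a vector to a mask by testing the sign bit of each element
// (a signed less-than compare against zero).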
11490 static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) {
11491 Value *Zero = Constant::getNullValue(In->getType());
11492 return EmitX86MaskedCompare(CGF, 1, true, { In, Zero });
11493 }
11494
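// Emit a masked int-to-FP conversion: use the AVX-512 rounding intrinsic when
// a rounding mode other than _MM_FROUND_CUR_DIRECTION (4) is requested, then
// apply the write-mask via a select.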
11495 static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF,
11496 ArrayRef<Value *> Ops, bool IsSigned) {
11497 unsigned Rnd = cast<llvm::ConstantInt>(Ops[3])->getZExtValue();
11498 llvm::Type *Ty = Ops[1]->getType();
11499
11500 Value *Res;
11501 if (Rnd != 4) {
11502 Intrinsic::ID IID = IsSigned ? Intrinsic::x86_avx512_sitofp_round
11503 : Intrinsic::x86_avx512_uitofp_round;
11504 Function *F = CGF.CGM.getIntrinsic(IID, { Ty, Ops[0]->getType() });
11505 Res = CGF.Builder.CreateCall(F, { Ops[0], Ops[3] });
11506 } else {
11507 Res = IsSigned ? CGF.Builder.CreateSIToFP(Ops[0], Ty)
11508 : CGF.Builder.CreateUIToFP(Ops[0], Ty);
11509 }
11510
11511 return EmitX86Select(CGF, Ops[2], Res, Ops[1]);
11512 }
11513
11514 // Lowers X86 FMA intrinsics to IR.
11515 static Value *EmitX86FMAExpr(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
11516 unsigned BuiltinID, bool IsAddSub) {
11517
11518 bool Subtract = false;
11519 Intrinsic::ID IID = Intrinsic::not_intrinsic;
11520 switch (BuiltinID) {
11521 default: break;
11522 case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
11523 Subtract = true;
11524 LLVM_FALLTHROUGH;
11525 case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
11526 case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
11527 case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
11528 IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; break;
11529 case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
11530 Subtract = true;
11531 LLVM_FALLTHROUGH;
11532 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
11533 case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
11534 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
11535 IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; break;
11536 case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
11537 Subtract = true;
11538 LLVM_FALLTHROUGH;
11539 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
11540 case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
11541 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
11542 IID = llvm::Intrinsic::x86_avx512_vfmaddsub_ps_512;
11543 break;
11544 case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
11545 Subtract = true;
11546 LLVM_FALLTHROUGH;
11547 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
11548 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
11549 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
11550 IID = llvm::Intrinsic::x86_avx512_vfmaddsub_pd_512;
11551 break;
11552 }
11553
11554 Value *A = Ops[0];
11555 Value *B = Ops[1];
11556 Value *C = Ops[2];
11557
11558 if (Subtract)
11559 C = CGF.Builder.CreateFNeg(C);
11560
11561 Value *Res;
11562
11563 // Only handle in case of _MM_FROUND_CUR_DIRECTION/4 (no rounding).
11564 if (IID != Intrinsic::not_intrinsic &&
11565 (cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4 ||
11566 IsAddSub)) {
11567 Function *Intr = CGF.CGM.getIntrinsic(IID);
11568 Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() });
11569 } else {
11570 llvm::Type *Ty = A->getType();
11571 Function *FMA;
11572 if (CGF.Builder.getIsFPConstrained()) {
11573 FMA = CGF.CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, Ty);
11574 Res = CGF.Builder.CreateConstrainedFPCall(FMA, {A, B, C});
11575 } else {
11576 FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
11577 Res = CGF.Builder.CreateCall(FMA, {A, B, C});
11578 }
11579 }
11580
11581 // Handle any required masking.
11582 Value *MaskFalseVal = nullptr;
11583 switch (BuiltinID) {
11584 case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
11585 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
11586 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
11587 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
11588 MaskFalseVal = Ops[0];
11589 break;
11590 case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
11591 case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
11592 case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
11593 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
11594 MaskFalseVal = Constant::getNullValue(Ops[0]->getType());
11595 break;
11596 case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
11597 case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
11598 case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
11599 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
11600 case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
11601 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
11602 case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
11603 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
11604 MaskFalseVal = Ops[2];
11605 break;
11606 }
11607
11608 if (MaskFalseVal)
11609 return EmitX86Select(CGF, Ops[3], Res, MaskFalseVal);
11610
11611 return Res;
11612 }
11613
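// Emit a scalar (SS/SD) FMA: operate on element 0 only, honoring an optional
// rounding immediate and mask, then reinsert the result into Upper.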
11614 static Value *
11615 EmitScalarFMAExpr(CodeGenFunction &CGF, MutableArrayRef<Value *> Ops,
11616 Value *Upper, bool ZeroMask = false, unsigned PTIdx = 0,
11617 bool NegAcc = false) {
11618 unsigned Rnd = 4;
11619 if (Ops.size() > 4)
11620 Rnd = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
11621
11622 if (NegAcc)
11623 Ops[2] = CGF.Builder.CreateFNeg(Ops[2]);
11624
11625 Ops[0] = CGF.Builder.CreateExtractElement(Ops[0], (uint64_t)0);
11626 Ops[1] = CGF.Builder.CreateExtractElement(Ops[1], (uint64_t)0);
11627 Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], (uint64_t)0);
11628 Value *Res;
11629 if (Rnd != 4) {
11630 Intrinsic::ID IID = Ops[0]->getType()->getPrimitiveSizeInBits() == 32 ?
11631 Intrinsic::x86_avx512_vfmadd_f32 :
11632 Intrinsic::x86_avx512_vfmadd_f64;
11633 Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
11634 {Ops[0], Ops[1], Ops[2], Ops[4]});
11635 } else if (CGF.Builder.getIsFPConstrained()) {
11636 Function *FMA = CGF.CGM.getIntrinsic(
11637 Intrinsic::experimental_constrained_fma, Ops[0]->getType());
11638 Res = CGF.Builder.CreateConstrainedFPCall(FMA, Ops.slice(0, 3));
11639 } else {
11640 Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType());
11641 Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3));
11642 }
11643 // If we have more than 3 arguments, we need to do masking.
11644 if (Ops.size() > 3) {
11645 Value *PassThru = ZeroMask ? Constant::getNullValue(Res->getType())
11646 : Ops[PTIdx];
11647
11648     // If we negated the accumulator and it is also the PassThru value, we need
11649     // to bypass the negate. Conveniently, Upper should be the same value in
11650     // this case.
11651 if (NegAcc && PTIdx == 2)
11652 PassThru = CGF.Builder.CreateExtractElement(Upper, (uint64_t)0);
11653
11654 Res = EmitX86ScalarSelect(CGF, Ops[3], Res, PassThru);
11655 }
11656 return CGF.Builder.CreateInsertElement(Upper, Res, (uint64_t)0);
11657 }
11658
11659 static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned,
11660 ArrayRef<Value *> Ops) {
11661 llvm::Type *Ty = Ops[0]->getType();
11662 // Arguments have a vXi32 type so cast to vXi64.
11663 Ty = llvm::FixedVectorType::get(CGF.Int64Ty,
11664 Ty->getPrimitiveSizeInBits() / 64);
11665 Value *LHS = CGF.Builder.CreateBitCast(Ops[0], Ty);
11666 Value *RHS = CGF.Builder.CreateBitCast(Ops[1], Ty);
11667
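  // Each i64 lane now holds a pair of the original i32 inputs; only the low
  // 32 bits of each lane participate in the multiply (little-endian lane
  // layout, as on x86), so extend them in place below.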
11668 if (IsSigned) {
11669 // Shift left then arithmetic shift right.
11670 Constant *ShiftAmt = ConstantInt::get(Ty, 32);
11671 LHS = CGF.Builder.CreateShl(LHS, ShiftAmt);
11672 LHS = CGF.Builder.CreateAShr(LHS, ShiftAmt);
11673 RHS = CGF.Builder.CreateShl(RHS, ShiftAmt);
11674 RHS = CGF.Builder.CreateAShr(RHS, ShiftAmt);
11675 } else {
11676 // Clear the upper bits.
11677 Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
11678 LHS = CGF.Builder.CreateAnd(LHS, Mask);
11679 RHS = CGF.Builder.CreateAnd(RHS, Mask);
11680 }
11681
11682 return CGF.Builder.CreateMul(LHS, RHS);
11683 }
11684
11685 // Emit a masked pternlog intrinsic. This only exists because the header has to
11686 // use a macro, and we aren't able to pass the input argument to both a pternlog
11687 // builtin and a select builtin without evaluating it twice.
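// For reference, the 8-bit immediate (Ops[3]) indexes a truth table over the
// three sources; e.g. 0xCA implements the bitwise select A ? B : C.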
11688 static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask,
11689 ArrayRef<Value *> Ops) {
11690 llvm::Type *Ty = Ops[0]->getType();
11691
11692 unsigned VecWidth = Ty->getPrimitiveSizeInBits();
11693 unsigned EltWidth = Ty->getScalarSizeInBits();
11694 Intrinsic::ID IID;
11695 if (VecWidth == 128 && EltWidth == 32)
11696 IID = Intrinsic::x86_avx512_pternlog_d_128;
11697 else if (VecWidth == 256 && EltWidth == 32)
11698 IID = Intrinsic::x86_avx512_pternlog_d_256;
11699 else if (VecWidth == 512 && EltWidth == 32)
11700 IID = Intrinsic::x86_avx512_pternlog_d_512;
11701 else if (VecWidth == 128 && EltWidth == 64)
11702 IID = Intrinsic::x86_avx512_pternlog_q_128;
11703 else if (VecWidth == 256 && EltWidth == 64)
11704 IID = Intrinsic::x86_avx512_pternlog_q_256;
11705 else if (VecWidth == 512 && EltWidth == 64)
11706 IID = Intrinsic::x86_avx512_pternlog_q_512;
11707 else
11708 llvm_unreachable("Unexpected intrinsic");
11709
11710 Value *Ternlog = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
11711 Ops.drop_back());
11712 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty) : Ops[0];
11713 return EmitX86Select(CGF, Ops[4], Ternlog, PassThru);
11714 }
11715
11716 static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
11717 llvm::Type *DstTy) {
11718 unsigned NumberOfElements =
11719 cast<llvm::FixedVectorType>(DstTy)->getNumElements();
11720 Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements);
11721 return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2");
11722 }
11723
11724 // Emit binary intrinsic with the same type used in result/args.
11725 static Value *EmitX86BinaryIntrinsic(CodeGenFunction &CGF,
11726 ArrayRef<Value *> Ops, Intrinsic::ID IID) {
11727 llvm::Function *F = CGF.CGM.getIntrinsic(IID, Ops[0]->getType());
11728 return CGF.Builder.CreateCall(F, {Ops[0], Ops[1]});
11729 }
11730
11731 Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) {
11732 const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts();
11733 StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
11734 return EmitX86CpuIs(CPUStr);
11735 }
11736
11737 // Convert F16 halves to floats.
11738 static Value *EmitX86CvtF16ToFloatExpr(CodeGenFunction &CGF,
11739 ArrayRef<Value *> Ops,
11740 llvm::Type *DstTy) {
11741 assert((Ops.size() == 1 || Ops.size() == 3 || Ops.size() == 4) &&
11742 "Unknown cvtph2ps intrinsic");
11743
11744 // If the SAE intrinsic doesn't use default rounding then we can't upgrade.
11745 if (Ops.size() == 4 && cast<llvm::ConstantInt>(Ops[3])->getZExtValue() != 4) {
11746 Function *F =
11747 CGF.CGM.getIntrinsic(Intrinsic::x86_avx512_mask_vcvtph2ps_512);
11748 return CGF.Builder.CreateCall(F, {Ops[0], Ops[1], Ops[2], Ops[3]});
11749 }
11750
11751 unsigned NumDstElts = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
11752 Value *Src = Ops[0];
11753
11754 // Extract the subvector.
11755 if (NumDstElts !=
11756 cast<llvm::FixedVectorType>(Src->getType())->getNumElements()) {
11757 assert(NumDstElts == 4 && "Unexpected vector size");
11758 Src = CGF.Builder.CreateShuffleVector(Src, UndefValue::get(Src->getType()),
11759 ArrayRef<int>{0, 1, 2, 3});
11760 }
11761
11762 // Bitcast from vXi16 to vXf16.
11763 auto *HalfTy = llvm::FixedVectorType::get(
11764 llvm::Type::getHalfTy(CGF.getLLVMContext()), NumDstElts);
11765 Src = CGF.Builder.CreateBitCast(Src, HalfTy);
11766
11767 // Perform the fp-extension.
11768 Value *Res = CGF.Builder.CreateFPExt(Src, DstTy, "cvtph2ps");
11769
11770 if (Ops.size() >= 3)
11771 Res = EmitX86Select(CGF, Ops[2], Res, Ops[1]);
11772 return Res;
11773 }
11774
11775 // Convert a BF16 to a float.
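// A bfloat16 value is the high 16 bits of the corresponding float, so the
// widening is just a zero-extend, a 16-bit left shift, and a bitcast.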
11776 static Value *EmitX86CvtBF16ToFloatExpr(CodeGenFunction &CGF,
11777 const CallExpr *E,
11778 ArrayRef<Value *> Ops) {
11779 llvm::Type *Int32Ty = CGF.Builder.getInt32Ty();
11780 Value *ZeroExt = CGF.Builder.CreateZExt(Ops[0], Int32Ty);
11781 Value *Shl = CGF.Builder.CreateShl(ZeroExt, 16);
11782 llvm::Type *ResultType = CGF.ConvertType(E->getType());
11783 Value *BitCast = CGF.Builder.CreateBitCast(Shl, ResultType);
11784 return BitCast;
11785 }
11786
11787 Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {
11788
11789 llvm::Type *Int32Ty = Builder.getInt32Ty();
11790
11791 // Matching the struct layout from the compiler-rt/libgcc structure that is
11792 // filled in:
11793 // unsigned int __cpu_vendor;
11794 // unsigned int __cpu_type;
11795 // unsigned int __cpu_subtype;
11796 // unsigned int __cpu_features[1];
11797 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
11798 llvm::ArrayType::get(Int32Ty, 1));
11799
11800 // Grab the global __cpu_model.
11801 llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
11802 cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);
11803
11804 // Calculate the index needed to access the correct field based on the
11805 // range. Also adjust the expected value.
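  // Index 0 selects __cpu_vendor, 1 selects __cpu_type, and 2 selects
  // __cpu_subtype; e.g. __builtin_cpu_is("intel") or __builtin_cpu_is("amd")
  // ends up testing the vendor field.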
11806 unsigned Index;
11807 unsigned Value;
11808 std::tie(Index, Value) = StringSwitch<std::pair<unsigned, unsigned>>(CPUStr)
11809 #define X86_VENDOR(ENUM, STRING) \
11810 .Case(STRING, {0u, static_cast<unsigned>(llvm::X86::ENUM)})
11811 #define X86_CPU_TYPE_ALIAS(ENUM, ALIAS) \
11812 .Case(ALIAS, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
11813 #define X86_CPU_TYPE(ENUM, STR) \
11814 .Case(STR, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
11815 #define X86_CPU_SUBTYPE(ENUM, STR) \
11816 .Case(STR, {2u, static_cast<unsigned>(llvm::X86::ENUM)})
11817 #include "llvm/Support/X86TargetParser.def"
11818 .Default({0, 0});
11819 assert(Value != 0 && "Invalid CPUStr passed to CpuIs");
11820
11821 // Grab the appropriate field from __cpu_model.
11822 llvm::Value *Idxs[] = {ConstantInt::get(Int32Ty, 0),
11823 ConstantInt::get(Int32Ty, Index)};
11824 llvm::Value *CpuValue = Builder.CreateGEP(STy, CpuModel, Idxs);
11825 CpuValue = Builder.CreateAlignedLoad(CpuValue, CharUnits::fromQuantity(4));
11826
11827 // Check the value of the field against the requested value.
11828 return Builder.CreateICmpEQ(CpuValue,
11829 llvm::ConstantInt::get(Int32Ty, Value));
11830 }
11831
11832 Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) {
11833 const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts();
11834 StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString();
11835 return EmitX86CpuSupports(FeatureStr);
11836 }
11837
11838 uint64_t
11839 CodeGenFunction::GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs) {
11840 // Processor features and mapping to processor feature value.
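  // e.g. the feature string "avx2" maps to llvm::X86::FEATURE_AVX2, so
  // __builtin_cpu_supports("avx2") tests that single bit of the mask.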
11841 uint64_t FeaturesMask = 0;
11842 for (const StringRef &FeatureStr : FeatureStrs) {
11843 unsigned Feature =
11844 StringSwitch<unsigned>(FeatureStr)
11845 #define X86_FEATURE_COMPAT(ENUM, STR) .Case(STR, llvm::X86::FEATURE_##ENUM)
11846 #include "llvm/Support/X86TargetParser.def"
11847 ;
11848 FeaturesMask |= (1ULL << Feature);
11849 }
11850 return FeaturesMask;
11851 }
11852
11853 Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) {
11854 return EmitX86CpuSupports(GetX86CpuSupportsMask(FeatureStrs));
11855 }
11856
11857 llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint64_t FeaturesMask) {
11858 uint32_t Features1 = Lo_32(FeaturesMask);
11859 uint32_t Features2 = Hi_32(FeaturesMask);
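  // Bits 0-31 of the mask are stored in __cpu_model.__cpu_features[0]; bits
  // 32-63 live in the separate __cpu_features2 global, so each half is checked
  // independently below.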
11860
11861 Value *Result = Builder.getTrue();
11862
11863 if (Features1 != 0) {
11864 // Matching the struct layout from the compiler-rt/libgcc structure that is
11865 // filled in:
11866 // unsigned int __cpu_vendor;
11867 // unsigned int __cpu_type;
11868 // unsigned int __cpu_subtype;
11869 // unsigned int __cpu_features[1];
11870 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
11871 llvm::ArrayType::get(Int32Ty, 1));
11872
11873 // Grab the global __cpu_model.
11874 llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
11875 cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);
11876
11877     // Grab the first (0th) element of the __cpu_features field from the
11878     // __cpu_model global (of type STy).
11879 Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(3),
11880 Builder.getInt32(0)};
11881 Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
11882 Value *Features =
11883 Builder.CreateAlignedLoad(CpuFeatures, CharUnits::fromQuantity(4));
11884
11885 // Check the value of the bit corresponding to the feature requested.
11886 Value *Mask = Builder.getInt32(Features1);
11887 Value *Bitset = Builder.CreateAnd(Features, Mask);
11888 Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
11889 Result = Builder.CreateAnd(Result, Cmp);
11890 }
11891
11892 if (Features2 != 0) {
11893 llvm::Constant *CpuFeatures2 = CGM.CreateRuntimeVariable(Int32Ty,
11894 "__cpu_features2");
11895 cast<llvm::GlobalValue>(CpuFeatures2)->setDSOLocal(true);
11896
11897 Value *Features =
11898 Builder.CreateAlignedLoad(CpuFeatures2, CharUnits::fromQuantity(4));
11899
11900 // Check the value of the bit corresponding to the feature requested.
11901 Value *Mask = Builder.getInt32(Features2);
11902 Value *Bitset = Builder.CreateAnd(Features, Mask);
11903 Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
11904 Result = Builder.CreateAnd(Result, Cmp);
11905 }
11906
11907 return Result;
11908 }
11909
11910 Value *CodeGenFunction::EmitX86CpuInit() {
11911 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy,
11912 /*Variadic*/ false);
11913 llvm::FunctionCallee Func =
11914 CGM.CreateRuntimeFunction(FTy, "__cpu_indicator_init");
11915 cast<llvm::GlobalValue>(Func.getCallee())->setDSOLocal(true);
11916 cast<llvm::GlobalValue>(Func.getCallee())
11917 ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
11918 return Builder.CreateCall(Func);
11919 }
11920
11921 Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
11922 const CallExpr *E) {
11923 if (BuiltinID == X86::BI__builtin_cpu_is)
11924 return EmitX86CpuIs(E);
11925 if (BuiltinID == X86::BI__builtin_cpu_supports)
11926 return EmitX86CpuSupports(E);
11927 if (BuiltinID == X86::BI__builtin_cpu_init)
11928 return EmitX86CpuInit();
11929
11930 // Handle MSVC intrinsics before argument evaluation to prevent double
11931 // evaluation.
11932 if (Optional<MSVCIntrin> MsvcIntId = translateX86ToMsvcIntrin(BuiltinID))
11933 return EmitMSVCBuiltinExpr(*MsvcIntId, E);
11934
11935 SmallVector<Value*, 4> Ops;
11936 bool IsMaskFCmp = false;
11937
11938 // Find out if any arguments are required to be integer constant expressions.
11939 unsigned ICEArguments = 0;
11940 ASTContext::GetBuiltinTypeError Error;
11941 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
11942 assert(Error == ASTContext::GE_None && "Should not codegen an error");
11943
11944 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
11945 // If this is a normal argument, just emit it as a scalar.
11946 if ((ICEArguments & (1 << i)) == 0) {
11947 Ops.push_back(EmitScalarExpr(E->getArg(i)));
11948 continue;
11949 }
11950
11951 // If this is required to be a constant, constant fold it so that we know
11952 // that the generated intrinsic gets a ConstantInt.
11953 Ops.push_back(llvm::ConstantInt::get(
11954 getLLVMContext(), *E->getArg(i)->getIntegerConstantExpr(getContext())));
11955 }
11956
11957   // These exist so that the builtin that takes an immediate can be bounds
11958   // checked by clang to avoid passing bad immediates to the backend. Since
11959   // AVX has a larger immediate than SSE we would need separate builtins to
11960   // do the different bounds checking. Rather than create a clang-specific
11961   // SSE-only builtin, this implements eight separate builtins to match the
11962   // gcc implementation.
11963 auto getCmpIntrinsicCall = [this, &Ops](Intrinsic::ID ID, unsigned Imm) {
11964 Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm));
11965 llvm::Function *F = CGM.getIntrinsic(ID);
11966 return Builder.CreateCall(F, Ops);
11967 };
11968
11969 // For the vector forms of FP comparisons, translate the builtins directly to
11970 // IR.
11971 // TODO: The builtins could be removed if the SSE header files used vector
11972 // extension comparisons directly (vector ordered/unordered may need
11973 // additional support via __builtin_isnan()).
11974 auto getVectorFCmpIR = [this, &Ops](CmpInst::Predicate Pred,
11975 bool IsSignaling) {
11976 Value *Cmp;
11977 if (IsSignaling)
11978 Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
11979 else
11980 Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
11981 llvm::VectorType *FPVecTy = cast<llvm::VectorType>(Ops[0]->getType());
11982 llvm::VectorType *IntVecTy = llvm::VectorType::getInteger(FPVecTy);
11983 Value *Sext = Builder.CreateSExt(Cmp, IntVecTy);
11984 return Builder.CreateBitCast(Sext, FPVecTy);
11985 };
11986
11987 switch (BuiltinID) {
11988 default: return nullptr;
11989 case X86::BI_mm_prefetch: {
11990 Value *Address = Ops[0];
11991 ConstantInt *C = cast<ConstantInt>(Ops[1]);
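    // The SSE prefetch hint packs read/write into bit 2 and locality into bits
    // 1:0; e.g. _MM_HINT_T0 (3) decodes to RW=0, Locality=3.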
11992 Value *RW = ConstantInt::get(Int32Ty, (C->getZExtValue() >> 2) & 0x1);
11993 Value *Locality = ConstantInt::get(Int32Ty, C->getZExtValue() & 0x3);
11994 Value *Data = ConstantInt::get(Int32Ty, 1);
11995 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
11996 return Builder.CreateCall(F, {Address, RW, Locality, Data});
11997 }
11998 case X86::BI_mm_clflush: {
11999 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_clflush),
12000 Ops[0]);
12001 }
12002 case X86::BI_mm_lfence: {
12003 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_lfence));
12004 }
12005 case X86::BI_mm_mfence: {
12006 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_mfence));
12007 }
12008 case X86::BI_mm_sfence: {
12009 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_sfence));
12010 }
12011 case X86::BI_mm_pause: {
12012 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_pause));
12013 }
12014 case X86::BI__rdtsc: {
12015 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtsc));
12016 }
12017 case X86::BI__builtin_ia32_rdtscp: {
12018 Value *Call = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtscp));
12019 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
12020 Ops[0]);
12021 return Builder.CreateExtractValue(Call, 0);
12022 }
12023 case X86::BI__builtin_ia32_lzcnt_u16:
12024 case X86::BI__builtin_ia32_lzcnt_u32:
12025 case X86::BI__builtin_ia32_lzcnt_u64: {
12026 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
12027 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
12028 }
12029 case X86::BI__builtin_ia32_tzcnt_u16:
12030 case X86::BI__builtin_ia32_tzcnt_u32:
12031 case X86::BI__builtin_ia32_tzcnt_u64: {
12032 Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
12033 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
12034 }
12035 case X86::BI__builtin_ia32_undef128:
12036 case X86::BI__builtin_ia32_undef256:
12037 case X86::BI__builtin_ia32_undef512:
12038 // The x86 definition of "undef" is not the same as the LLVM definition
12039 // (PR32176). We leave optimizing away an unnecessary zero constant to the
12040 // IR optimizer and backend.
12041 // TODO: If we had a "freeze" IR instruction to generate a fixed undef
12042 // value, we should use that here instead of a zero.
12043 return llvm::Constant::getNullValue(ConvertType(E->getType()));
12044 case X86::BI__builtin_ia32_vec_init_v8qi:
12045 case X86::BI__builtin_ia32_vec_init_v4hi:
12046 case X86::BI__builtin_ia32_vec_init_v2si:
12047 return Builder.CreateBitCast(BuildVector(Ops),
12048 llvm::Type::getX86_MMXTy(getLLVMContext()));
12049 case X86::BI__builtin_ia32_vec_ext_v2si:
12050 case X86::BI__builtin_ia32_vec_ext_v16qi:
12051 case X86::BI__builtin_ia32_vec_ext_v8hi:
12052 case X86::BI__builtin_ia32_vec_ext_v4si:
12053 case X86::BI__builtin_ia32_vec_ext_v4sf:
12054 case X86::BI__builtin_ia32_vec_ext_v2di:
12055 case X86::BI__builtin_ia32_vec_ext_v32qi:
12056 case X86::BI__builtin_ia32_vec_ext_v16hi:
12057 case X86::BI__builtin_ia32_vec_ext_v8si:
12058 case X86::BI__builtin_ia32_vec_ext_v4di: {
12059 unsigned NumElts =
12060 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12061 uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue();
12062 Index &= NumElts - 1;
12063 // These builtins exist so we can ensure the index is an ICE and in range.
12064 // Otherwise we could just do this in the header file.
12065 return Builder.CreateExtractElement(Ops[0], Index);
12066 }
12067 case X86::BI__builtin_ia32_vec_set_v16qi:
12068 case X86::BI__builtin_ia32_vec_set_v8hi:
12069 case X86::BI__builtin_ia32_vec_set_v4si:
12070 case X86::BI__builtin_ia32_vec_set_v2di:
12071 case X86::BI__builtin_ia32_vec_set_v32qi:
12072 case X86::BI__builtin_ia32_vec_set_v16hi:
12073 case X86::BI__builtin_ia32_vec_set_v8si:
12074 case X86::BI__builtin_ia32_vec_set_v4di: {
12075 unsigned NumElts =
12076 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12077 unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
12078 Index &= NumElts - 1;
12079 // These builtins exist so we can ensure the index is an ICE and in range.
12080 // Otherwise we could just do this in the header file.
12081 return Builder.CreateInsertElement(Ops[0], Ops[1], Index);
12082 }
12083 case X86::BI_mm_setcsr:
12084 case X86::BI__builtin_ia32_ldmxcsr: {
12085 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
12086 Builder.CreateStore(Ops[0], Tmp);
12087 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
12088 Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
12089 }
12090 case X86::BI_mm_getcsr:
12091 case X86::BI__builtin_ia32_stmxcsr: {
12092 Address Tmp = CreateMemTemp(E->getType());
12093 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
12094 Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
12095 return Builder.CreateLoad(Tmp, "stmxcsr");
12096 }
12097 case X86::BI__builtin_ia32_xsave:
12098 case X86::BI__builtin_ia32_xsave64:
12099 case X86::BI__builtin_ia32_xrstor:
12100 case X86::BI__builtin_ia32_xrstor64:
12101 case X86::BI__builtin_ia32_xsaveopt:
12102 case X86::BI__builtin_ia32_xsaveopt64:
12103 case X86::BI__builtin_ia32_xrstors:
12104 case X86::BI__builtin_ia32_xrstors64:
12105 case X86::BI__builtin_ia32_xsavec:
12106 case X86::BI__builtin_ia32_xsavec64:
12107 case X86::BI__builtin_ia32_xsaves:
12108 case X86::BI__builtin_ia32_xsaves64:
12109 case X86::BI__builtin_ia32_xsetbv:
12110 case X86::BI_xsetbv: {
12111 Intrinsic::ID ID;
12112 #define INTRINSIC_X86_XSAVE_ID(NAME) \
12113 case X86::BI__builtin_ia32_##NAME: \
12114 ID = Intrinsic::x86_##NAME; \
12115 break
12116 switch (BuiltinID) {
12117 default: llvm_unreachable("Unsupported intrinsic!");
12118 INTRINSIC_X86_XSAVE_ID(xsave);
12119 INTRINSIC_X86_XSAVE_ID(xsave64);
12120 INTRINSIC_X86_XSAVE_ID(xrstor);
12121 INTRINSIC_X86_XSAVE_ID(xrstor64);
12122 INTRINSIC_X86_XSAVE_ID(xsaveopt);
12123 INTRINSIC_X86_XSAVE_ID(xsaveopt64);
12124 INTRINSIC_X86_XSAVE_ID(xrstors);
12125 INTRINSIC_X86_XSAVE_ID(xrstors64);
12126 INTRINSIC_X86_XSAVE_ID(xsavec);
12127 INTRINSIC_X86_XSAVE_ID(xsavec64);
12128 INTRINSIC_X86_XSAVE_ID(xsaves);
12129 INTRINSIC_X86_XSAVE_ID(xsaves64);
12130 INTRINSIC_X86_XSAVE_ID(xsetbv);
12131 case X86::BI_xsetbv:
12132 ID = Intrinsic::x86_xsetbv;
12133 break;
12134 }
12135 #undef INTRINSIC_X86_XSAVE_ID
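    // Split the 64-bit mask operand into the high/low 32-bit halves (the
    // EDX:EAX pair the underlying instructions expect).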
12136 Value *Mhi = Builder.CreateTrunc(
12137 Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty);
12138 Value *Mlo = Builder.CreateTrunc(Ops[1], Int32Ty);
12139 Ops[1] = Mhi;
12140 Ops.push_back(Mlo);
12141 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
12142 }
12143 case X86::BI__builtin_ia32_xgetbv:
12144 case X86::BI_xgetbv:
12145 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_xgetbv), Ops);
12146 case X86::BI__builtin_ia32_storedqudi128_mask:
12147 case X86::BI__builtin_ia32_storedqusi128_mask:
12148 case X86::BI__builtin_ia32_storedquhi128_mask:
12149 case X86::BI__builtin_ia32_storedquqi128_mask:
12150 case X86::BI__builtin_ia32_storeupd128_mask:
12151 case X86::BI__builtin_ia32_storeups128_mask:
12152 case X86::BI__builtin_ia32_storedqudi256_mask:
12153 case X86::BI__builtin_ia32_storedqusi256_mask:
12154 case X86::BI__builtin_ia32_storedquhi256_mask:
12155 case X86::BI__builtin_ia32_storedquqi256_mask:
12156 case X86::BI__builtin_ia32_storeupd256_mask:
12157 case X86::BI__builtin_ia32_storeups256_mask:
12158 case X86::BI__builtin_ia32_storedqudi512_mask:
12159 case X86::BI__builtin_ia32_storedqusi512_mask:
12160 case X86::BI__builtin_ia32_storedquhi512_mask:
12161 case X86::BI__builtin_ia32_storedquqi512_mask:
12162 case X86::BI__builtin_ia32_storeupd512_mask:
12163 case X86::BI__builtin_ia32_storeups512_mask:
12164 return EmitX86MaskedStore(*this, Ops, Align(1));
12165
12166 case X86::BI__builtin_ia32_storess128_mask:
12167 case X86::BI__builtin_ia32_storesd128_mask:
12168 return EmitX86MaskedStore(*this, Ops, Align(1));
12169
12170 case X86::BI__builtin_ia32_vpopcntb_128:
12171 case X86::BI__builtin_ia32_vpopcntd_128:
12172 case X86::BI__builtin_ia32_vpopcntq_128:
12173 case X86::BI__builtin_ia32_vpopcntw_128:
12174 case X86::BI__builtin_ia32_vpopcntb_256:
12175 case X86::BI__builtin_ia32_vpopcntd_256:
12176 case X86::BI__builtin_ia32_vpopcntq_256:
12177 case X86::BI__builtin_ia32_vpopcntw_256:
12178 case X86::BI__builtin_ia32_vpopcntb_512:
12179 case X86::BI__builtin_ia32_vpopcntd_512:
12180 case X86::BI__builtin_ia32_vpopcntq_512:
12181 case X86::BI__builtin_ia32_vpopcntw_512: {
12182 llvm::Type *ResultType = ConvertType(E->getType());
12183 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
12184 return Builder.CreateCall(F, Ops);
12185 }
12186 case X86::BI__builtin_ia32_cvtmask2b128:
12187 case X86::BI__builtin_ia32_cvtmask2b256:
12188 case X86::BI__builtin_ia32_cvtmask2b512:
12189 case X86::BI__builtin_ia32_cvtmask2w128:
12190 case X86::BI__builtin_ia32_cvtmask2w256:
12191 case X86::BI__builtin_ia32_cvtmask2w512:
12192 case X86::BI__builtin_ia32_cvtmask2d128:
12193 case X86::BI__builtin_ia32_cvtmask2d256:
12194 case X86::BI__builtin_ia32_cvtmask2d512:
12195 case X86::BI__builtin_ia32_cvtmask2q128:
12196 case X86::BI__builtin_ia32_cvtmask2q256:
12197 case X86::BI__builtin_ia32_cvtmask2q512:
12198 return EmitX86SExtMask(*this, Ops[0], ConvertType(E->getType()));
12199
12200 case X86::BI__builtin_ia32_cvtb2mask128:
12201 case X86::BI__builtin_ia32_cvtb2mask256:
12202 case X86::BI__builtin_ia32_cvtb2mask512:
12203 case X86::BI__builtin_ia32_cvtw2mask128:
12204 case X86::BI__builtin_ia32_cvtw2mask256:
12205 case X86::BI__builtin_ia32_cvtw2mask512:
12206 case X86::BI__builtin_ia32_cvtd2mask128:
12207 case X86::BI__builtin_ia32_cvtd2mask256:
12208 case X86::BI__builtin_ia32_cvtd2mask512:
12209 case X86::BI__builtin_ia32_cvtq2mask128:
12210 case X86::BI__builtin_ia32_cvtq2mask256:
12211 case X86::BI__builtin_ia32_cvtq2mask512:
12212 return EmitX86ConvertToMask(*this, Ops[0]);
12213
12214 case X86::BI__builtin_ia32_cvtdq2ps512_mask:
12215 case X86::BI__builtin_ia32_cvtqq2ps512_mask:
12216 case X86::BI__builtin_ia32_cvtqq2pd512_mask:
12217 return EmitX86ConvertIntToFp(*this, Ops, /*IsSigned*/true);
12218 case X86::BI__builtin_ia32_cvtudq2ps512_mask:
12219 case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
12220 case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
12221 return EmitX86ConvertIntToFp(*this, Ops, /*IsSigned*/false);
12222
12223 case X86::BI__builtin_ia32_vfmaddss3:
12224 case X86::BI__builtin_ia32_vfmaddsd3:
12225 case X86::BI__builtin_ia32_vfmaddss3_mask:
12226 case X86::BI__builtin_ia32_vfmaddsd3_mask:
12227 return EmitScalarFMAExpr(*this, Ops, Ops[0]);
12228 case X86::BI__builtin_ia32_vfmaddss:
12229 case X86::BI__builtin_ia32_vfmaddsd:
12230 return EmitScalarFMAExpr(*this, Ops,
12231 Constant::getNullValue(Ops[0]->getType()));
12232 case X86::BI__builtin_ia32_vfmaddss3_maskz:
12233 case X86::BI__builtin_ia32_vfmaddsd3_maskz:
12234 return EmitScalarFMAExpr(*this, Ops, Ops[0], /*ZeroMask*/true);
12235 case X86::BI__builtin_ia32_vfmaddss3_mask3:
12236 case X86::BI__builtin_ia32_vfmaddsd3_mask3:
12237 return EmitScalarFMAExpr(*this, Ops, Ops[2], /*ZeroMask*/false, 2);
12238 case X86::BI__builtin_ia32_vfmsubss3_mask3:
12239 case X86::BI__builtin_ia32_vfmsubsd3_mask3:
12240 return EmitScalarFMAExpr(*this, Ops, Ops[2], /*ZeroMask*/false, 2,
12241 /*NegAcc*/true);
12242 case X86::BI__builtin_ia32_vfmaddps:
12243 case X86::BI__builtin_ia32_vfmaddpd:
12244 case X86::BI__builtin_ia32_vfmaddps256:
12245 case X86::BI__builtin_ia32_vfmaddpd256:
12246 case X86::BI__builtin_ia32_vfmaddps512_mask:
12247 case X86::BI__builtin_ia32_vfmaddps512_maskz:
12248 case X86::BI__builtin_ia32_vfmaddps512_mask3:
12249 case X86::BI__builtin_ia32_vfmsubps512_mask3:
12250 case X86::BI__builtin_ia32_vfmaddpd512_mask:
12251 case X86::BI__builtin_ia32_vfmaddpd512_maskz:
12252 case X86::BI__builtin_ia32_vfmaddpd512_mask3:
12253 case X86::BI__builtin_ia32_vfmsubpd512_mask3:
12254 return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/false);
12255 case X86::BI__builtin_ia32_vfmaddsubps512_mask:
12256 case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
12257 case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
12258 case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
12259 case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
12260 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
12261 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
12262 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
12263 return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/true);
12264
12265 case X86::BI__builtin_ia32_movdqa32store128_mask:
12266 case X86::BI__builtin_ia32_movdqa64store128_mask:
12267 case X86::BI__builtin_ia32_storeaps128_mask:
12268 case X86::BI__builtin_ia32_storeapd128_mask:
12269 case X86::BI__builtin_ia32_movdqa32store256_mask:
12270 case X86::BI__builtin_ia32_movdqa64store256_mask:
12271 case X86::BI__builtin_ia32_storeaps256_mask:
12272 case X86::BI__builtin_ia32_storeapd256_mask:
12273 case X86::BI__builtin_ia32_movdqa32store512_mask:
12274 case X86::BI__builtin_ia32_movdqa64store512_mask:
12275 case X86::BI__builtin_ia32_storeaps512_mask:
12276 case X86::BI__builtin_ia32_storeapd512_mask:
12277 return EmitX86MaskedStore(
12278 *this, Ops,
12279 getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
12280
12281 case X86::BI__builtin_ia32_loadups128_mask:
12282 case X86::BI__builtin_ia32_loadups256_mask:
12283 case X86::BI__builtin_ia32_loadups512_mask:
12284 case X86::BI__builtin_ia32_loadupd128_mask:
12285 case X86::BI__builtin_ia32_loadupd256_mask:
12286 case X86::BI__builtin_ia32_loadupd512_mask:
12287 case X86::BI__builtin_ia32_loaddquqi128_mask:
12288 case X86::BI__builtin_ia32_loaddquqi256_mask:
12289 case X86::BI__builtin_ia32_loaddquqi512_mask:
12290 case X86::BI__builtin_ia32_loaddquhi128_mask:
12291 case X86::BI__builtin_ia32_loaddquhi256_mask:
12292 case X86::BI__builtin_ia32_loaddquhi512_mask:
12293 case X86::BI__builtin_ia32_loaddqusi128_mask:
12294 case X86::BI__builtin_ia32_loaddqusi256_mask:
12295 case X86::BI__builtin_ia32_loaddqusi512_mask:
12296 case X86::BI__builtin_ia32_loaddqudi128_mask:
12297 case X86::BI__builtin_ia32_loaddqudi256_mask:
12298 case X86::BI__builtin_ia32_loaddqudi512_mask:
12299 return EmitX86MaskedLoad(*this, Ops, Align(1));
12300
12301 case X86::BI__builtin_ia32_loadss128_mask:
12302 case X86::BI__builtin_ia32_loadsd128_mask:
12303 return EmitX86MaskedLoad(*this, Ops, Align(1));
12304
12305 case X86::BI__builtin_ia32_loadaps128_mask:
12306 case X86::BI__builtin_ia32_loadaps256_mask:
12307 case X86::BI__builtin_ia32_loadaps512_mask:
12308 case X86::BI__builtin_ia32_loadapd128_mask:
12309 case X86::BI__builtin_ia32_loadapd256_mask:
12310 case X86::BI__builtin_ia32_loadapd512_mask:
12311 case X86::BI__builtin_ia32_movdqa32load128_mask:
12312 case X86::BI__builtin_ia32_movdqa32load256_mask:
12313 case X86::BI__builtin_ia32_movdqa32load512_mask:
12314 case X86::BI__builtin_ia32_movdqa64load128_mask:
12315 case X86::BI__builtin_ia32_movdqa64load256_mask:
12316 case X86::BI__builtin_ia32_movdqa64load512_mask:
12317 return EmitX86MaskedLoad(
12318 *this, Ops,
12319 getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
12320
12321 case X86::BI__builtin_ia32_expandloaddf128_mask:
12322 case X86::BI__builtin_ia32_expandloaddf256_mask:
12323 case X86::BI__builtin_ia32_expandloaddf512_mask:
12324 case X86::BI__builtin_ia32_expandloadsf128_mask:
12325 case X86::BI__builtin_ia32_expandloadsf256_mask:
12326 case X86::BI__builtin_ia32_expandloadsf512_mask:
12327 case X86::BI__builtin_ia32_expandloaddi128_mask:
12328 case X86::BI__builtin_ia32_expandloaddi256_mask:
12329 case X86::BI__builtin_ia32_expandloaddi512_mask:
12330 case X86::BI__builtin_ia32_expandloadsi128_mask:
12331 case X86::BI__builtin_ia32_expandloadsi256_mask:
12332 case X86::BI__builtin_ia32_expandloadsi512_mask:
12333 case X86::BI__builtin_ia32_expandloadhi128_mask:
12334 case X86::BI__builtin_ia32_expandloadhi256_mask:
12335 case X86::BI__builtin_ia32_expandloadhi512_mask:
12336 case X86::BI__builtin_ia32_expandloadqi128_mask:
12337 case X86::BI__builtin_ia32_expandloadqi256_mask:
12338 case X86::BI__builtin_ia32_expandloadqi512_mask:
12339 return EmitX86ExpandLoad(*this, Ops);
12340
12341 case X86::BI__builtin_ia32_compressstoredf128_mask:
12342 case X86::BI__builtin_ia32_compressstoredf256_mask:
12343 case X86::BI__builtin_ia32_compressstoredf512_mask:
12344 case X86::BI__builtin_ia32_compressstoresf128_mask:
12345 case X86::BI__builtin_ia32_compressstoresf256_mask:
12346 case X86::BI__builtin_ia32_compressstoresf512_mask:
12347 case X86::BI__builtin_ia32_compressstoredi128_mask:
12348 case X86::BI__builtin_ia32_compressstoredi256_mask:
12349 case X86::BI__builtin_ia32_compressstoredi512_mask:
12350 case X86::BI__builtin_ia32_compressstoresi128_mask:
12351 case X86::BI__builtin_ia32_compressstoresi256_mask:
12352 case X86::BI__builtin_ia32_compressstoresi512_mask:
12353 case X86::BI__builtin_ia32_compressstorehi128_mask:
12354 case X86::BI__builtin_ia32_compressstorehi256_mask:
12355 case X86::BI__builtin_ia32_compressstorehi512_mask:
12356 case X86::BI__builtin_ia32_compressstoreqi128_mask:
12357 case X86::BI__builtin_ia32_compressstoreqi256_mask:
12358 case X86::BI__builtin_ia32_compressstoreqi512_mask:
12359 return EmitX86CompressStore(*this, Ops);
12360
12361 case X86::BI__builtin_ia32_expanddf128_mask:
12362 case X86::BI__builtin_ia32_expanddf256_mask:
12363 case X86::BI__builtin_ia32_expanddf512_mask:
12364 case X86::BI__builtin_ia32_expandsf128_mask:
12365 case X86::BI__builtin_ia32_expandsf256_mask:
12366 case X86::BI__builtin_ia32_expandsf512_mask:
12367 case X86::BI__builtin_ia32_expanddi128_mask:
12368 case X86::BI__builtin_ia32_expanddi256_mask:
12369 case X86::BI__builtin_ia32_expanddi512_mask:
12370 case X86::BI__builtin_ia32_expandsi128_mask:
12371 case X86::BI__builtin_ia32_expandsi256_mask:
12372 case X86::BI__builtin_ia32_expandsi512_mask:
12373 case X86::BI__builtin_ia32_expandhi128_mask:
12374 case X86::BI__builtin_ia32_expandhi256_mask:
12375 case X86::BI__builtin_ia32_expandhi512_mask:
12376 case X86::BI__builtin_ia32_expandqi128_mask:
12377 case X86::BI__builtin_ia32_expandqi256_mask:
12378 case X86::BI__builtin_ia32_expandqi512_mask:
12379 return EmitX86CompressExpand(*this, Ops, /*IsCompress*/false);
12380
12381 case X86::BI__builtin_ia32_compressdf128_mask:
12382 case X86::BI__builtin_ia32_compressdf256_mask:
12383 case X86::BI__builtin_ia32_compressdf512_mask:
12384 case X86::BI__builtin_ia32_compresssf128_mask:
12385 case X86::BI__builtin_ia32_compresssf256_mask:
12386 case X86::BI__builtin_ia32_compresssf512_mask:
12387 case X86::BI__builtin_ia32_compressdi128_mask:
12388 case X86::BI__builtin_ia32_compressdi256_mask:
12389 case X86::BI__builtin_ia32_compressdi512_mask:
12390 case X86::BI__builtin_ia32_compresssi128_mask:
12391 case X86::BI__builtin_ia32_compresssi256_mask:
12392 case X86::BI__builtin_ia32_compresssi512_mask:
12393 case X86::BI__builtin_ia32_compresshi128_mask:
12394 case X86::BI__builtin_ia32_compresshi256_mask:
12395 case X86::BI__builtin_ia32_compresshi512_mask:
12396 case X86::BI__builtin_ia32_compressqi128_mask:
12397 case X86::BI__builtin_ia32_compressqi256_mask:
12398 case X86::BI__builtin_ia32_compressqi512_mask:
12399 return EmitX86CompressExpand(*this, Ops, /*IsCompress*/true);
12400
12401 case X86::BI__builtin_ia32_gather3div2df:
12402 case X86::BI__builtin_ia32_gather3div2di:
12403 case X86::BI__builtin_ia32_gather3div4df:
12404 case X86::BI__builtin_ia32_gather3div4di:
12405 case X86::BI__builtin_ia32_gather3div4sf:
12406 case X86::BI__builtin_ia32_gather3div4si:
12407 case X86::BI__builtin_ia32_gather3div8sf:
12408 case X86::BI__builtin_ia32_gather3div8si:
12409 case X86::BI__builtin_ia32_gather3siv2df:
12410 case X86::BI__builtin_ia32_gather3siv2di:
12411 case X86::BI__builtin_ia32_gather3siv4df:
12412 case X86::BI__builtin_ia32_gather3siv4di:
12413 case X86::BI__builtin_ia32_gather3siv4sf:
12414 case X86::BI__builtin_ia32_gather3siv4si:
12415 case X86::BI__builtin_ia32_gather3siv8sf:
12416 case X86::BI__builtin_ia32_gather3siv8si:
12417 case X86::BI__builtin_ia32_gathersiv8df:
12418 case X86::BI__builtin_ia32_gathersiv16sf:
12419 case X86::BI__builtin_ia32_gatherdiv8df:
12420 case X86::BI__builtin_ia32_gatherdiv16sf:
12421 case X86::BI__builtin_ia32_gathersiv8di:
12422 case X86::BI__builtin_ia32_gathersiv16si:
12423 case X86::BI__builtin_ia32_gatherdiv8di:
12424 case X86::BI__builtin_ia32_gatherdiv16si: {
12425 Intrinsic::ID IID;
12426 switch (BuiltinID) {
12427 default: llvm_unreachable("Unexpected builtin");
12428 case X86::BI__builtin_ia32_gather3div2df:
12429 IID = Intrinsic::x86_avx512_mask_gather3div2_df;
12430 break;
12431 case X86::BI__builtin_ia32_gather3div2di:
12432 IID = Intrinsic::x86_avx512_mask_gather3div2_di;
12433 break;
12434 case X86::BI__builtin_ia32_gather3div4df:
12435 IID = Intrinsic::x86_avx512_mask_gather3div4_df;
12436 break;
12437 case X86::BI__builtin_ia32_gather3div4di:
12438 IID = Intrinsic::x86_avx512_mask_gather3div4_di;
12439 break;
12440 case X86::BI__builtin_ia32_gather3div4sf:
12441 IID = Intrinsic::x86_avx512_mask_gather3div4_sf;
12442 break;
12443 case X86::BI__builtin_ia32_gather3div4si:
12444 IID = Intrinsic::x86_avx512_mask_gather3div4_si;
12445 break;
12446 case X86::BI__builtin_ia32_gather3div8sf:
12447 IID = Intrinsic::x86_avx512_mask_gather3div8_sf;
12448 break;
12449 case X86::BI__builtin_ia32_gather3div8si:
12450 IID = Intrinsic::x86_avx512_mask_gather3div8_si;
12451 break;
12452 case X86::BI__builtin_ia32_gather3siv2df:
12453 IID = Intrinsic::x86_avx512_mask_gather3siv2_df;
12454 break;
12455 case X86::BI__builtin_ia32_gather3siv2di:
12456 IID = Intrinsic::x86_avx512_mask_gather3siv2_di;
12457 break;
12458 case X86::BI__builtin_ia32_gather3siv4df:
12459 IID = Intrinsic::x86_avx512_mask_gather3siv4_df;
12460 break;
12461 case X86::BI__builtin_ia32_gather3siv4di:
12462 IID = Intrinsic::x86_avx512_mask_gather3siv4_di;
12463 break;
12464 case X86::BI__builtin_ia32_gather3siv4sf:
12465 IID = Intrinsic::x86_avx512_mask_gather3siv4_sf;
12466 break;
12467 case X86::BI__builtin_ia32_gather3siv4si:
12468 IID = Intrinsic::x86_avx512_mask_gather3siv4_si;
12469 break;
12470 case X86::BI__builtin_ia32_gather3siv8sf:
12471 IID = Intrinsic::x86_avx512_mask_gather3siv8_sf;
12472 break;
12473 case X86::BI__builtin_ia32_gather3siv8si:
12474 IID = Intrinsic::x86_avx512_mask_gather3siv8_si;
12475 break;
12476 case X86::BI__builtin_ia32_gathersiv8df:
12477 IID = Intrinsic::x86_avx512_mask_gather_dpd_512;
12478 break;
12479 case X86::BI__builtin_ia32_gathersiv16sf:
12480 IID = Intrinsic::x86_avx512_mask_gather_dps_512;
12481 break;
12482 case X86::BI__builtin_ia32_gatherdiv8df:
12483 IID = Intrinsic::x86_avx512_mask_gather_qpd_512;
12484 break;
12485 case X86::BI__builtin_ia32_gatherdiv16sf:
12486 IID = Intrinsic::x86_avx512_mask_gather_qps_512;
12487 break;
12488 case X86::BI__builtin_ia32_gathersiv8di:
12489 IID = Intrinsic::x86_avx512_mask_gather_dpq_512;
12490 break;
12491 case X86::BI__builtin_ia32_gathersiv16si:
12492 IID = Intrinsic::x86_avx512_mask_gather_dpi_512;
12493 break;
12494 case X86::BI__builtin_ia32_gatherdiv8di:
12495 IID = Intrinsic::x86_avx512_mask_gather_qpq_512;
12496 break;
12497 case X86::BI__builtin_ia32_gatherdiv16si:
12498 IID = Intrinsic::x86_avx512_mask_gather_qpi_512;
12499 break;
12500 }
12501
12502 unsigned MinElts = std::min(
12503 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(),
12504 cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements());
12505 Ops[3] = getMaskVecValue(*this, Ops[3], MinElts);
12506 Function *Intr = CGM.getIntrinsic(IID);
12507 return Builder.CreateCall(Intr, Ops);
12508 }
12509
12510 case X86::BI__builtin_ia32_scattersiv8df:
12511 case X86::BI__builtin_ia32_scattersiv16sf:
12512 case X86::BI__builtin_ia32_scatterdiv8df:
12513 case X86::BI__builtin_ia32_scatterdiv16sf:
12514 case X86::BI__builtin_ia32_scattersiv8di:
12515 case X86::BI__builtin_ia32_scattersiv16si:
12516 case X86::BI__builtin_ia32_scatterdiv8di:
12517 case X86::BI__builtin_ia32_scatterdiv16si:
12518 case X86::BI__builtin_ia32_scatterdiv2df:
12519 case X86::BI__builtin_ia32_scatterdiv2di:
12520 case X86::BI__builtin_ia32_scatterdiv4df:
12521 case X86::BI__builtin_ia32_scatterdiv4di:
12522 case X86::BI__builtin_ia32_scatterdiv4sf:
12523 case X86::BI__builtin_ia32_scatterdiv4si:
12524 case X86::BI__builtin_ia32_scatterdiv8sf:
12525 case X86::BI__builtin_ia32_scatterdiv8si:
12526 case X86::BI__builtin_ia32_scattersiv2df:
12527 case X86::BI__builtin_ia32_scattersiv2di:
12528 case X86::BI__builtin_ia32_scattersiv4df:
12529 case X86::BI__builtin_ia32_scattersiv4di:
12530 case X86::BI__builtin_ia32_scattersiv4sf:
12531 case X86::BI__builtin_ia32_scattersiv4si:
12532 case X86::BI__builtin_ia32_scattersiv8sf:
12533 case X86::BI__builtin_ia32_scattersiv8si: {
12534 Intrinsic::ID IID;
12535 switch (BuiltinID) {
12536 default: llvm_unreachable("Unexpected builtin");
12537 case X86::BI__builtin_ia32_scattersiv8df:
12538 IID = Intrinsic::x86_avx512_mask_scatter_dpd_512;
12539 break;
12540 case X86::BI__builtin_ia32_scattersiv16sf:
12541 IID = Intrinsic::x86_avx512_mask_scatter_dps_512;
12542 break;
12543 case X86::BI__builtin_ia32_scatterdiv8df:
12544 IID = Intrinsic::x86_avx512_mask_scatter_qpd_512;
12545 break;
12546 case X86::BI__builtin_ia32_scatterdiv16sf:
12547 IID = Intrinsic::x86_avx512_mask_scatter_qps_512;
12548 break;
12549 case X86::BI__builtin_ia32_scattersiv8di:
12550 IID = Intrinsic::x86_avx512_mask_scatter_dpq_512;
12551 break;
12552 case X86::BI__builtin_ia32_scattersiv16si:
12553 IID = Intrinsic::x86_avx512_mask_scatter_dpi_512;
12554 break;
12555 case X86::BI__builtin_ia32_scatterdiv8di:
12556 IID = Intrinsic::x86_avx512_mask_scatter_qpq_512;
12557 break;
12558 case X86::BI__builtin_ia32_scatterdiv16si:
12559 IID = Intrinsic::x86_avx512_mask_scatter_qpi_512;
12560 break;
12561 case X86::BI__builtin_ia32_scatterdiv2df:
12562 IID = Intrinsic::x86_avx512_mask_scatterdiv2_df;
12563 break;
12564 case X86::BI__builtin_ia32_scatterdiv2di:
12565 IID = Intrinsic::x86_avx512_mask_scatterdiv2_di;
12566 break;
12567 case X86::BI__builtin_ia32_scatterdiv4df:
12568 IID = Intrinsic::x86_avx512_mask_scatterdiv4_df;
12569 break;
12570 case X86::BI__builtin_ia32_scatterdiv4di:
12571 IID = Intrinsic::x86_avx512_mask_scatterdiv4_di;
12572 break;
12573 case X86::BI__builtin_ia32_scatterdiv4sf:
12574 IID = Intrinsic::x86_avx512_mask_scatterdiv4_sf;
12575 break;
12576 case X86::BI__builtin_ia32_scatterdiv4si:
12577 IID = Intrinsic::x86_avx512_mask_scatterdiv4_si;
12578 break;
12579 case X86::BI__builtin_ia32_scatterdiv8sf:
12580 IID = Intrinsic::x86_avx512_mask_scatterdiv8_sf;
12581 break;
12582 case X86::BI__builtin_ia32_scatterdiv8si:
12583 IID = Intrinsic::x86_avx512_mask_scatterdiv8_si;
12584 break;
12585 case X86::BI__builtin_ia32_scattersiv2df:
12586 IID = Intrinsic::x86_avx512_mask_scattersiv2_df;
12587 break;
12588 case X86::BI__builtin_ia32_scattersiv2di:
12589 IID = Intrinsic::x86_avx512_mask_scattersiv2_di;
12590 break;
12591 case X86::BI__builtin_ia32_scattersiv4df:
12592 IID = Intrinsic::x86_avx512_mask_scattersiv4_df;
12593 break;
12594 case X86::BI__builtin_ia32_scattersiv4di:
12595 IID = Intrinsic::x86_avx512_mask_scattersiv4_di;
12596 break;
12597 case X86::BI__builtin_ia32_scattersiv4sf:
12598 IID = Intrinsic::x86_avx512_mask_scattersiv4_sf;
12599 break;
12600 case X86::BI__builtin_ia32_scattersiv4si:
12601 IID = Intrinsic::x86_avx512_mask_scattersiv4_si;
12602 break;
12603 case X86::BI__builtin_ia32_scattersiv8sf:
12604 IID = Intrinsic::x86_avx512_mask_scattersiv8_sf;
12605 break;
12606 case X86::BI__builtin_ia32_scattersiv8si:
12607 IID = Intrinsic::x86_avx512_mask_scattersiv8_si;
12608 break;
12609 }
12610
12611 unsigned MinElts = std::min(
12612 cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements(),
12613 cast<llvm::FixedVectorType>(Ops[3]->getType())->getNumElements());
12614 Ops[1] = getMaskVecValue(*this, Ops[1], MinElts);
12615 Function *Intr = CGM.getIntrinsic(IID);
12616 return Builder.CreateCall(Intr, Ops);
12617 }
12618
12619 case X86::BI__builtin_ia32_vextractf128_pd256:
12620 case X86::BI__builtin_ia32_vextractf128_ps256:
12621 case X86::BI__builtin_ia32_vextractf128_si256:
12622 case X86::BI__builtin_ia32_extract128i256:
12623 case X86::BI__builtin_ia32_extractf64x4_mask:
12624 case X86::BI__builtin_ia32_extractf32x4_mask:
12625 case X86::BI__builtin_ia32_extracti64x4_mask:
12626 case X86::BI__builtin_ia32_extracti32x4_mask:
12627 case X86::BI__builtin_ia32_extractf32x8_mask:
12628 case X86::BI__builtin_ia32_extracti32x8_mask:
12629 case X86::BI__builtin_ia32_extractf32x4_256_mask:
12630 case X86::BI__builtin_ia32_extracti32x4_256_mask:
12631 case X86::BI__builtin_ia32_extractf64x2_256_mask:
12632 case X86::BI__builtin_ia32_extracti64x2_256_mask:
12633 case X86::BI__builtin_ia32_extractf64x2_512_mask:
12634 case X86::BI__builtin_ia32_extracti64x2_512_mask: {
12635 auto *DstTy = cast<llvm::FixedVectorType>(ConvertType(E->getType()));
12636 unsigned NumElts = DstTy->getNumElements();
12637 unsigned SrcNumElts =
12638 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12639 unsigned SubVectors = SrcNumElts / NumElts;
12640 unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue();
12641 assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
12642 Index &= SubVectors - 1; // Remove any extra bits.
12643 Index *= NumElts;
12644
12645 int Indices[16];
12646 for (unsigned i = 0; i != NumElts; ++i)
12647 Indices[i] = i + Index;
12648
12649 Value *Res = Builder.CreateShuffleVector(Ops[0],
12650 UndefValue::get(Ops[0]->getType()),
12651 makeArrayRef(Indices, NumElts),
12652 "extract");
12653
12654 if (Ops.size() == 4)
12655 Res = EmitX86Select(*this, Ops[3], Res, Ops[2]);
12656
12657 return Res;
12658 }
12659 case X86::BI__builtin_ia32_vinsertf128_pd256:
12660 case X86::BI__builtin_ia32_vinsertf128_ps256:
12661 case X86::BI__builtin_ia32_vinsertf128_si256:
12662 case X86::BI__builtin_ia32_insert128i256:
12663 case X86::BI__builtin_ia32_insertf64x4:
12664 case X86::BI__builtin_ia32_insertf32x4:
12665 case X86::BI__builtin_ia32_inserti64x4:
12666 case X86::BI__builtin_ia32_inserti32x4:
12667 case X86::BI__builtin_ia32_insertf32x8:
12668 case X86::BI__builtin_ia32_inserti32x8:
12669 case X86::BI__builtin_ia32_insertf32x4_256:
12670 case X86::BI__builtin_ia32_inserti32x4_256:
12671 case X86::BI__builtin_ia32_insertf64x2_256:
12672 case X86::BI__builtin_ia32_inserti64x2_256:
12673 case X86::BI__builtin_ia32_insertf64x2_512:
12674 case X86::BI__builtin_ia32_inserti64x2_512: {
12675 unsigned DstNumElts =
12676 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12677 unsigned SrcNumElts =
12678 cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements();
12679 unsigned SubVectors = DstNumElts / SrcNumElts;
12680 unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
12681 assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
12682 Index &= SubVectors - 1; // Remove any extra bits.
12683 Index *= SrcNumElts;
12684
12685 int Indices[16];
12686 for (unsigned i = 0; i != DstNumElts; ++i)
12687 Indices[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i;
12688
12689 Value *Op1 = Builder.CreateShuffleVector(Ops[1],
12690 UndefValue::get(Ops[1]->getType()),
12691 makeArrayRef(Indices, DstNumElts),
12692 "widen");
12693
12694 for (unsigned i = 0; i != DstNumElts; ++i) {
12695 if (i >= Index && i < (Index + SrcNumElts))
12696 Indices[i] = (i - Index) + DstNumElts;
12697 else
12698 Indices[i] = i;
12699 }
12700
12701 return Builder.CreateShuffleVector(Ops[0], Op1,
12702 makeArrayRef(Indices, DstNumElts),
12703 "insert");
12704 }
12705 case X86::BI__builtin_ia32_pmovqd512_mask:
12706 case X86::BI__builtin_ia32_pmovwb512_mask: {
12707 Value *Res = Builder.CreateTrunc(Ops[0], Ops[1]->getType());
12708 return EmitX86Select(*this, Ops[2], Res, Ops[1]);
12709 }
12710 case X86::BI__builtin_ia32_pmovdb512_mask:
12711 case X86::BI__builtin_ia32_pmovdw512_mask:
12712 case X86::BI__builtin_ia32_pmovqw512_mask: {
12713 if (const auto *C = dyn_cast<Constant>(Ops[2]))
12714 if (C->isAllOnesValue())
12715 return Builder.CreateTrunc(Ops[0], Ops[1]->getType());
12716
12717 Intrinsic::ID IID;
12718 switch (BuiltinID) {
12719 default: llvm_unreachable("Unsupported intrinsic!");
12720 case X86::BI__builtin_ia32_pmovdb512_mask:
12721 IID = Intrinsic::x86_avx512_mask_pmov_db_512;
12722 break;
12723 case X86::BI__builtin_ia32_pmovdw512_mask:
12724 IID = Intrinsic::x86_avx512_mask_pmov_dw_512;
12725 break;
12726 case X86::BI__builtin_ia32_pmovqw512_mask:
12727 IID = Intrinsic::x86_avx512_mask_pmov_qw_512;
12728 break;
12729 }
12730
12731 Function *Intr = CGM.getIntrinsic(IID);
12732 return Builder.CreateCall(Intr, Ops);
12733 }
12734 case X86::BI__builtin_ia32_pblendw128:
12735 case X86::BI__builtin_ia32_blendpd:
12736 case X86::BI__builtin_ia32_blendps:
12737 case X86::BI__builtin_ia32_blendpd256:
12738 case X86::BI__builtin_ia32_blendps256:
12739 case X86::BI__builtin_ia32_pblendw256:
12740 case X86::BI__builtin_ia32_pblendd128:
12741 case X86::BI__builtin_ia32_pblendd256: {
12742 unsigned NumElts =
12743 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12744 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
12745
12746 int Indices[16];
12747 // If there are more than 8 elements, the immediate is used twice so make
12748 // sure we handle that.
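    // e.g. for pblendw256, setting bit 1 of the immediate pulls both element 1
    // and element 9 from the second source.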
12749 for (unsigned i = 0; i != NumElts; ++i)
12750 Indices[i] = ((Imm >> (i % 8)) & 0x1) ? NumElts + i : i;
12751
12752 return Builder.CreateShuffleVector(Ops[0], Ops[1],
12753 makeArrayRef(Indices, NumElts),
12754 "blend");
12755 }
12756 case X86::BI__builtin_ia32_pshuflw:
12757 case X86::BI__builtin_ia32_pshuflw256:
12758 case X86::BI__builtin_ia32_pshuflw512: {
12759 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
12760 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
12761 unsigned NumElts = Ty->getNumElements();
12762
12763     // Splat the 8 bits of the immediate 4 times to help the loop wrap around.
12764 Imm = (Imm & 0xff) * 0x01010101;
12765
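    // e.g. an immediate of 0x1B selects words 3,2,1,0 (a reversal) for the low
    // four words of each 128-bit lane; the high four pass through unchanged.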
12766 int Indices[32];
12767 for (unsigned l = 0; l != NumElts; l += 8) {
12768 for (unsigned i = 0; i != 4; ++i) {
12769 Indices[l + i] = l + (Imm & 3);
12770 Imm >>= 2;
12771 }
12772 for (unsigned i = 4; i != 8; ++i)
12773 Indices[l + i] = l + i;
12774 }
12775
12776 return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
12777 makeArrayRef(Indices, NumElts),
12778 "pshuflw");
12779 }
12780 case X86::BI__builtin_ia32_pshufhw:
12781 case X86::BI__builtin_ia32_pshufhw256:
12782 case X86::BI__builtin_ia32_pshufhw512: {
12783 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
12784 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
12785 unsigned NumElts = Ty->getNumElements();
12786
12787     // Splat the 8 bits of the immediate 4 times to help the loop wrap around.
12788 Imm = (Imm & 0xff) * 0x01010101;
12789
12790 int Indices[32];
12791 for (unsigned l = 0; l != NumElts; l += 8) {
12792 for (unsigned i = 0; i != 4; ++i)
12793 Indices[l + i] = l + i;
12794 for (unsigned i = 4; i != 8; ++i) {
12795 Indices[l + i] = l + 4 + (Imm & 3);
12796 Imm >>= 2;
12797 }
12798 }
12799
12800 return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
12801 makeArrayRef(Indices, NumElts),
12802 "pshufhw");
12803 }
12804 case X86::BI__builtin_ia32_pshufd:
12805 case X86::BI__builtin_ia32_pshufd256:
12806 case X86::BI__builtin_ia32_pshufd512:
12807 case X86::BI__builtin_ia32_vpermilpd:
12808 case X86::BI__builtin_ia32_vpermilps:
12809 case X86::BI__builtin_ia32_vpermilpd256:
12810 case X86::BI__builtin_ia32_vpermilps256:
12811 case X86::BI__builtin_ia32_vpermilpd512:
12812 case X86::BI__builtin_ia32_vpermilps512: {
12813 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
12814 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
12815 unsigned NumElts = Ty->getNumElements();
12816 unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
12817 unsigned NumLaneElts = NumElts / NumLanes;
12818
12819     // Splat the 8 bits of the immediate 4 times to help the loop wrap around.
12820 Imm = (Imm & 0xff) * 0x01010101;
12821
12822 int Indices[16];
12823 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
12824 for (unsigned i = 0; i != NumLaneElts; ++i) {
12825 Indices[i + l] = (Imm % NumLaneElts) + l;
12826 Imm /= NumLaneElts;
12827 }
12828 }
12829
12830 return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
12831 makeArrayRef(Indices, NumElts),
12832 "permil");
12833 }
12834 case X86::BI__builtin_ia32_shufpd:
12835 case X86::BI__builtin_ia32_shufpd256:
12836 case X86::BI__builtin_ia32_shufpd512:
12837 case X86::BI__builtin_ia32_shufps:
12838 case X86::BI__builtin_ia32_shufps256:
12839 case X86::BI__builtin_ia32_shufps512: {
12840 uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
12841 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
12842 unsigned NumElts = Ty->getNumElements();
12843 unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
12844 unsigned NumLaneElts = NumElts / NumLanes;
12845
12846     // Splat the 8 bits of the immediate 4 times to help the loop wrap around.
12847 Imm = (Imm & 0xff) * 0x01010101;
12848
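    // e.g. for shufps, an immediate of 0x44 yields {a0, a1, b0, b1} in each
    // 128-bit lane.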
12849 int Indices[16];
12850 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
12851 for (unsigned i = 0; i != NumLaneElts; ++i) {
12852 unsigned Index = Imm % NumLaneElts;
12853 Imm /= NumLaneElts;
12854 if (i >= (NumLaneElts / 2))
12855 Index += NumElts;
12856 Indices[l + i] = l + Index;
12857 }
12858 }
12859
12860 return Builder.CreateShuffleVector(Ops[0], Ops[1],
12861 makeArrayRef(Indices, NumElts),
12862 "shufp");
12863 }
12864 case X86::BI__builtin_ia32_permdi256:
12865 case X86::BI__builtin_ia32_permdf256:
12866 case X86::BI__builtin_ia32_permdi512:
12867 case X86::BI__builtin_ia32_permdf512: {
12868 unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
12869 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
12870 unsigned NumElts = Ty->getNumElements();
12871
12872 // These intrinsics operate on 256-bit lanes of four 64-bit elements.
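    // e.g. vpermq with an immediate of 0x1B (0b00011011) reverses the four
    // 64-bit elements of each 256-bit lane.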
12873 int Indices[8];
12874 for (unsigned l = 0; l != NumElts; l += 4)
12875 for (unsigned i = 0; i != 4; ++i)
12876 Indices[l + i] = l + ((Imm >> (2 * i)) & 0x3);
12877
12878 return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
12879 makeArrayRef(Indices, NumElts),
12880 "perm");
12881 }
12882 case X86::BI__builtin_ia32_palignr128:
12883 case X86::BI__builtin_ia32_palignr256:
12884 case X86::BI__builtin_ia32_palignr512: {
12885 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
12886
12887 unsigned NumElts =
12888 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12889 assert(NumElts % 16 == 0);
12890
12891 // If palignr is shifting the pair of vectors more than the size of two
12892 // lanes, emit zero.
12893 if (ShiftVal >= 32)
12894 return llvm::Constant::getNullValue(ConvertType(E->getType()));
12895
12896 // If palignr is shifting the pair of input vectors more than one lane,
12897 // but less than two lanes, convert to shifting in zeroes.
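    // e.g. a 20-byte shift becomes a 4-byte shift of the first input with
    // zeroes shifted in from the (now zeroed) second operand.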
12898 if (ShiftVal > 16) {
12899 ShiftVal -= 16;
12900 Ops[1] = Ops[0];
12901 Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType());
12902 }
12903
12904 int Indices[64];
12905     // 256/512-bit palignr operates on 128-bit lanes so we need to handle that
12906 for (unsigned l = 0; l != NumElts; l += 16) {
12907 for (unsigned i = 0; i != 16; ++i) {
12908 unsigned Idx = ShiftVal + i;
12909 if (Idx >= 16)
12910 Idx += NumElts - 16; // End of lane, switch operand.
12911 Indices[l + i] = Idx + l;
12912 }
12913 }
12914
12915 return Builder.CreateShuffleVector(Ops[1], Ops[0],
12916 makeArrayRef(Indices, NumElts),
12917 "palignr");
12918 }
12919 case X86::BI__builtin_ia32_alignd128:
12920 case X86::BI__builtin_ia32_alignd256:
12921 case X86::BI__builtin_ia32_alignd512:
12922 case X86::BI__builtin_ia32_alignq128:
12923 case X86::BI__builtin_ia32_alignq256:
12924 case X86::BI__builtin_ia32_alignq512: {
12925 unsigned NumElts =
12926 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12927 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
12928
12929 // Mask the shift amount to width of two vectors.
12930 ShiftVal &= (2 * NumElts) - 1;
12931
12932 int Indices[16];
12933 for (unsigned i = 0; i != NumElts; ++i)
12934 Indices[i] = i + ShiftVal;
12935
12936 return Builder.CreateShuffleVector(Ops[1], Ops[0],
12937 makeArrayRef(Indices, NumElts),
12938 "valign");
12939 }
12940 case X86::BI__builtin_ia32_shuf_f32x4_256:
12941 case X86::BI__builtin_ia32_shuf_f64x2_256:
12942 case X86::BI__builtin_ia32_shuf_i32x4_256:
12943 case X86::BI__builtin_ia32_shuf_i64x2_256:
12944 case X86::BI__builtin_ia32_shuf_f32x4:
12945 case X86::BI__builtin_ia32_shuf_f64x2:
12946 case X86::BI__builtin_ia32_shuf_i32x4:
12947 case X86::BI__builtin_ia32_shuf_i64x2: {
12948 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
12949 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
12950 unsigned NumElts = Ty->getNumElements();
12951 unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2;
12952 unsigned NumLaneElts = NumElts / NumLanes;
12953
12954 int Indices[16];
12955 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
12956 unsigned Index = (Imm % NumLanes) * NumLaneElts;
12957 Imm /= NumLanes; // Discard the bits we just used.
12958 if (l >= (NumElts / 2))
12959 Index += NumElts; // Switch to other source.
12960 for (unsigned i = 0; i != NumLaneElts; ++i) {
12961 Indices[l + i] = Index + i;
12962 }
12963 }
12964
12965 return Builder.CreateShuffleVector(Ops[0], Ops[1],
12966 makeArrayRef(Indices, NumElts),
12967 "shuf");
12968 }
12969
12970 case X86::BI__builtin_ia32_vperm2f128_pd256:
12971 case X86::BI__builtin_ia32_vperm2f128_ps256:
12972 case X86::BI__builtin_ia32_vperm2f128_si256:
12973 case X86::BI__builtin_ia32_permti256: {
12974 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
12975 unsigned NumElts =
12976 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12977
12978 // This takes a very simple approach since there are two lanes and a
12979 // shuffle can have 2 inputs. So we reserve the first input for the first
12980 // lane and the second input for the second lane. This may result in
12981 // duplicate sources, but this can be dealt with in the backend.
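          // For each 128-bit result lane l, nibble l of the immediate is decoded as:
          // bit 3 zeroes the lane, bit 1 selects Ops[1] over Ops[0], and bit 0
          // selects the high half of the chosen source.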
12982
12983 Value *OutOps[2];
12984 int Indices[8];
12985 for (unsigned l = 0; l != 2; ++l) {
12986 // Determine the source for this lane.
12987 if (Imm & (1 << ((l * 4) + 3)))
12988 OutOps[l] = llvm::ConstantAggregateZero::get(Ops[0]->getType());
12989 else if (Imm & (1 << ((l * 4) + 1)))
12990 OutOps[l] = Ops[1];
12991 else
12992 OutOps[l] = Ops[0];
12993
12994 for (unsigned i = 0; i != NumElts/2; ++i) {
12995 // Start with ith element of the source for this lane.
12996 unsigned Idx = (l * NumElts) + i;
12997 // If bit 0 of the immediate half is set, switch to the high half of
12998 // the source.
12999 if (Imm & (1 << (l * 4)))
13000 Idx += NumElts/2;
13001 Indices[(l * (NumElts/2)) + i] = Idx;
13002 }
13003 }
13004
13005 return Builder.CreateShuffleVector(OutOps[0], OutOps[1],
13006 makeArrayRef(Indices, NumElts),
13007 "vperm");
13008 }
13009
13010 case X86::BI__builtin_ia32_pslldqi128_byteshift:
13011 case X86::BI__builtin_ia32_pslldqi256_byteshift:
13012 case X86::BI__builtin_ia32_pslldqi512_byteshift: {
13013 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
13014 auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
13015 // Builtin type is vXi64 so multiply by 8 to get bytes.
13016 unsigned NumElts = ResultType->getNumElements() * 8;
13017
13018 // If pslldq is shifting the vector more than 15 bytes, emit zero.
13019 if (ShiftVal >= 16)
13020 return llvm::Constant::getNullValue(ResultType);
13021
13022 int Indices[64];
13023 // 256/512-bit pslldq operates on 128-bit lanes so we need to handle that
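          // Within each 128-bit lane, result byte i is zero for i < ShiftVal and
          // source byte (i - ShiftVal) otherwise.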
13024 for (unsigned l = 0; l != NumElts; l += 16) {
13025 for (unsigned i = 0; i != 16; ++i) {
13026 unsigned Idx = NumElts + i - ShiftVal;
13027 if (Idx < NumElts) Idx -= NumElts - 16; // end of lane, switch operand.
13028 Indices[l + i] = Idx + l;
13029 }
13030 }
13031
13032 auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts);
13033 Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
13034 Value *Zero = llvm::Constant::getNullValue(VecTy);
13035 Value *SV = Builder.CreateShuffleVector(Zero, Cast,
13036 makeArrayRef(Indices, NumElts),
13037 "pslldq");
13038 return Builder.CreateBitCast(SV, Ops[0]->getType(), "cast");
13039 }
13040 case X86::BI__builtin_ia32_psrldqi128_byteshift:
13041 case X86::BI__builtin_ia32_psrldqi256_byteshift:
13042 case X86::BI__builtin_ia32_psrldqi512_byteshift: {
13043 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
13044 auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
13045 // Builtin type is vXi64 so multiply by 8 to get bytes.
13046 unsigned NumElts = ResultType->getNumElements() * 8;
13047
13048 // If psrldq is shifting the vector more than 15 bytes, emit zero.
13049 if (ShiftVal >= 16)
13050 return llvm::Constant::getNullValue(ResultType);
13051
13052 int Indices[64];
13053 // 256/512-bit psrldq operates on 128-bit lanes so we need to handle that
13054 for (unsigned l = 0; l != NumElts; l += 16) {
13055 for (unsigned i = 0; i != 16; ++i) {
13056 unsigned Idx = i + ShiftVal;
13057 if (Idx >= 16) Idx += NumElts - 16; // end of lane, switch operand.
13058 Indices[l + i] = Idx + l;
13059 }
13060 }
13061
13062 auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts);
13063 Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
13064 Value *Zero = llvm::Constant::getNullValue(VecTy);
13065 Value *SV = Builder.CreateShuffleVector(Cast, Zero,
13066 makeArrayRef(Indices, NumElts),
13067 "psrldq");
13068 return Builder.CreateBitCast(SV, ResultType, "cast");
13069 }
13070 case X86::BI__builtin_ia32_kshiftliqi:
13071 case X86::BI__builtin_ia32_kshiftlihi:
13072 case X86::BI__builtin_ia32_kshiftlisi:
13073 case X86::BI__builtin_ia32_kshiftlidi: {
13074 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
13075 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13076
13077 if (ShiftVal >= NumElts)
13078 return llvm::Constant::getNullValue(Ops[0]->getType());
13079
13080 Value *In = getMaskVecValue(*this, Ops[0], NumElts);
13081
13082 int Indices[64];
13083 for (unsigned i = 0; i != NumElts; ++i)
13084 Indices[i] = NumElts + i - ShiftVal;
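          // Shuffling concat(Zero, In) with these indices shifts the mask bits left
          // by ShiftVal, filling the low bits with zeroes.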
13085
13086 Value *Zero = llvm::Constant::getNullValue(In->getType());
13087 Value *SV = Builder.CreateShuffleVector(Zero, In,
13088 makeArrayRef(Indices, NumElts),
13089 "kshiftl");
13090 return Builder.CreateBitCast(SV, Ops[0]->getType());
13091 }
13092 case X86::BI__builtin_ia32_kshiftriqi:
13093 case X86::BI__builtin_ia32_kshiftrihi:
13094 case X86::BI__builtin_ia32_kshiftrisi:
13095 case X86::BI__builtin_ia32_kshiftridi: {
13096 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
13097 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13098
13099 if (ShiftVal >= NumElts)
13100 return llvm::Constant::getNullValue(Ops[0]->getType());
13101
13102 Value *In = getMaskVecValue(*this, Ops[0], NumElts);
13103
13104 int Indices[64];
13105 for (unsigned i = 0; i != NumElts; ++i)
13106 Indices[i] = i + ShiftVal;
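          // Shuffling concat(In, Zero) with these indices shifts the mask bits right
          // by ShiftVal, filling the high bits with zeroes.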
13107
13108 Value *Zero = llvm::Constant::getNullValue(In->getType());
13109 Value *SV = Builder.CreateShuffleVector(In, Zero,
13110 makeArrayRef(Indices, NumElts),
13111 "kshiftr");
13112 return Builder.CreateBitCast(SV, Ops[0]->getType());
13113 }
13114 case X86::BI__builtin_ia32_movnti:
13115 case X86::BI__builtin_ia32_movnti64:
13116 case X86::BI__builtin_ia32_movntsd:
13117 case X86::BI__builtin_ia32_movntss: {
13118 llvm::MDNode *Node = llvm::MDNode::get(
13119 getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
13120
13121 Value *Ptr = Ops[0];
13122 Value *Src = Ops[1];
13123
13124 // Extract the 0'th element of the source vector.
13125 if (BuiltinID == X86::BI__builtin_ia32_movntsd ||
13126 BuiltinID == X86::BI__builtin_ia32_movntss)
13127 Src = Builder.CreateExtractElement(Src, (uint64_t)0, "extract");
13128
13129 // Convert the type of the pointer to a pointer to the stored type.
13130 Value *BC = Builder.CreateBitCast(
13131 Ptr, llvm::PointerType::getUnqual(Src->getType()), "cast");
13132
13133 // Unaligned nontemporal store of the scalar value.
13134 StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, BC);
13135 SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
13136 SI->setAlignment(llvm::Align(1));
13137 return SI;
13138 }
13139   // Rotate is a special case of funnel shift: both data operands are the same.
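        // e.g. a rotate-left of X by N is fshl(X, X, N).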
13140 case X86::BI__builtin_ia32_vprotb:
13141 case X86::BI__builtin_ia32_vprotw:
13142 case X86::BI__builtin_ia32_vprotd:
13143 case X86::BI__builtin_ia32_vprotq:
13144 case X86::BI__builtin_ia32_vprotbi:
13145 case X86::BI__builtin_ia32_vprotwi:
13146 case X86::BI__builtin_ia32_vprotdi:
13147 case X86::BI__builtin_ia32_vprotqi:
13148 case X86::BI__builtin_ia32_prold128:
13149 case X86::BI__builtin_ia32_prold256:
13150 case X86::BI__builtin_ia32_prold512:
13151 case X86::BI__builtin_ia32_prolq128:
13152 case X86::BI__builtin_ia32_prolq256:
13153 case X86::BI__builtin_ia32_prolq512:
13154 case X86::BI__builtin_ia32_prolvd128:
13155 case X86::BI__builtin_ia32_prolvd256:
13156 case X86::BI__builtin_ia32_prolvd512:
13157 case X86::BI__builtin_ia32_prolvq128:
13158 case X86::BI__builtin_ia32_prolvq256:
13159 case X86::BI__builtin_ia32_prolvq512:
13160 return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], false);
13161 case X86::BI__builtin_ia32_prord128:
13162 case X86::BI__builtin_ia32_prord256:
13163 case X86::BI__builtin_ia32_prord512:
13164 case X86::BI__builtin_ia32_prorq128:
13165 case X86::BI__builtin_ia32_prorq256:
13166 case X86::BI__builtin_ia32_prorq512:
13167 case X86::BI__builtin_ia32_prorvd128:
13168 case X86::BI__builtin_ia32_prorvd256:
13169 case X86::BI__builtin_ia32_prorvd512:
13170 case X86::BI__builtin_ia32_prorvq128:
13171 case X86::BI__builtin_ia32_prorvq256:
13172 case X86::BI__builtin_ia32_prorvq512:
13173 return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], true);
13174 case X86::BI__builtin_ia32_selectb_128:
13175 case X86::BI__builtin_ia32_selectb_256:
13176 case X86::BI__builtin_ia32_selectb_512:
13177 case X86::BI__builtin_ia32_selectw_128:
13178 case X86::BI__builtin_ia32_selectw_256:
13179 case X86::BI__builtin_ia32_selectw_512:
13180 case X86::BI__builtin_ia32_selectd_128:
13181 case X86::BI__builtin_ia32_selectd_256:
13182 case X86::BI__builtin_ia32_selectd_512:
13183 case X86::BI__builtin_ia32_selectq_128:
13184 case X86::BI__builtin_ia32_selectq_256:
13185 case X86::BI__builtin_ia32_selectq_512:
13186 case X86::BI__builtin_ia32_selectps_128:
13187 case X86::BI__builtin_ia32_selectps_256:
13188 case X86::BI__builtin_ia32_selectps_512:
13189 case X86::BI__builtin_ia32_selectpd_128:
13190 case X86::BI__builtin_ia32_selectpd_256:
13191 case X86::BI__builtin_ia32_selectpd_512:
13192 return EmitX86Select(*this, Ops[0], Ops[1], Ops[2]);
13193 case X86::BI__builtin_ia32_selectss_128:
13194 case X86::BI__builtin_ia32_selectsd_128: {
13195 Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
13196 Value *B = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
13197 A = EmitX86ScalarSelect(*this, Ops[0], A, B);
13198 return Builder.CreateInsertElement(Ops[1], A, (uint64_t)0);
13199 }
13200 case X86::BI__builtin_ia32_cmpb128_mask:
13201 case X86::BI__builtin_ia32_cmpb256_mask:
13202 case X86::BI__builtin_ia32_cmpb512_mask:
13203 case X86::BI__builtin_ia32_cmpw128_mask:
13204 case X86::BI__builtin_ia32_cmpw256_mask:
13205 case X86::BI__builtin_ia32_cmpw512_mask:
13206 case X86::BI__builtin_ia32_cmpd128_mask:
13207 case X86::BI__builtin_ia32_cmpd256_mask:
13208 case X86::BI__builtin_ia32_cmpd512_mask:
13209 case X86::BI__builtin_ia32_cmpq128_mask:
13210 case X86::BI__builtin_ia32_cmpq256_mask:
13211 case X86::BI__builtin_ia32_cmpq512_mask: {
13212 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
13213 return EmitX86MaskedCompare(*this, CC, true, Ops);
13214 }
13215 case X86::BI__builtin_ia32_ucmpb128_mask:
13216 case X86::BI__builtin_ia32_ucmpb256_mask:
13217 case X86::BI__builtin_ia32_ucmpb512_mask:
13218 case X86::BI__builtin_ia32_ucmpw128_mask:
13219 case X86::BI__builtin_ia32_ucmpw256_mask:
13220 case X86::BI__builtin_ia32_ucmpw512_mask:
13221 case X86::BI__builtin_ia32_ucmpd128_mask:
13222 case X86::BI__builtin_ia32_ucmpd256_mask:
13223 case X86::BI__builtin_ia32_ucmpd512_mask:
13224 case X86::BI__builtin_ia32_ucmpq128_mask:
13225 case X86::BI__builtin_ia32_ucmpq256_mask:
13226 case X86::BI__builtin_ia32_ucmpq512_mask: {
13227 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
13228 return EmitX86MaskedCompare(*this, CC, false, Ops);
13229 }
13230 case X86::BI__builtin_ia32_vpcomb:
13231 case X86::BI__builtin_ia32_vpcomw:
13232 case X86::BI__builtin_ia32_vpcomd:
13233 case X86::BI__builtin_ia32_vpcomq:
13234 return EmitX86vpcom(*this, Ops, true);
13235 case X86::BI__builtin_ia32_vpcomub:
13236 case X86::BI__builtin_ia32_vpcomuw:
13237 case X86::BI__builtin_ia32_vpcomud:
13238 case X86::BI__builtin_ia32_vpcomuq:
13239 return EmitX86vpcom(*this, Ops, false);
13240
13241 case X86::BI__builtin_ia32_kortestcqi:
13242 case X86::BI__builtin_ia32_kortestchi:
13243 case X86::BI__builtin_ia32_kortestcsi:
13244 case X86::BI__builtin_ia32_kortestcdi: {
13245 Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
13246 Value *C = llvm::Constant::getAllOnesValue(Ops[0]->getType());
13247 Value *Cmp = Builder.CreateICmpEQ(Or, C);
13248 return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
13249 }
13250 case X86::BI__builtin_ia32_kortestzqi:
13251 case X86::BI__builtin_ia32_kortestzhi:
13252 case X86::BI__builtin_ia32_kortestzsi:
13253 case X86::BI__builtin_ia32_kortestzdi: {
13254 Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
13255 Value *C = llvm::Constant::getNullValue(Ops[0]->getType());
13256 Value *Cmp = Builder.CreateICmpEQ(Or, C);
13257 return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
13258 }
13259
13260 case X86::BI__builtin_ia32_ktestcqi:
13261 case X86::BI__builtin_ia32_ktestzqi:
13262 case X86::BI__builtin_ia32_ktestchi:
13263 case X86::BI__builtin_ia32_ktestzhi:
13264 case X86::BI__builtin_ia32_ktestcsi:
13265 case X86::BI__builtin_ia32_ktestzsi:
13266 case X86::BI__builtin_ia32_ktestcdi:
13267 case X86::BI__builtin_ia32_ktestzdi: {
13268 Intrinsic::ID IID;
13269 switch (BuiltinID) {
13270 default: llvm_unreachable("Unsupported intrinsic!");
13271 case X86::BI__builtin_ia32_ktestcqi:
13272 IID = Intrinsic::x86_avx512_ktestc_b;
13273 break;
13274 case X86::BI__builtin_ia32_ktestzqi:
13275 IID = Intrinsic::x86_avx512_ktestz_b;
13276 break;
13277 case X86::BI__builtin_ia32_ktestchi:
13278 IID = Intrinsic::x86_avx512_ktestc_w;
13279 break;
13280 case X86::BI__builtin_ia32_ktestzhi:
13281 IID = Intrinsic::x86_avx512_ktestz_w;
13282 break;
13283 case X86::BI__builtin_ia32_ktestcsi:
13284 IID = Intrinsic::x86_avx512_ktestc_d;
13285 break;
13286 case X86::BI__builtin_ia32_ktestzsi:
13287 IID = Intrinsic::x86_avx512_ktestz_d;
13288 break;
13289 case X86::BI__builtin_ia32_ktestcdi:
13290 IID = Intrinsic::x86_avx512_ktestc_q;
13291 break;
13292 case X86::BI__builtin_ia32_ktestzdi:
13293 IID = Intrinsic::x86_avx512_ktestz_q;
13294 break;
13295 }
13296
13297 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13298 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
13299 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
13300 Function *Intr = CGM.getIntrinsic(IID);
13301 return Builder.CreateCall(Intr, {LHS, RHS});
13302 }
13303
13304 case X86::BI__builtin_ia32_kaddqi:
13305 case X86::BI__builtin_ia32_kaddhi:
13306 case X86::BI__builtin_ia32_kaddsi:
13307 case X86::BI__builtin_ia32_kadddi: {
13308 Intrinsic::ID IID;
13309 switch (BuiltinID) {
13310 default: llvm_unreachable("Unsupported intrinsic!");
13311 case X86::BI__builtin_ia32_kaddqi:
13312 IID = Intrinsic::x86_avx512_kadd_b;
13313 break;
13314 case X86::BI__builtin_ia32_kaddhi:
13315 IID = Intrinsic::x86_avx512_kadd_w;
13316 break;
13317 case X86::BI__builtin_ia32_kaddsi:
13318 IID = Intrinsic::x86_avx512_kadd_d;
13319 break;
13320 case X86::BI__builtin_ia32_kadddi:
13321 IID = Intrinsic::x86_avx512_kadd_q;
13322 break;
13323 }
13324
13325 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13326 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
13327 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
13328 Function *Intr = CGM.getIntrinsic(IID);
13329 Value *Res = Builder.CreateCall(Intr, {LHS, RHS});
13330 return Builder.CreateBitCast(Res, Ops[0]->getType());
13331 }
13332 case X86::BI__builtin_ia32_kandqi:
13333 case X86::BI__builtin_ia32_kandhi:
13334 case X86::BI__builtin_ia32_kandsi:
13335 case X86::BI__builtin_ia32_kanddi:
13336 return EmitX86MaskLogic(*this, Instruction::And, Ops);
13337 case X86::BI__builtin_ia32_kandnqi:
13338 case X86::BI__builtin_ia32_kandnhi:
13339 case X86::BI__builtin_ia32_kandnsi:
13340 case X86::BI__builtin_ia32_kandndi:
13341 return EmitX86MaskLogic(*this, Instruction::And, Ops, true);
13342 case X86::BI__builtin_ia32_korqi:
13343 case X86::BI__builtin_ia32_korhi:
13344 case X86::BI__builtin_ia32_korsi:
13345 case X86::BI__builtin_ia32_kordi:
13346 return EmitX86MaskLogic(*this, Instruction::Or, Ops);
13347 case X86::BI__builtin_ia32_kxnorqi:
13348 case X86::BI__builtin_ia32_kxnorhi:
13349 case X86::BI__builtin_ia32_kxnorsi:
13350 case X86::BI__builtin_ia32_kxnordi:
13351 return EmitX86MaskLogic(*this, Instruction::Xor, Ops, true);
13352 case X86::BI__builtin_ia32_kxorqi:
13353 case X86::BI__builtin_ia32_kxorhi:
13354 case X86::BI__builtin_ia32_kxorsi:
13355 case X86::BI__builtin_ia32_kxordi:
13356 return EmitX86MaskLogic(*this, Instruction::Xor, Ops);
13357 case X86::BI__builtin_ia32_knotqi:
13358 case X86::BI__builtin_ia32_knothi:
13359 case X86::BI__builtin_ia32_knotsi:
13360 case X86::BI__builtin_ia32_knotdi: {
13361 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13362 Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
13363 return Builder.CreateBitCast(Builder.CreateNot(Res),
13364 Ops[0]->getType());
13365 }
13366 case X86::BI__builtin_ia32_kmovb:
13367 case X86::BI__builtin_ia32_kmovw:
13368 case X86::BI__builtin_ia32_kmovd:
13369 case X86::BI__builtin_ia32_kmovq: {
13370 // Bitcast to vXi1 type and then back to integer. This gets the mask
13371 // register type into the IR, but might be optimized out depending on
13372 // what's around it.
13373 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13374 Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
13375 return Builder.CreateBitCast(Res, Ops[0]->getType());
13376 }
13377
13378 case X86::BI__builtin_ia32_kunpckdi:
13379 case X86::BI__builtin_ia32_kunpcksi:
13380 case X86::BI__builtin_ia32_kunpckhi: {
13381 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13382 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
13383 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
13384 int Indices[64];
13385 for (unsigned i = 0; i != NumElts; ++i)
13386 Indices[i] = i;
13387
13388 // First extract half of each vector. This gives better codegen than
13389 // doing it in a single shuffle.
13390 LHS = Builder.CreateShuffleVector(LHS, LHS,
13391 makeArrayRef(Indices, NumElts / 2));
13392 RHS = Builder.CreateShuffleVector(RHS, RHS,
13393 makeArrayRef(Indices, NumElts / 2));
13394 // Concat the vectors.
13395 // NOTE: Operands are swapped to match the intrinsic definition.
13396 Value *Res = Builder.CreateShuffleVector(RHS, LHS,
13397 makeArrayRef(Indices, NumElts));
13398 return Builder.CreateBitCast(Res, Ops[0]->getType());
13399 }
13400
13401 case X86::BI__builtin_ia32_vplzcntd_128:
13402 case X86::BI__builtin_ia32_vplzcntd_256:
13403 case X86::BI__builtin_ia32_vplzcntd_512:
13404 case X86::BI__builtin_ia32_vplzcntq_128:
13405 case X86::BI__builtin_ia32_vplzcntq_256:
13406 case X86::BI__builtin_ia32_vplzcntq_512: {
13407 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
13408 return Builder.CreateCall(F, {Ops[0],Builder.getInt1(false)});
13409 }
13410 case X86::BI__builtin_ia32_sqrtss:
13411 case X86::BI__builtin_ia32_sqrtsd: {
13412 Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
13413 Function *F;
13414 if (Builder.getIsFPConstrained()) {
13415 F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
13416 A->getType());
13417 A = Builder.CreateConstrainedFPCall(F, {A});
13418 } else {
13419 F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
13420 A = Builder.CreateCall(F, {A});
13421 }
13422 return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
13423 }
13424 case X86::BI__builtin_ia32_sqrtsd_round_mask:
13425 case X86::BI__builtin_ia32_sqrtss_round_mask: {
13426 unsigned CC = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
13427     // Only lower to a generic sqrt when the rounding mode is 4 (AKA
13428     // CUR_DIRECTION); otherwise keep the target-specific intrinsic.
13429 if (CC != 4) {
13430 Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtsd_round_mask ?
13431 Intrinsic::x86_avx512_mask_sqrt_sd :
13432 Intrinsic::x86_avx512_mask_sqrt_ss;
13433 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
13434 }
13435 Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
13436 Function *F;
13437 if (Builder.getIsFPConstrained()) {
13438 F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
13439 A->getType());
13440 A = Builder.CreateConstrainedFPCall(F, A);
13441 } else {
13442 F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
13443 A = Builder.CreateCall(F, A);
13444 }
13445 Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
13446 A = EmitX86ScalarSelect(*this, Ops[3], A, Src);
13447 return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
13448 }
13449 case X86::BI__builtin_ia32_sqrtpd256:
13450 case X86::BI__builtin_ia32_sqrtpd:
13451 case X86::BI__builtin_ia32_sqrtps256:
13452 case X86::BI__builtin_ia32_sqrtps:
13453 case X86::BI__builtin_ia32_sqrtps512:
13454 case X86::BI__builtin_ia32_sqrtpd512: {
13455 if (Ops.size() == 2) {
13456 unsigned CC = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
13457       // Only lower to a generic sqrt when the rounding mode is 4 (AKA
13458       // CUR_DIRECTION); otherwise keep the target-specific intrinsic.
13459 if (CC != 4) {
13460 Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtps512 ?
13461 Intrinsic::x86_avx512_sqrt_ps_512 :
13462 Intrinsic::x86_avx512_sqrt_pd_512;
13463 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
13464 }
13465 }
13466 if (Builder.getIsFPConstrained()) {
13467 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
13468 Ops[0]->getType());
13469 return Builder.CreateConstrainedFPCall(F, Ops[0]);
13470 } else {
13471 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType());
13472 return Builder.CreateCall(F, Ops[0]);
13473 }
13474 }
13475 case X86::BI__builtin_ia32_pabsb128:
13476 case X86::BI__builtin_ia32_pabsw128:
13477 case X86::BI__builtin_ia32_pabsd128:
13478 case X86::BI__builtin_ia32_pabsb256:
13479 case X86::BI__builtin_ia32_pabsw256:
13480 case X86::BI__builtin_ia32_pabsd256:
13481 case X86::BI__builtin_ia32_pabsq128:
13482 case X86::BI__builtin_ia32_pabsq256:
13483 case X86::BI__builtin_ia32_pabsb512:
13484 case X86::BI__builtin_ia32_pabsw512:
13485 case X86::BI__builtin_ia32_pabsd512:
13486 case X86::BI__builtin_ia32_pabsq512: {
13487 Function *F = CGM.getIntrinsic(Intrinsic::abs, Ops[0]->getType());
13488 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
13489 }
13490 case X86::BI__builtin_ia32_pmaxsb128:
13491 case X86::BI__builtin_ia32_pmaxsw128:
13492 case X86::BI__builtin_ia32_pmaxsd128:
13493 case X86::BI__builtin_ia32_pmaxsq128:
13494 case X86::BI__builtin_ia32_pmaxsb256:
13495 case X86::BI__builtin_ia32_pmaxsw256:
13496 case X86::BI__builtin_ia32_pmaxsd256:
13497 case X86::BI__builtin_ia32_pmaxsq256:
13498 case X86::BI__builtin_ia32_pmaxsb512:
13499 case X86::BI__builtin_ia32_pmaxsw512:
13500 case X86::BI__builtin_ia32_pmaxsd512:
13501 case X86::BI__builtin_ia32_pmaxsq512:
13502 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::smax);
13503 case X86::BI__builtin_ia32_pmaxub128:
13504 case X86::BI__builtin_ia32_pmaxuw128:
13505 case X86::BI__builtin_ia32_pmaxud128:
13506 case X86::BI__builtin_ia32_pmaxuq128:
13507 case X86::BI__builtin_ia32_pmaxub256:
13508 case X86::BI__builtin_ia32_pmaxuw256:
13509 case X86::BI__builtin_ia32_pmaxud256:
13510 case X86::BI__builtin_ia32_pmaxuq256:
13511 case X86::BI__builtin_ia32_pmaxub512:
13512 case X86::BI__builtin_ia32_pmaxuw512:
13513 case X86::BI__builtin_ia32_pmaxud512:
13514 case X86::BI__builtin_ia32_pmaxuq512:
13515 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::umax);
13516 case X86::BI__builtin_ia32_pminsb128:
13517 case X86::BI__builtin_ia32_pminsw128:
13518 case X86::BI__builtin_ia32_pminsd128:
13519 case X86::BI__builtin_ia32_pminsq128:
13520 case X86::BI__builtin_ia32_pminsb256:
13521 case X86::BI__builtin_ia32_pminsw256:
13522 case X86::BI__builtin_ia32_pminsd256:
13523 case X86::BI__builtin_ia32_pminsq256:
13524 case X86::BI__builtin_ia32_pminsb512:
13525 case X86::BI__builtin_ia32_pminsw512:
13526 case X86::BI__builtin_ia32_pminsd512:
13527 case X86::BI__builtin_ia32_pminsq512:
13528 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::smin);
13529 case X86::BI__builtin_ia32_pminub128:
13530 case X86::BI__builtin_ia32_pminuw128:
13531 case X86::BI__builtin_ia32_pminud128:
13532 case X86::BI__builtin_ia32_pminuq128:
13533 case X86::BI__builtin_ia32_pminub256:
13534 case X86::BI__builtin_ia32_pminuw256:
13535 case X86::BI__builtin_ia32_pminud256:
13536 case X86::BI__builtin_ia32_pminuq256:
13537 case X86::BI__builtin_ia32_pminub512:
13538 case X86::BI__builtin_ia32_pminuw512:
13539 case X86::BI__builtin_ia32_pminud512:
13540 case X86::BI__builtin_ia32_pminuq512:
13541 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::umin);
13542
13543 case X86::BI__builtin_ia32_pmuludq128:
13544 case X86::BI__builtin_ia32_pmuludq256:
13545 case X86::BI__builtin_ia32_pmuludq512:
13546 return EmitX86Muldq(*this, /*IsSigned*/false, Ops);
13547
13548 case X86::BI__builtin_ia32_pmuldq128:
13549 case X86::BI__builtin_ia32_pmuldq256:
13550 case X86::BI__builtin_ia32_pmuldq512:
13551 return EmitX86Muldq(*this, /*IsSigned*/true, Ops);
13552
13553 case X86::BI__builtin_ia32_pternlogd512_mask:
13554 case X86::BI__builtin_ia32_pternlogq512_mask:
13555 case X86::BI__builtin_ia32_pternlogd128_mask:
13556 case X86::BI__builtin_ia32_pternlogd256_mask:
13557 case X86::BI__builtin_ia32_pternlogq128_mask:
13558 case X86::BI__builtin_ia32_pternlogq256_mask:
13559 return EmitX86Ternlog(*this, /*ZeroMask*/false, Ops);
13560
13561 case X86::BI__builtin_ia32_pternlogd512_maskz:
13562 case X86::BI__builtin_ia32_pternlogq512_maskz:
13563 case X86::BI__builtin_ia32_pternlogd128_maskz:
13564 case X86::BI__builtin_ia32_pternlogd256_maskz:
13565 case X86::BI__builtin_ia32_pternlogq128_maskz:
13566 case X86::BI__builtin_ia32_pternlogq256_maskz:
13567 return EmitX86Ternlog(*this, /*ZeroMask*/true, Ops);
13568
13569 case X86::BI__builtin_ia32_vpshldd128:
13570 case X86::BI__builtin_ia32_vpshldd256:
13571 case X86::BI__builtin_ia32_vpshldd512:
13572 case X86::BI__builtin_ia32_vpshldq128:
13573 case X86::BI__builtin_ia32_vpshldq256:
13574 case X86::BI__builtin_ia32_vpshldq512:
13575 case X86::BI__builtin_ia32_vpshldw128:
13576 case X86::BI__builtin_ia32_vpshldw256:
13577 case X86::BI__builtin_ia32_vpshldw512:
13578 return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
13579
13580 case X86::BI__builtin_ia32_vpshrdd128:
13581 case X86::BI__builtin_ia32_vpshrdd256:
13582 case X86::BI__builtin_ia32_vpshrdd512:
13583 case X86::BI__builtin_ia32_vpshrdq128:
13584 case X86::BI__builtin_ia32_vpshrdq256:
13585 case X86::BI__builtin_ia32_vpshrdq512:
13586 case X86::BI__builtin_ia32_vpshrdw128:
13587 case X86::BI__builtin_ia32_vpshrdw256:
13588 case X86::BI__builtin_ia32_vpshrdw512:
13589 // Ops 0 and 1 are swapped.
13590 return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
13591
13592 case X86::BI__builtin_ia32_vpshldvd128:
13593 case X86::BI__builtin_ia32_vpshldvd256:
13594 case X86::BI__builtin_ia32_vpshldvd512:
13595 case X86::BI__builtin_ia32_vpshldvq128:
13596 case X86::BI__builtin_ia32_vpshldvq256:
13597 case X86::BI__builtin_ia32_vpshldvq512:
13598 case X86::BI__builtin_ia32_vpshldvw128:
13599 case X86::BI__builtin_ia32_vpshldvw256:
13600 case X86::BI__builtin_ia32_vpshldvw512:
13601 return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
13602
13603 case X86::BI__builtin_ia32_vpshrdvd128:
13604 case X86::BI__builtin_ia32_vpshrdvd256:
13605 case X86::BI__builtin_ia32_vpshrdvd512:
13606 case X86::BI__builtin_ia32_vpshrdvq128:
13607 case X86::BI__builtin_ia32_vpshrdvq256:
13608 case X86::BI__builtin_ia32_vpshrdvq512:
13609 case X86::BI__builtin_ia32_vpshrdvw128:
13610 case X86::BI__builtin_ia32_vpshrdvw256:
13611 case X86::BI__builtin_ia32_vpshrdvw512:
13612 // Ops 0 and 1 are swapped.
13613 return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
13614
13615 // Reductions
13616 case X86::BI__builtin_ia32_reduce_add_d512:
13617 case X86::BI__builtin_ia32_reduce_add_q512: {
13618 Function *F =
13619 CGM.getIntrinsic(Intrinsic::vector_reduce_add, Ops[0]->getType());
13620 return Builder.CreateCall(F, {Ops[0]});
13621 }
13622 case X86::BI__builtin_ia32_reduce_and_d512:
13623 case X86::BI__builtin_ia32_reduce_and_q512: {
13624 Function *F =
13625 CGM.getIntrinsic(Intrinsic::vector_reduce_and, Ops[0]->getType());
13626 return Builder.CreateCall(F, {Ops[0]});
13627 }
13628 case X86::BI__builtin_ia32_reduce_mul_d512:
13629 case X86::BI__builtin_ia32_reduce_mul_q512: {
13630 Function *F =
13631 CGM.getIntrinsic(Intrinsic::vector_reduce_mul, Ops[0]->getType());
13632 return Builder.CreateCall(F, {Ops[0]});
13633 }
13634 case X86::BI__builtin_ia32_reduce_or_d512:
13635 case X86::BI__builtin_ia32_reduce_or_q512: {
13636 Function *F =
13637 CGM.getIntrinsic(Intrinsic::vector_reduce_or, Ops[0]->getType());
13638 return Builder.CreateCall(F, {Ops[0]});
13639 }
13640 case X86::BI__builtin_ia32_reduce_smax_d512:
13641 case X86::BI__builtin_ia32_reduce_smax_q512: {
13642 Function *F =
13643 CGM.getIntrinsic(Intrinsic::vector_reduce_smax, Ops[0]->getType());
13644 return Builder.CreateCall(F, {Ops[0]});
13645 }
13646 case X86::BI__builtin_ia32_reduce_smin_d512:
13647 case X86::BI__builtin_ia32_reduce_smin_q512: {
13648 Function *F =
13649 CGM.getIntrinsic(Intrinsic::vector_reduce_smin, Ops[0]->getType());
13650 return Builder.CreateCall(F, {Ops[0]});
13651 }
13652 case X86::BI__builtin_ia32_reduce_umax_d512:
13653 case X86::BI__builtin_ia32_reduce_umax_q512: {
13654 Function *F =
13655 CGM.getIntrinsic(Intrinsic::vector_reduce_umax, Ops[0]->getType());
13656 return Builder.CreateCall(F, {Ops[0]});
13657 }
13658 case X86::BI__builtin_ia32_reduce_umin_d512:
13659 case X86::BI__builtin_ia32_reduce_umin_q512: {
13660 Function *F =
13661 CGM.getIntrinsic(Intrinsic::vector_reduce_umin, Ops[0]->getType());
13662 return Builder.CreateCall(F, {Ops[0]});
13663 }
13664
13665 // 3DNow!
13666 case X86::BI__builtin_ia32_pswapdsf:
13667 case X86::BI__builtin_ia32_pswapdsi: {
13668 llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
13669 Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
13670 llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_3dnowa_pswapd);
13671 return Builder.CreateCall(F, Ops, "pswapd");
13672 }
13673 case X86::BI__builtin_ia32_rdrand16_step:
13674 case X86::BI__builtin_ia32_rdrand32_step:
13675 case X86::BI__builtin_ia32_rdrand64_step:
13676 case X86::BI__builtin_ia32_rdseed16_step:
13677 case X86::BI__builtin_ia32_rdseed32_step:
13678 case X86::BI__builtin_ia32_rdseed64_step: {
13679 Intrinsic::ID ID;
13680 switch (BuiltinID) {
13681 default: llvm_unreachable("Unsupported intrinsic!");
13682 case X86::BI__builtin_ia32_rdrand16_step:
13683 ID = Intrinsic::x86_rdrand_16;
13684 break;
13685 case X86::BI__builtin_ia32_rdrand32_step:
13686 ID = Intrinsic::x86_rdrand_32;
13687 break;
13688 case X86::BI__builtin_ia32_rdrand64_step:
13689 ID = Intrinsic::x86_rdrand_64;
13690 break;
13691 case X86::BI__builtin_ia32_rdseed16_step:
13692 ID = Intrinsic::x86_rdseed_16;
13693 break;
13694 case X86::BI__builtin_ia32_rdseed32_step:
13695 ID = Intrinsic::x86_rdseed_32;
13696 break;
13697 case X86::BI__builtin_ia32_rdseed64_step:
13698 ID = Intrinsic::x86_rdseed_64;
13699 break;
13700 }
13701
13702 Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
13703 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 0),
13704 Ops[0]);
13705 return Builder.CreateExtractValue(Call, 1);
13706 }
13707 case X86::BI__builtin_ia32_addcarryx_u32:
13708 case X86::BI__builtin_ia32_addcarryx_u64:
13709 case X86::BI__builtin_ia32_subborrow_u32:
13710 case X86::BI__builtin_ia32_subborrow_u64: {
13711 Intrinsic::ID IID;
13712 switch (BuiltinID) {
13713 default: llvm_unreachable("Unsupported intrinsic!");
13714 case X86::BI__builtin_ia32_addcarryx_u32:
13715 IID = Intrinsic::x86_addcarry_32;
13716 break;
13717 case X86::BI__builtin_ia32_addcarryx_u64:
13718 IID = Intrinsic::x86_addcarry_64;
13719 break;
13720 case X86::BI__builtin_ia32_subborrow_u32:
13721 IID = Intrinsic::x86_subborrow_32;
13722 break;
13723 case X86::BI__builtin_ia32_subborrow_u64:
13724 IID = Intrinsic::x86_subborrow_64;
13725 break;
13726 }
13727
13728 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID),
13729 { Ops[0], Ops[1], Ops[2] });
13730 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
13731 Ops[3]);
13732 return Builder.CreateExtractValue(Call, 0);
13733 }
13734
13735 case X86::BI__builtin_ia32_fpclassps128_mask:
13736 case X86::BI__builtin_ia32_fpclassps256_mask:
13737 case X86::BI__builtin_ia32_fpclassps512_mask:
13738 case X86::BI__builtin_ia32_fpclasspd128_mask:
13739 case X86::BI__builtin_ia32_fpclasspd256_mask:
13740 case X86::BI__builtin_ia32_fpclasspd512_mask: {
13741 unsigned NumElts =
13742 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13743 Value *MaskIn = Ops[2];
13744 Ops.erase(&Ops[2]);
13745
13746 Intrinsic::ID ID;
13747 switch (BuiltinID) {
13748 default: llvm_unreachable("Unsupported intrinsic!");
13749 case X86::BI__builtin_ia32_fpclassps128_mask:
13750 ID = Intrinsic::x86_avx512_fpclass_ps_128;
13751 break;
13752 case X86::BI__builtin_ia32_fpclassps256_mask:
13753 ID = Intrinsic::x86_avx512_fpclass_ps_256;
13754 break;
13755 case X86::BI__builtin_ia32_fpclassps512_mask:
13756 ID = Intrinsic::x86_avx512_fpclass_ps_512;
13757 break;
13758 case X86::BI__builtin_ia32_fpclasspd128_mask:
13759 ID = Intrinsic::x86_avx512_fpclass_pd_128;
13760 break;
13761 case X86::BI__builtin_ia32_fpclasspd256_mask:
13762 ID = Intrinsic::x86_avx512_fpclass_pd_256;
13763 break;
13764 case X86::BI__builtin_ia32_fpclasspd512_mask:
13765 ID = Intrinsic::x86_avx512_fpclass_pd_512;
13766 break;
13767 }
13768
13769 Value *Fpclass = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
13770 return EmitX86MaskedCompareResult(*this, Fpclass, NumElts, MaskIn);
13771 }
13772
13773 case X86::BI__builtin_ia32_vp2intersect_q_512:
13774 case X86::BI__builtin_ia32_vp2intersect_q_256:
13775 case X86::BI__builtin_ia32_vp2intersect_q_128:
13776 case X86::BI__builtin_ia32_vp2intersect_d_512:
13777 case X86::BI__builtin_ia32_vp2intersect_d_256:
13778 case X86::BI__builtin_ia32_vp2intersect_d_128: {
13779 unsigned NumElts =
13780 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13781 Intrinsic::ID ID;
13782
13783 switch (BuiltinID) {
13784 default: llvm_unreachable("Unsupported intrinsic!");
13785 case X86::BI__builtin_ia32_vp2intersect_q_512:
13786 ID = Intrinsic::x86_avx512_vp2intersect_q_512;
13787 break;
13788 case X86::BI__builtin_ia32_vp2intersect_q_256:
13789 ID = Intrinsic::x86_avx512_vp2intersect_q_256;
13790 break;
13791 case X86::BI__builtin_ia32_vp2intersect_q_128:
13792 ID = Intrinsic::x86_avx512_vp2intersect_q_128;
13793 break;
13794 case X86::BI__builtin_ia32_vp2intersect_d_512:
13795 ID = Intrinsic::x86_avx512_vp2intersect_d_512;
13796 break;
13797 case X86::BI__builtin_ia32_vp2intersect_d_256:
13798 ID = Intrinsic::x86_avx512_vp2intersect_d_256;
13799 break;
13800 case X86::BI__builtin_ia32_vp2intersect_d_128:
13801 ID = Intrinsic::x86_avx512_vp2intersect_d_128;
13802 break;
13803 }
13804
13805 Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID), {Ops[0], Ops[1]});
13806 Value *Result = Builder.CreateExtractValue(Call, 0);
13807 Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
13808 Builder.CreateDefaultAlignedStore(Result, Ops[2]);
13809
13810 Result = Builder.CreateExtractValue(Call, 1);
13811 Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
13812 return Builder.CreateDefaultAlignedStore(Result, Ops[3]);
13813 }
13814
13815 case X86::BI__builtin_ia32_vpmultishiftqb128:
13816 case X86::BI__builtin_ia32_vpmultishiftqb256:
13817 case X86::BI__builtin_ia32_vpmultishiftqb512: {
13818 Intrinsic::ID ID;
13819 switch (BuiltinID) {
13820 default: llvm_unreachable("Unsupported intrinsic!");
13821 case X86::BI__builtin_ia32_vpmultishiftqb128:
13822 ID = Intrinsic::x86_avx512_pmultishift_qb_128;
13823 break;
13824 case X86::BI__builtin_ia32_vpmultishiftqb256:
13825 ID = Intrinsic::x86_avx512_pmultishift_qb_256;
13826 break;
13827 case X86::BI__builtin_ia32_vpmultishiftqb512:
13828 ID = Intrinsic::x86_avx512_pmultishift_qb_512;
13829 break;
13830 }
13831
13832 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
13833 }
13834
13835 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
13836 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
13837 case X86::BI__builtin_ia32_vpshufbitqmb512_mask: {
13838 unsigned NumElts =
13839 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13840 Value *MaskIn = Ops[2];
13841 Ops.erase(&Ops[2]);
13842
13843 Intrinsic::ID ID;
13844 switch (BuiltinID) {
13845 default: llvm_unreachable("Unsupported intrinsic!");
13846 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
13847 ID = Intrinsic::x86_avx512_vpshufbitqmb_128;
13848 break;
13849 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
13850 ID = Intrinsic::x86_avx512_vpshufbitqmb_256;
13851 break;
13852 case X86::BI__builtin_ia32_vpshufbitqmb512_mask:
13853 ID = Intrinsic::x86_avx512_vpshufbitqmb_512;
13854 break;
13855 }
13856
13857 Value *Shufbit = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
13858 return EmitX86MaskedCompareResult(*this, Shufbit, NumElts, MaskIn);
13859 }
13860
13861 // packed comparison intrinsics
13862 case X86::BI__builtin_ia32_cmpeqps:
13863 case X86::BI__builtin_ia32_cmpeqpd:
13864 return getVectorFCmpIR(CmpInst::FCMP_OEQ, /*IsSignaling*/false);
13865 case X86::BI__builtin_ia32_cmpltps:
13866 case X86::BI__builtin_ia32_cmpltpd:
13867 return getVectorFCmpIR(CmpInst::FCMP_OLT, /*IsSignaling*/true);
13868 case X86::BI__builtin_ia32_cmpleps:
13869 case X86::BI__builtin_ia32_cmplepd:
13870 return getVectorFCmpIR(CmpInst::FCMP_OLE, /*IsSignaling*/true);
13871 case X86::BI__builtin_ia32_cmpunordps:
13872 case X86::BI__builtin_ia32_cmpunordpd:
13873 return getVectorFCmpIR(CmpInst::FCMP_UNO, /*IsSignaling*/false);
13874 case X86::BI__builtin_ia32_cmpneqps:
13875 case X86::BI__builtin_ia32_cmpneqpd:
13876 return getVectorFCmpIR(CmpInst::FCMP_UNE, /*IsSignaling*/false);
13877 case X86::BI__builtin_ia32_cmpnltps:
13878 case X86::BI__builtin_ia32_cmpnltpd:
13879 return getVectorFCmpIR(CmpInst::FCMP_UGE, /*IsSignaling*/true);
13880 case X86::BI__builtin_ia32_cmpnleps:
13881 case X86::BI__builtin_ia32_cmpnlepd:
13882 return getVectorFCmpIR(CmpInst::FCMP_UGT, /*IsSignaling*/true);
13883 case X86::BI__builtin_ia32_cmpordps:
13884 case X86::BI__builtin_ia32_cmpordpd:
13885 return getVectorFCmpIR(CmpInst::FCMP_ORD, /*IsSignaling*/false);
13886 case X86::BI__builtin_ia32_cmpps128_mask:
13887 case X86::BI__builtin_ia32_cmpps256_mask:
13888 case X86::BI__builtin_ia32_cmpps512_mask:
13889 case X86::BI__builtin_ia32_cmppd128_mask:
13890 case X86::BI__builtin_ia32_cmppd256_mask:
13891 case X86::BI__builtin_ia32_cmppd512_mask:
13892 IsMaskFCmp = true;
13893 LLVM_FALLTHROUGH;
13894 case X86::BI__builtin_ia32_cmpps:
13895 case X86::BI__builtin_ia32_cmpps256:
13896 case X86::BI__builtin_ia32_cmppd:
13897 case X86::BI__builtin_ia32_cmppd256: {
13898     // Lowering vector comparisons to fcmp instructions, while
13899     // ignoring the requested signalling behaviour and
13900     // ignoring the requested rounding mode.
13901     // This is only possible as long as FENV_ACCESS is not implemented.
13902 // See also: https://reviews.llvm.org/D45616
13903
13904     // The third argument is the comparison condition, an integer in the
13905     // range [0, 31].
13906 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x1f;
13907
13908 // Lowering to IR fcmp instruction.
13909 // Ignoring requested signaling behaviour,
13910 // e.g. both _CMP_GT_OS & _CMP_GT_OQ are translated to FCMP_OGT.
13911 FCmpInst::Predicate Pred;
13912 bool IsSignaling;
13913 // Predicates for 16-31 repeat the 0-15 predicates. Only the signalling
13914 // behavior is inverted. We'll handle that after the switch.
13915 switch (CC & 0xf) {
13916 case 0x00: Pred = FCmpInst::FCMP_OEQ; IsSignaling = false; break;
13917 case 0x01: Pred = FCmpInst::FCMP_OLT; IsSignaling = true; break;
13918 case 0x02: Pred = FCmpInst::FCMP_OLE; IsSignaling = true; break;
13919 case 0x03: Pred = FCmpInst::FCMP_UNO; IsSignaling = false; break;
13920 case 0x04: Pred = FCmpInst::FCMP_UNE; IsSignaling = false; break;
13921 case 0x05: Pred = FCmpInst::FCMP_UGE; IsSignaling = true; break;
13922 case 0x06: Pred = FCmpInst::FCMP_UGT; IsSignaling = true; break;
13923 case 0x07: Pred = FCmpInst::FCMP_ORD; IsSignaling = false; break;
13924 case 0x08: Pred = FCmpInst::FCMP_UEQ; IsSignaling = false; break;
13925 case 0x09: Pred = FCmpInst::FCMP_ULT; IsSignaling = true; break;
13926 case 0x0a: Pred = FCmpInst::FCMP_ULE; IsSignaling = true; break;
13927 case 0x0b: Pred = FCmpInst::FCMP_FALSE; IsSignaling = false; break;
13928 case 0x0c: Pred = FCmpInst::FCMP_ONE; IsSignaling = false; break;
13929 case 0x0d: Pred = FCmpInst::FCMP_OGE; IsSignaling = true; break;
13930 case 0x0e: Pred = FCmpInst::FCMP_OGT; IsSignaling = true; break;
13931 case 0x0f: Pred = FCmpInst::FCMP_TRUE; IsSignaling = false; break;
13932 default: llvm_unreachable("Unhandled CC");
13933 }
13934
13935 // Invert the signalling behavior for 16-31.
13936 if (CC & 0x10)
13937 IsSignaling = !IsSignaling;
13938
13939     // If we're using constrained intrinsics and the predicate is FCMP_TRUE or
13940     // FCMP_FALSE, there is no IR compare intrinsic we can use, so fall back to
13941     // the legacy X86-specific intrinsic.
13942     // Likewise, if the builtin is mask-enabled under constrained intrinsics,
13943     // use the legacy X86-specific intrinsic.
13944 if (Builder.getIsFPConstrained() &&
13945 (Pred == FCmpInst::FCMP_TRUE || Pred == FCmpInst::FCMP_FALSE ||
13946 IsMaskFCmp)) {
13947
13948 Intrinsic::ID IID;
13949 switch (BuiltinID) {
13950 default: llvm_unreachable("Unexpected builtin");
13951 case X86::BI__builtin_ia32_cmpps:
13952 IID = Intrinsic::x86_sse_cmp_ps;
13953 break;
13954 case X86::BI__builtin_ia32_cmpps256:
13955 IID = Intrinsic::x86_avx_cmp_ps_256;
13956 break;
13957 case X86::BI__builtin_ia32_cmppd:
13958 IID = Intrinsic::x86_sse2_cmp_pd;
13959 break;
13960 case X86::BI__builtin_ia32_cmppd256:
13961 IID = Intrinsic::x86_avx_cmp_pd_256;
13962 break;
13963 case X86::BI__builtin_ia32_cmpps512_mask:
13964 IID = Intrinsic::x86_avx512_mask_cmp_ps_512;
13965 break;
13966 case X86::BI__builtin_ia32_cmppd512_mask:
13967 IID = Intrinsic::x86_avx512_mask_cmp_pd_512;
13968 break;
13969 case X86::BI__builtin_ia32_cmpps128_mask:
13970 IID = Intrinsic::x86_avx512_mask_cmp_ps_128;
13971 break;
13972 case X86::BI__builtin_ia32_cmpps256_mask:
13973 IID = Intrinsic::x86_avx512_mask_cmp_ps_256;
13974 break;
13975 case X86::BI__builtin_ia32_cmppd128_mask:
13976 IID = Intrinsic::x86_avx512_mask_cmp_pd_128;
13977 break;
13978 case X86::BI__builtin_ia32_cmppd256_mask:
13979 IID = Intrinsic::x86_avx512_mask_cmp_pd_256;
13980 break;
13981 }
13982
13983 Function *Intr = CGM.getIntrinsic(IID);
13984 if (IsMaskFCmp) {
13985 unsigned NumElts =
13986 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13987 Ops[3] = getMaskVecValue(*this, Ops[3], NumElts);
13988 Value *Cmp = Builder.CreateCall(Intr, Ops);
13989 return EmitX86MaskedCompareResult(*this, Cmp, NumElts, nullptr);
13990 }
13991
13992 return Builder.CreateCall(Intr, Ops);
13993 }
13994
13995 // Builtins without the _mask suffix return a vector of integers
13996 // of the same width as the input vectors
13997 if (IsMaskFCmp) {
13998 // We ignore SAE if strict FP is disabled. We only keep precise
13999 // exception behavior under strict FP.
14000 unsigned NumElts =
14001 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14002 Value *Cmp;
14003 if (IsSignaling)
14004 Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
14005 else
14006 Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
14007 return EmitX86MaskedCompareResult(*this, Cmp, NumElts, Ops[3]);
14008 }
14009
14010 return getVectorFCmpIR(Pred, IsSignaling);
14011 }
14012
14013 // SSE scalar comparison intrinsics
14014 case X86::BI__builtin_ia32_cmpeqss:
14015 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 0);
14016 case X86::BI__builtin_ia32_cmpltss:
14017 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 1);
14018 case X86::BI__builtin_ia32_cmpless:
14019 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 2);
14020 case X86::BI__builtin_ia32_cmpunordss:
14021 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 3);
14022 case X86::BI__builtin_ia32_cmpneqss:
14023 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 4);
14024 case X86::BI__builtin_ia32_cmpnltss:
14025 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 5);
14026 case X86::BI__builtin_ia32_cmpnless:
14027 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 6);
14028 case X86::BI__builtin_ia32_cmpordss:
14029 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 7);
14030 case X86::BI__builtin_ia32_cmpeqsd:
14031 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 0);
14032 case X86::BI__builtin_ia32_cmpltsd:
14033 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 1);
14034 case X86::BI__builtin_ia32_cmplesd:
14035 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 2);
14036 case X86::BI__builtin_ia32_cmpunordsd:
14037 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 3);
14038 case X86::BI__builtin_ia32_cmpneqsd:
14039 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 4);
14040 case X86::BI__builtin_ia32_cmpnltsd:
14041 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 5);
14042 case X86::BI__builtin_ia32_cmpnlesd:
14043 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 6);
14044 case X86::BI__builtin_ia32_cmpordsd:
14045 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7);
14046
14047 // f16c half2float intrinsics
14048 case X86::BI__builtin_ia32_vcvtph2ps:
14049 case X86::BI__builtin_ia32_vcvtph2ps256:
14050 case X86::BI__builtin_ia32_vcvtph2ps_mask:
14051 case X86::BI__builtin_ia32_vcvtph2ps256_mask:
14052 case X86::BI__builtin_ia32_vcvtph2ps512_mask:
14053 return EmitX86CvtF16ToFloatExpr(*this, Ops, ConvertType(E->getType()));
14054
14055 // AVX512 bf16 intrinsics
14056 case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: {
14057 Ops[2] = getMaskVecValue(
14058 *this, Ops[2],
14059 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements());
14060 Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128;
14061 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
14062 }
14063 case X86::BI__builtin_ia32_cvtsbf162ss_32:
14064 return EmitX86CvtBF16ToFloatExpr(*this, E, Ops);
14065
14066 case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
14067 case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: {
14068 Intrinsic::ID IID;
14069 switch (BuiltinID) {
14070 default: llvm_unreachable("Unsupported intrinsic!");
14071 case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
14072 IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_256;
14073 break;
14074 case X86::BI__builtin_ia32_cvtneps2bf16_512_mask:
14075 IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_512;
14076 break;
14077 }
14078 Value *Res = Builder.CreateCall(CGM.getIntrinsic(IID), Ops[0]);
14079 return EmitX86Select(*this, Ops[2], Res, Ops[1]);
14080 }
14081
14082 case X86::BI__emul:
14083 case X86::BI__emulu: {
14084 llvm::Type *Int64Ty = llvm::IntegerType::get(getLLVMContext(), 64);
14085 bool isSigned = (BuiltinID == X86::BI__emul);
14086 Value *LHS = Builder.CreateIntCast(Ops[0], Int64Ty, isSigned);
14087 Value *RHS = Builder.CreateIntCast(Ops[1], Int64Ty, isSigned);
14088 return Builder.CreateMul(LHS, RHS, "", !isSigned, isSigned);
14089 }
14090 case X86::BI__mulh:
14091 case X86::BI__umulh:
14092 case X86::BI_mul128:
14093 case X86::BI_umul128: {
14094 llvm::Type *ResType = ConvertType(E->getType());
14095 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
14096
14097 bool IsSigned = (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI_mul128);
14098 Value *LHS = Builder.CreateIntCast(Ops[0], Int128Ty, IsSigned);
14099 Value *RHS = Builder.CreateIntCast(Ops[1], Int128Ty, IsSigned);
14100
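          // Multiply in 128 bits; the upper 64 bits of the product are the
          // __mulh/__umulh result, while _mul128/_umul128 also return the low half.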
14101 Value *MulResult, *HigherBits;
14102 if (IsSigned) {
14103 MulResult = Builder.CreateNSWMul(LHS, RHS);
14104 HigherBits = Builder.CreateAShr(MulResult, 64);
14105 } else {
14106 MulResult = Builder.CreateNUWMul(LHS, RHS);
14107 HigherBits = Builder.CreateLShr(MulResult, 64);
14108 }
14109 HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned);
14110
14111 if (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI__umulh)
14112 return HigherBits;
14113
14114 Address HighBitsAddress = EmitPointerWithAlignment(E->getArg(2));
14115 Builder.CreateStore(HigherBits, HighBitsAddress);
14116 return Builder.CreateIntCast(MulResult, ResType, IsSigned);
14117 }
14118
14119 case X86::BI__faststorefence: {
14120 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
14121 llvm::SyncScope::System);
14122 }
14123 case X86::BI__shiftleft128:
14124 case X86::BI__shiftright128: {
14125 llvm::Function *F = CGM.getIntrinsic(
14126 BuiltinID == X86::BI__shiftleft128 ? Intrinsic::fshl : Intrinsic::fshr,
14127 Int64Ty);
14128 // Flip low/high ops and zero-extend amount to matching type.
14129 // shiftleft128(Low, High, Amt) -> fshl(High, Low, Amt)
14130 // shiftright128(Low, High, Amt) -> fshr(High, Low, Amt)
14131 std::swap(Ops[0], Ops[1]);
14132 Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
14133 return Builder.CreateCall(F, Ops);
14134 }
14135 case X86::BI_ReadWriteBarrier:
14136 case X86::BI_ReadBarrier:
14137 case X86::BI_WriteBarrier: {
14138 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
14139 llvm::SyncScope::SingleThread);
14140 }
14141
14142 case X86::BI_AddressOfReturnAddress: {
14143 Function *F =
14144 CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
14145 return Builder.CreateCall(F);
14146 }
14147 case X86::BI__stosb: {
14148     // We treat __stosb as a volatile memset: it may not generate a "rep stosb"
14149     // instruction, but it will create a memset that won't be optimized away.
14150 return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], Align(1), true);
14151 }
14152 case X86::BI__ud2:
14153     // llvm.trap lowers to a ud2 instruction on x86.
14154 return EmitTrapCall(Intrinsic::trap);
14155 case X86::BI__int2c: {
14156     // This interrupt signals a driver assertion failure in x86 NT kernels.
14157 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
14158 llvm::InlineAsm *IA =
14159 llvm::InlineAsm::get(FTy, "int $$0x2c", "", /*hasSideEffects=*/true);
14160 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
14161 getLLVMContext(), llvm::AttributeList::FunctionIndex,
14162 llvm::Attribute::NoReturn);
14163 llvm::CallInst *CI = Builder.CreateCall(IA);
14164 CI->setAttributes(NoReturnAttr);
14165 return CI;
14166 }
14167 case X86::BI__readfsbyte:
14168 case X86::BI__readfsword:
14169 case X86::BI__readfsdword:
14170 case X86::BI__readfsqword: {
14171 llvm::Type *IntTy = ConvertType(E->getType());
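          // Address space 257 selects the FS segment register on x86 targets.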
14172 Value *Ptr =
14173 Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 257));
14174 LoadInst *Load = Builder.CreateAlignedLoad(
14175 IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
14176 Load->setVolatile(true);
14177 return Load;
14178 }
14179 case X86::BI__readgsbyte:
14180 case X86::BI__readgsword:
14181 case X86::BI__readgsdword:
14182 case X86::BI__readgsqword: {
14183 llvm::Type *IntTy = ConvertType(E->getType());
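          // Address space 256 selects the GS segment register on x86 targets.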
14184 Value *Ptr =
14185 Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 256));
14186 LoadInst *Load = Builder.CreateAlignedLoad(
14187 IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
14188 Load->setVolatile(true);
14189 return Load;
14190 }
14191 case X86::BI__builtin_ia32_paddsb512:
14192 case X86::BI__builtin_ia32_paddsw512:
14193 case X86::BI__builtin_ia32_paddsb256:
14194 case X86::BI__builtin_ia32_paddsw256:
14195 case X86::BI__builtin_ia32_paddsb128:
14196 case X86::BI__builtin_ia32_paddsw128:
14197 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::sadd_sat);
14198 case X86::BI__builtin_ia32_paddusb512:
14199 case X86::BI__builtin_ia32_paddusw512:
14200 case X86::BI__builtin_ia32_paddusb256:
14201 case X86::BI__builtin_ia32_paddusw256:
14202 case X86::BI__builtin_ia32_paddusb128:
14203 case X86::BI__builtin_ia32_paddusw128:
14204 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::uadd_sat);
14205 case X86::BI__builtin_ia32_psubsb512:
14206 case X86::BI__builtin_ia32_psubsw512:
14207 case X86::BI__builtin_ia32_psubsb256:
14208 case X86::BI__builtin_ia32_psubsw256:
14209 case X86::BI__builtin_ia32_psubsb128:
14210 case X86::BI__builtin_ia32_psubsw128:
14211 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::ssub_sat);
14212 case X86::BI__builtin_ia32_psubusb512:
14213 case X86::BI__builtin_ia32_psubusw512:
14214 case X86::BI__builtin_ia32_psubusb256:
14215 case X86::BI__builtin_ia32_psubusw256:
14216 case X86::BI__builtin_ia32_psubusb128:
14217 case X86::BI__builtin_ia32_psubusw128:
14218 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::usub_sat);
14219 case X86::BI__builtin_ia32_encodekey128_u32: {
14220 Intrinsic::ID IID = Intrinsic::x86_encodekey128;
14221
14222 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1]});
14223
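          // Store each of the six 128-bit values returned by the intrinsic to
          // consecutive 16-byte offsets of the output handle buffer.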
14224 for (int i = 0; i < 6; ++i) {
14225 Value *Extract = Builder.CreateExtractValue(Call, i + 1);
14226 Value *Ptr = Builder.CreateConstGEP1_32(Ops[2], i * 16);
14227 Ptr = Builder.CreateBitCast(
14228 Ptr, llvm::PointerType::getUnqual(Extract->getType()));
14229 Builder.CreateAlignedStore(Extract, Ptr, Align(1));
14230 }
14231
14232 return Builder.CreateExtractValue(Call, 0);
14233 }
14234 case X86::BI__builtin_ia32_encodekey256_u32: {
14235 Intrinsic::ID IID = Intrinsic::x86_encodekey256;
14236
14237 Value *Call =
14238 Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1], Ops[2]});
14239
14240 for (int i = 0; i < 7; ++i) {
14241 Value *Extract = Builder.CreateExtractValue(Call, i + 1);
14242 Value *Ptr = Builder.CreateConstGEP1_32(Ops[3], i * 16);
14243 Ptr = Builder.CreateBitCast(
14244 Ptr, llvm::PointerType::getUnqual(Extract->getType()));
14245 Builder.CreateAlignedStore(Extract, Ptr, Align(1));
14246 }
14247
14248 return Builder.CreateExtractValue(Call, 0);
14249 }
14250 case X86::BI__builtin_ia32_aesenc128kl_u8:
14251 case X86::BI__builtin_ia32_aesdec128kl_u8:
14252 case X86::BI__builtin_ia32_aesenc256kl_u8:
14253 case X86::BI__builtin_ia32_aesdec256kl_u8: {
14254 Intrinsic::ID IID;
14255 switch (BuiltinID) {
14256 default: llvm_unreachable("Unexpected builtin");
14257 case X86::BI__builtin_ia32_aesenc128kl_u8:
14258 IID = Intrinsic::x86_aesenc128kl;
14259 break;
14260 case X86::BI__builtin_ia32_aesdec128kl_u8:
14261 IID = Intrinsic::x86_aesdec128kl;
14262 break;
14263 case X86::BI__builtin_ia32_aesenc256kl_u8:
14264 IID = Intrinsic::x86_aesenc256kl;
14265 break;
14266 case X86::BI__builtin_ia32_aesdec256kl_u8:
14267 IID = Intrinsic::x86_aesdec256kl;
14268 break;
14269 }
14270
14271 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[1], Ops[2]});
14272
14273 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
14274 Ops[0]);
14275
14276 return Builder.CreateExtractValue(Call, 0);
14277 }
14278 case X86::BI__builtin_ia32_aesencwide128kl_u8:
14279 case X86::BI__builtin_ia32_aesdecwide128kl_u8:
14280 case X86::BI__builtin_ia32_aesencwide256kl_u8:
14281 case X86::BI__builtin_ia32_aesdecwide256kl_u8: {
14282 Intrinsic::ID IID;
14283 switch (BuiltinID) {
14284 case X86::BI__builtin_ia32_aesencwide128kl_u8:
14285 IID = Intrinsic::x86_aesencwide128kl;
14286 break;
14287 case X86::BI__builtin_ia32_aesdecwide128kl_u8:
14288 IID = Intrinsic::x86_aesdecwide128kl;
14289 break;
14290 case X86::BI__builtin_ia32_aesencwide256kl_u8:
14291 IID = Intrinsic::x86_aesencwide256kl;
14292 break;
14293 case X86::BI__builtin_ia32_aesdecwide256kl_u8:
14294 IID = Intrinsic::x86_aesdecwide256kl;
14295 break;
14296 }
14297
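          // Build the operand list: the key handle (the third builtin argument)
          // followed by the eight 128-bit data blocks loaded from the second
          // argument.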
14298 Value *InOps[9];
14299 InOps[0] = Ops[2];
14300 for (int i = 0; i != 8; ++i) {
14301 Value *Ptr = Builder.CreateConstGEP1_32(Ops[1], i);
14302 InOps[i + 1] = Builder.CreateAlignedLoad(Ptr, Align(16));
14303 }
14304
14305 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), InOps);
14306
14307 for (int i = 0; i != 8; ++i) {
14308 Value *Extract = Builder.CreateExtractValue(Call, i + 1);
14309 Value *Ptr = Builder.CreateConstGEP1_32(Ops[0], i);
14310 Builder.CreateAlignedStore(Extract, Ptr, Align(16));
14311 }
14312
14313 return Builder.CreateExtractValue(Call, 0);
14314 }
14315 }
14316 }
14317
14318 Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
14319 const CallExpr *E) {
14320 SmallVector<Value*, 4> Ops;
14321
14322 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
14323 Ops.push_back(EmitScalarExpr(E->getArg(i)));
14324
14325 Intrinsic::ID ID = Intrinsic::not_intrinsic;
14326
14327 switch (BuiltinID) {
14328 default: return nullptr;
14329
14330 // __builtin_ppc_get_timebase is GCC 4.8+'s PowerPC-specific name for what we
14331 // call __builtin_readcyclecounter.
14332 case PPC::BI__builtin_ppc_get_timebase:
14333 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::readcyclecounter));
14334
14335 // vec_ld, vec_xl_be, vec_lvsl, vec_lvsr
14336 case PPC::BI__builtin_altivec_lvx:
14337 case PPC::BI__builtin_altivec_lvxl:
14338 case PPC::BI__builtin_altivec_lvebx:
14339 case PPC::BI__builtin_altivec_lvehx:
14340 case PPC::BI__builtin_altivec_lvewx:
14341 case PPC::BI__builtin_altivec_lvsl:
14342 case PPC::BI__builtin_altivec_lvsr:
14343 case PPC::BI__builtin_vsx_lxvd2x:
14344 case PPC::BI__builtin_vsx_lxvw4x:
14345 case PPC::BI__builtin_vsx_lxvd2x_be:
14346 case PPC::BI__builtin_vsx_lxvw4x_be:
14347 case PPC::BI__builtin_vsx_lxvl:
14348 case PPC::BI__builtin_vsx_lxvll:
14349 {
14350     if (BuiltinID == PPC::BI__builtin_vsx_lxvl ||
14351         BuiltinID == PPC::BI__builtin_vsx_lxvll) {
14352 Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
14353     } else {
14354 Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
14355 Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
14356 Ops.pop_back();
14357 }
14358
14359 switch (BuiltinID) {
14360 default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
14361 case PPC::BI__builtin_altivec_lvx:
14362 ID = Intrinsic::ppc_altivec_lvx;
14363 break;
14364 case PPC::BI__builtin_altivec_lvxl:
14365 ID = Intrinsic::ppc_altivec_lvxl;
14366 break;
14367 case PPC::BI__builtin_altivec_lvebx:
14368 ID = Intrinsic::ppc_altivec_lvebx;
14369 break;
14370 case PPC::BI__builtin_altivec_lvehx:
14371 ID = Intrinsic::ppc_altivec_lvehx;
14372 break;
14373 case PPC::BI__builtin_altivec_lvewx:
14374 ID = Intrinsic::ppc_altivec_lvewx;
14375 break;
14376 case PPC::BI__builtin_altivec_lvsl:
14377 ID = Intrinsic::ppc_altivec_lvsl;
14378 break;
14379 case PPC::BI__builtin_altivec_lvsr:
14380 ID = Intrinsic::ppc_altivec_lvsr;
14381 break;
14382 case PPC::BI__builtin_vsx_lxvd2x:
14383 ID = Intrinsic::ppc_vsx_lxvd2x;
14384 break;
14385 case PPC::BI__builtin_vsx_lxvw4x:
14386 ID = Intrinsic::ppc_vsx_lxvw4x;
14387 break;
14388 case PPC::BI__builtin_vsx_lxvd2x_be:
14389 ID = Intrinsic::ppc_vsx_lxvd2x_be;
14390 break;
14391 case PPC::BI__builtin_vsx_lxvw4x_be:
14392 ID = Intrinsic::ppc_vsx_lxvw4x_be;
14393 break;
14394 case PPC::BI__builtin_vsx_lxvl:
14395 ID = Intrinsic::ppc_vsx_lxvl;
14396 break;
14397 case PPC::BI__builtin_vsx_lxvll:
14398 ID = Intrinsic::ppc_vsx_lxvll;
14399 break;
14400 }
14401 llvm::Function *F = CGM.getIntrinsic(ID);
14402 return Builder.CreateCall(F, Ops, "");
14403 }
14404
14405 // vec_st, vec_xst_be
14406 case PPC::BI__builtin_altivec_stvx:
14407 case PPC::BI__builtin_altivec_stvxl:
14408 case PPC::BI__builtin_altivec_stvebx:
14409 case PPC::BI__builtin_altivec_stvehx:
14410 case PPC::BI__builtin_altivec_stvewx:
14411 case PPC::BI__builtin_vsx_stxvd2x:
14412 case PPC::BI__builtin_vsx_stxvw4x:
14413 case PPC::BI__builtin_vsx_stxvd2x_be:
14414 case PPC::BI__builtin_vsx_stxvw4x_be:
14415 case PPC::BI__builtin_vsx_stxvl:
14416 case PPC::BI__builtin_vsx_stxvll:
14417 {
14418     if (BuiltinID == PPC::BI__builtin_vsx_stxvl ||
14419         BuiltinID == PPC::BI__builtin_vsx_stxvll) {
14420 Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
14421     } else {
14422 Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
14423 Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
14424 Ops.pop_back();
14425 }
14426
14427 switch (BuiltinID) {
14428 default: llvm_unreachable("Unsupported st intrinsic!");
14429 case PPC::BI__builtin_altivec_stvx:
14430 ID = Intrinsic::ppc_altivec_stvx;
14431 break;
14432 case PPC::BI__builtin_altivec_stvxl:
14433 ID = Intrinsic::ppc_altivec_stvxl;
14434 break;
14435 case PPC::BI__builtin_altivec_stvebx:
14436 ID = Intrinsic::ppc_altivec_stvebx;
14437 break;
14438 case PPC::BI__builtin_altivec_stvehx:
14439 ID = Intrinsic::ppc_altivec_stvehx;
14440 break;
14441 case PPC::BI__builtin_altivec_stvewx:
14442 ID = Intrinsic::ppc_altivec_stvewx;
14443 break;
14444 case PPC::BI__builtin_vsx_stxvd2x:
14445 ID = Intrinsic::ppc_vsx_stxvd2x;
14446 break;
14447 case PPC::BI__builtin_vsx_stxvw4x:
14448 ID = Intrinsic::ppc_vsx_stxvw4x;
14449 break;
14450 case PPC::BI__builtin_vsx_stxvd2x_be:
14451 ID = Intrinsic::ppc_vsx_stxvd2x_be;
14452 break;
14453 case PPC::BI__builtin_vsx_stxvw4x_be:
14454 ID = Intrinsic::ppc_vsx_stxvw4x_be;
14455 break;
14456 case PPC::BI__builtin_vsx_stxvl:
14457 ID = Intrinsic::ppc_vsx_stxvl;
14458 break;
14459 case PPC::BI__builtin_vsx_stxvll:
14460 ID = Intrinsic::ppc_vsx_stxvll;
14461 break;
14462 }
14463 llvm::Function *F = CGM.getIntrinsic(ID);
14464 return Builder.CreateCall(F, Ops, "");
14465 }
14466 // Square root
14467 case PPC::BI__builtin_vsx_xvsqrtsp:
14468 case PPC::BI__builtin_vsx_xvsqrtdp: {
14469 llvm::Type *ResultType = ConvertType(E->getType());
14470 Value *X = EmitScalarExpr(E->getArg(0));
14471 if (Builder.getIsFPConstrained()) {
14472 llvm::Function *F = CGM.getIntrinsic(
14473 Intrinsic::experimental_constrained_sqrt, ResultType);
14474 return Builder.CreateConstrainedFPCall(F, X);
14475 } else {
14476 llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
14477 return Builder.CreateCall(F, X);
14478 }
14479 }
14480 // Count leading zeros
14481 case PPC::BI__builtin_altivec_vclzb:
14482 case PPC::BI__builtin_altivec_vclzh:
14483 case PPC::BI__builtin_altivec_vclzw:
14484 case PPC::BI__builtin_altivec_vclzd: {
14485 llvm::Type *ResultType = ConvertType(E->getType());
14486 Value *X = EmitScalarExpr(E->getArg(0));
14487 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
14488 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
14489 return Builder.CreateCall(F, {X, Undef});
14490 }
14491 case PPC::BI__builtin_altivec_vctzb:
14492 case PPC::BI__builtin_altivec_vctzh:
14493 case PPC::BI__builtin_altivec_vctzw:
14494 case PPC::BI__builtin_altivec_vctzd: {
14495 llvm::Type *ResultType = ConvertType(E->getType());
14496 Value *X = EmitScalarExpr(E->getArg(0));
14497 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
14498 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
14499 return Builder.CreateCall(F, {X, Undef});
14500 }
14501 case PPC::BI__builtin_altivec_vec_replace_elt:
14502 case PPC::BI__builtin_altivec_vec_replace_unaligned: {
14503     // The third argument of vec_replace_elt and vec_replace_unaligned must
14504     // be a compile-time constant and is lowered to either the vinsw or the
14505     // vinsd instruction.
14506 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
14507 assert(ArgCI &&
14508 "Third Arg to vinsw/vinsd intrinsic must be a constant integer!");
14509 llvm::Type *ResultType = ConvertType(E->getType());
14510 llvm::Function *F = nullptr;
14511 Value *Call = nullptr;
14512 int64_t ConstArg = ArgCI->getSExtValue();
14513 unsigned ArgWidth = Ops[1]->getType()->getPrimitiveSizeInBits();
14514 bool Is32Bit = false;
14515 assert((ArgWidth == 32 || ArgWidth == 64) && "Invalid argument width");
14516 // The input to vec_replace_elt is an element index, not a byte index.
14517 if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt)
14518 ConstArg *= ArgWidth / 8;
14519 if (ArgWidth == 32) {
14520 Is32Bit = true;
14521 // When the second argument is 32 bits, it can either be an integer or
14522 // a float. The vinsw intrinsic is used in this case.
14523 F = CGM.getIntrinsic(Intrinsic::ppc_altivec_vinsw);
14524       // Fix the constant according to endianness.
14525 if (getTarget().isLittleEndian())
14526 ConstArg = 12 - ConstArg;
14527 } else {
14528 // When the second argument is 64 bits, it can either be a long long or
14529 // a double. The vinsd intrinsic is used in this case.
14530 F = CGM.getIntrinsic(Intrinsic::ppc_altivec_vinsd);
14531 // Fix the constant for little endian.
14532 if (getTarget().isLittleEndian())
14533 ConstArg = 8 - ConstArg;
14534 }
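    // Worked example: for vec_replace_elt on 32-bit elements with element
    // index 1, ConstArg becomes 1 * 4 = 4 above, and on a little-endian
    // target it is then adjusted to 12 - 4 = 8 before being passed to vinsw.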
14535 Ops[2] = ConstantInt::getSigned(Int32Ty, ConstArg);
14536     // Depending on ArgWidth, the replacement value (and hence the vector) may
14537     // be a float or a double. If so, bitcast the vector and the value to the
14538     // corresponding 32-bit or 64-bit integer types before calling the intrinsic.
14539 if (!Ops[1]->getType()->isIntegerTy(ArgWidth)) {
14540 Ops[0] = Builder.CreateBitCast(
14541 Ops[0], Is32Bit ? llvm::FixedVectorType::get(Int32Ty, 4)
14542 : llvm::FixedVectorType::get(Int64Ty, 2));
14543 Ops[1] = Builder.CreateBitCast(Ops[1], Is32Bit ? Int32Ty : Int64Ty);
14544 }
14545 // Emit the call to vinsw or vinsd.
14546 Call = Builder.CreateCall(F, Ops);
14547     // Depending on the builtin, bitcast to the appropriate result type.
14548 if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt &&
14549 !Ops[1]->getType()->isIntegerTy())
14550 return Builder.CreateBitCast(Call, ResultType);
14551 else if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt &&
14552 Ops[1]->getType()->isIntegerTy())
14553 return Call;
14554 else
14555 return Builder.CreateBitCast(Call,
14556 llvm::FixedVectorType::get(Int8Ty, 16));
14557 }
14558 case PPC::BI__builtin_altivec_vpopcntb:
14559 case PPC::BI__builtin_altivec_vpopcnth:
14560 case PPC::BI__builtin_altivec_vpopcntw:
14561 case PPC::BI__builtin_altivec_vpopcntd: {
14562 llvm::Type *ResultType = ConvertType(E->getType());
14563 Value *X = EmitScalarExpr(E->getArg(0));
14564 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
14565 return Builder.CreateCall(F, X);
14566 }
14567 // Copy sign
14568 case PPC::BI__builtin_vsx_xvcpsgnsp:
14569 case PPC::BI__builtin_vsx_xvcpsgndp: {
14570 llvm::Type *ResultType = ConvertType(E->getType());
14571 Value *X = EmitScalarExpr(E->getArg(0));
14572 Value *Y = EmitScalarExpr(E->getArg(1));
14573 ID = Intrinsic::copysign;
14574 llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
14575 return Builder.CreateCall(F, {X, Y});
14576 }
14577 // Rounding/truncation
14578 case PPC::BI__builtin_vsx_xvrspip:
14579 case PPC::BI__builtin_vsx_xvrdpip:
14580 case PPC::BI__builtin_vsx_xvrdpim:
14581 case PPC::BI__builtin_vsx_xvrspim:
14582 case PPC::BI__builtin_vsx_xvrdpi:
14583 case PPC::BI__builtin_vsx_xvrspi:
14584 case PPC::BI__builtin_vsx_xvrdpic:
14585 case PPC::BI__builtin_vsx_xvrspic:
14586 case PPC::BI__builtin_vsx_xvrdpiz:
14587 case PPC::BI__builtin_vsx_xvrspiz: {
14588 llvm::Type *ResultType = ConvertType(E->getType());
14589 Value *X = EmitScalarExpr(E->getArg(0));
14590 if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim ||
14591 BuiltinID == PPC::BI__builtin_vsx_xvrspim)
14592 ID = Builder.getIsFPConstrained()
14593 ? Intrinsic::experimental_constrained_floor
14594 : Intrinsic::floor;
14595 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi ||
14596 BuiltinID == PPC::BI__builtin_vsx_xvrspi)
14597 ID = Builder.getIsFPConstrained()
14598 ? Intrinsic::experimental_constrained_round
14599 : Intrinsic::round;
14600 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic ||
14601 BuiltinID == PPC::BI__builtin_vsx_xvrspic)
14602 ID = Builder.getIsFPConstrained()
14603 ? Intrinsic::experimental_constrained_rint
14604 : Intrinsic::rint;
14605 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip ||
14606 BuiltinID == PPC::BI__builtin_vsx_xvrspip)
14607 ID = Builder.getIsFPConstrained()
14608 ? Intrinsic::experimental_constrained_ceil
14609 : Intrinsic::ceil;
14610 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz ||
14611 BuiltinID == PPC::BI__builtin_vsx_xvrspiz)
14612 ID = Builder.getIsFPConstrained()
14613 ? Intrinsic::experimental_constrained_trunc
14614 : Intrinsic::trunc;
14615 llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
14616 return Builder.getIsFPConstrained() ? Builder.CreateConstrainedFPCall(F, X)
14617 : Builder.CreateCall(F, X);
14618 }
14619
14620 // Absolute value
14621 case PPC::BI__builtin_vsx_xvabsdp:
14622 case PPC::BI__builtin_vsx_xvabssp: {
14623 llvm::Type *ResultType = ConvertType(E->getType());
14624 Value *X = EmitScalarExpr(E->getArg(0));
14625 llvm::Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
14626 return Builder.CreateCall(F, X);
14627 }
14628
14629 // FMA variations
14630 case PPC::BI__builtin_vsx_xvmaddadp:
14631 case PPC::BI__builtin_vsx_xvmaddasp:
14632 case PPC::BI__builtin_vsx_xvnmaddadp:
14633 case PPC::BI__builtin_vsx_xvnmaddasp:
14634 case PPC::BI__builtin_vsx_xvmsubadp:
14635 case PPC::BI__builtin_vsx_xvmsubasp:
14636 case PPC::BI__builtin_vsx_xvnmsubadp:
14637 case PPC::BI__builtin_vsx_xvnmsubasp: {
14638 llvm::Type *ResultType = ConvertType(E->getType());
14639 Value *X = EmitScalarExpr(E->getArg(0));
14640 Value *Y = EmitScalarExpr(E->getArg(1));
14641 Value *Z = EmitScalarExpr(E->getArg(2));
14642 llvm::Function *F;
14643 if (Builder.getIsFPConstrained())
14644 F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
14645 else
14646 F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
14647 switch (BuiltinID) {
14648 case PPC::BI__builtin_vsx_xvmaddadp:
14649 case PPC::BI__builtin_vsx_xvmaddasp:
14650 if (Builder.getIsFPConstrained())
14651 return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
14652 else
14653 return Builder.CreateCall(F, {X, Y, Z});
14654 case PPC::BI__builtin_vsx_xvnmaddadp:
14655 case PPC::BI__builtin_vsx_xvnmaddasp:
14656 if (Builder.getIsFPConstrained())
14657 return Builder.CreateFNeg(
14658 Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
14659 else
14660 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
14661 case PPC::BI__builtin_vsx_xvmsubadp:
14662 case PPC::BI__builtin_vsx_xvmsubasp:
14663 if (Builder.getIsFPConstrained())
14664 return Builder.CreateConstrainedFPCall(
14665 F, {X, Y, Builder.CreateFNeg(Z, "neg")});
14666 else
14667 return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
14668 case PPC::BI__builtin_vsx_xvnmsubadp:
14669 case PPC::BI__builtin_vsx_xvnmsubasp:
14670 if (Builder.getIsFPConstrained())
14671 return Builder.CreateFNeg(
14672 Builder.CreateConstrainedFPCall(
14673 F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
14674 "neg");
14675 else
14676 return Builder.CreateFNeg(
14677 Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
14678 "neg");
14679 }
14680 llvm_unreachable("Unknown FMA operation");
14681 return nullptr; // Suppress no-return warning
14682 }
14683
14684 case PPC::BI__builtin_vsx_insertword: {
14685 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxinsertw);
14686
14687     // The third argument is a compile-time constant int. It must be clamped
14688     // to the range [0, 12].
14689 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
14690 assert(ArgCI &&
14691 "Third arg to xxinsertw intrinsic must be constant integer");
14692 const int64_t MaxIndex = 12;
14693 int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);
14694
14695     // The builtin semantics don't exactly match the xxinsertw instruction's
14696 // semantics (which ppc_vsx_xxinsertw follows). The builtin extracts the
14697 // word from the first argument, and inserts it in the second argument. The
14698 // instruction extracts the word from its second input register and inserts
14699 // it into its first input register, so swap the first and second arguments.
14700 std::swap(Ops[0], Ops[1]);
14701
14702 // Need to cast the second argument from a vector of unsigned int to a
14703 // vector of long long.
14704 Ops[1] =
14705 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2));
14706
14707 if (getTarget().isLittleEndian()) {
14708 // Reverse the double words in the vector we will extract from.
14709 Ops[0] =
14710 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
14711 Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ArrayRef<int>{1, 0});
14712
14713 // Reverse the index.
14714 Index = MaxIndex - Index;
14715 }
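    // Illustrative example: with the clamped Index equal to 4 on a
    // little-endian target, the two doublewords of Ops[0] are swapped above
    // and the byte index becomes 12 - 4 = 8, keeping the builtin's element
    // numbering consistent across endiannesses.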
14716
14717 // Intrinsic expects the first arg to be a vector of int.
14718 Ops[0] =
14719 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
14720 Ops[2] = ConstantInt::getSigned(Int32Ty, Index);
14721 return Builder.CreateCall(F, Ops);
14722 }
14723
14724 case PPC::BI__builtin_vsx_extractuword: {
14725 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxextractuw);
14726
14727 // Intrinsic expects the first argument to be a vector of doublewords.
14728 Ops[0] =
14729 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
14730
14731 // The second argument is a compile time constant int that needs to
14732 // be clamped to the range [0, 12].
14733 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[1]);
14734 assert(ArgCI &&
14735 "Second Arg to xxextractuw intrinsic must be a constant integer!");
14736 const int64_t MaxIndex = 12;
14737 int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);
14738
14739 if (getTarget().isLittleEndian()) {
14740 // Reverse the index.
14741 Index = MaxIndex - Index;
14742 Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
14743
14744 // Emit the call, then reverse the double words of the results vector.
14745 Value *Call = Builder.CreateCall(F, Ops);
14746
14747 Value *ShuffleCall =
14748 Builder.CreateShuffleVector(Call, Call, ArrayRef<int>{1, 0});
14749 return ShuffleCall;
14750 } else {
14751 Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
14752 return Builder.CreateCall(F, Ops);
14753 }
14754 }
14755
14756 case PPC::BI__builtin_vsx_xxpermdi: {
14757 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
14758 assert(ArgCI && "Third arg must be constant integer!");
14759
14760 unsigned Index = ArgCI->getZExtValue();
14761 Ops[0] =
14762 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
14763 Ops[1] =
14764 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2));
14765
14766 // Account for endianness by treating this as just a shuffle. So we use the
14767 // same indices for both LE and BE in order to produce expected results in
14768 // both cases.
14769 int ElemIdx0 = (Index & 2) >> 1;
14770 int ElemIdx1 = 2 + (Index & 1);
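    // For example, Index = 3 selects doubleword 1 of Ops[0] and doubleword 1
    // of Ops[1], i.e. the shuffle mask {1, 3} over the concatenated vectors.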
14771
14772 int ShuffleElts[2] = {ElemIdx0, ElemIdx1};
14773 Value *ShuffleCall =
14774 Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts);
14775 QualType BIRetType = E->getType();
14776 auto RetTy = ConvertType(BIRetType);
14777 return Builder.CreateBitCast(ShuffleCall, RetTy);
14778 }
14779
14780 case PPC::BI__builtin_vsx_xxsldwi: {
14781 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
14782 assert(ArgCI && "Third argument must be a compile time constant");
14783 unsigned Index = ArgCI->getZExtValue() & 0x3;
14784 Ops[0] =
14785 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
14786 Ops[1] =
14787 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int32Ty, 4));
14788
14789 // Create a shuffle mask
14790 int ElemIdx0;
14791 int ElemIdx1;
14792 int ElemIdx2;
14793 int ElemIdx3;
14794 if (getTarget().isLittleEndian()) {
14795 // Little endian element N comes from element 8+N-Index of the
14796 // concatenated wide vector (of course, using modulo arithmetic on
14797 // the total number of elements).
14798 ElemIdx0 = (8 - Index) % 8;
14799 ElemIdx1 = (9 - Index) % 8;
14800 ElemIdx2 = (10 - Index) % 8;
14801 ElemIdx3 = (11 - Index) % 8;
14802 } else {
14803 // Big endian ElemIdx<N> = Index + N
14804 ElemIdx0 = Index;
14805 ElemIdx1 = Index + 1;
14806 ElemIdx2 = Index + 2;
14807 ElemIdx3 = Index + 3;
14808 }
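    // For example, Index = 1 yields the shuffle mask {7, 0, 1, 2} on little
    // endian and {1, 2, 3, 4} on big endian.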
14809
14810 int ShuffleElts[4] = {ElemIdx0, ElemIdx1, ElemIdx2, ElemIdx3};
14811 Value *ShuffleCall =
14812 Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts);
14813 QualType BIRetType = E->getType();
14814 auto RetTy = ConvertType(BIRetType);
14815 return Builder.CreateBitCast(ShuffleCall, RetTy);
14816 }
14817
14818 case PPC::BI__builtin_pack_vector_int128: {
14819 bool isLittleEndian = getTarget().isLittleEndian();
14820 Value *UndefValue =
14821 llvm::UndefValue::get(llvm::FixedVectorType::get(Ops[0]->getType(), 2));
14822 Value *Res = Builder.CreateInsertElement(
14823 UndefValue, Ops[0], (uint64_t)(isLittleEndian ? 1 : 0));
14824 Res = Builder.CreateInsertElement(Res, Ops[1],
14825 (uint64_t)(isLittleEndian ? 0 : 1));
14826 return Builder.CreateBitCast(Res, ConvertType(E->getType()));
14827 }
14828
14829 case PPC::BI__builtin_unpack_vector_int128: {
14830 ConstantInt *Index = cast<ConstantInt>(Ops[1]);
14831 Value *Unpacked = Builder.CreateBitCast(
14832 Ops[0], llvm::FixedVectorType::get(ConvertType(E->getType()), 2));
14833
14834 if (getTarget().isLittleEndian())
14835 Index = ConstantInt::get(Index->getType(), 1 - Index->getZExtValue());
14836
14837 return Builder.CreateExtractElement(Unpacked, Index);
14838 }
14839
14840 // The PPC MMA builtins take a pointer to a __vector_quad as an argument.
14841 // Some of the MMA instructions accumulate their result into an existing
14842 // accumulator whereas the others generate a new accumulator. So we need
14843 // custom code generation to expand a builtin call that takes a pointer into
14844 // a load of the accumulator (if the corresponding instruction accumulates
14845 // its result), followed by the call to the intrinsic and a store of the result.
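// For instance, an accumulating call such as __builtin_mma_xvf32gerpp(&acc, a,
// b) (illustrative) becomes a load of the accumulator through the pointer, a
// call to the corresponding llvm.ppc.mma.* intrinsic, and a store of the
// returned accumulator back through the pointer.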
14846 #define MMA_BUILTIN(Name, Types, Accumulate) \
14847 case PPC::BI__builtin_mma_##Name:
14848 #include "clang/Basic/BuiltinsPPC.def"
14849 {
14850     // The first argument of these two builtins is a pointer used to store their
14851     // result. However, the LLVM intrinsics return their result in multiple
14852     // return values, so here we emit code that extracts those values from the
14853     // intrinsic result and stores them through that pointer.
14854 if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc ||
14855 BuiltinID == PPC::BI__builtin_mma_disassemble_pair) {
14856 unsigned NumVecs = 2;
14857 auto Intrinsic = Intrinsic::ppc_mma_disassemble_pair;
14858 if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc) {
14859 NumVecs = 4;
14860 Intrinsic = Intrinsic::ppc_mma_disassemble_acc;
14861 }
14862 llvm::Function *F = CGM.getIntrinsic(Intrinsic);
14863 Address Addr = EmitPointerWithAlignment(E->getArg(1));
14864 Value *Vec = Builder.CreateLoad(Addr);
14865 Value *Call = Builder.CreateCall(F, {Vec});
14866 llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, 16);
14867 Value *Ptr = Builder.CreateBitCast(Ops[0], VTy->getPointerTo());
14868     for (unsigned i = 0; i < NumVecs; i++) {
14869       Value *Vec = Builder.CreateExtractValue(Call, i);
14870       llvm::ConstantInt *Index = llvm::ConstantInt::get(IntTy, i);
14871 Value *GEP = Builder.CreateInBoundsGEP(Ptr, Index);
14872 Builder.CreateAlignedStore(Vec, GEP, MaybeAlign(16));
14873 }
14874 return Call;
14875 }
14876 bool Accumulate;
14877 switch (BuiltinID) {
14878 #define MMA_BUILTIN(Name, Types, Acc) \
14879 case PPC::BI__builtin_mma_##Name: \
14880 ID = Intrinsic::ppc_mma_##Name; \
14881 Accumulate = Acc; \
14882 break;
14883 #include "clang/Basic/BuiltinsPPC.def"
14884 }
14885 if (BuiltinID == PPC::BI__builtin_mma_lxvp ||
14886 BuiltinID == PPC::BI__builtin_mma_stxvp) {
14887 if (BuiltinID == PPC::BI__builtin_mma_lxvp) {
14888 Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
14889 Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
14890 } else {
14891 Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
14892 Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
14893 }
14894 Ops.pop_back();
14895 llvm::Function *F = CGM.getIntrinsic(ID);
14896 return Builder.CreateCall(F, Ops, "");
14897 }
14898 SmallVector<Value*, 4> CallOps;
14899 if (Accumulate) {
14900 Address Addr = EmitPointerWithAlignment(E->getArg(0));
14901 Value *Acc = Builder.CreateLoad(Addr);
14902 CallOps.push_back(Acc);
14903 }
14904   for (unsigned i = 1; i < Ops.size(); i++)
14905 CallOps.push_back(Ops[i]);
14906 llvm::Function *F = CGM.getIntrinsic(ID);
14907 Value *Call = Builder.CreateCall(F, CallOps);
14908 return Builder.CreateAlignedStore(Call, Ops[0], MaybeAlign(64));
14909 }
14910 }
14911 }
14912
14913 namespace {
14914 // If \p E is not a null pointer, insert an address space cast to match the
14915 // return type of \p E if necessary.
14916 Value *EmitAMDGPUDispatchPtr(CodeGenFunction &CGF,
14917 const CallExpr *E = nullptr) {
14918 auto *F = CGF.CGM.getIntrinsic(Intrinsic::amdgcn_dispatch_ptr);
14919 auto *Call = CGF.Builder.CreateCall(F);
14920 Call->addAttribute(
14921 AttributeList::ReturnIndex,
14922 Attribute::getWithDereferenceableBytes(Call->getContext(), 64));
14923 Call->addAttribute(AttributeList::ReturnIndex,
14924 Attribute::getWithAlignment(Call->getContext(), Align(4)));
14925 if (!E)
14926 return Call;
14927 QualType BuiltinRetType = E->getType();
14928 auto *RetTy = cast<llvm::PointerType>(CGF.ConvertType(BuiltinRetType));
14929 if (RetTy == Call->getType())
14930 return Call;
14931 return CGF.Builder.CreateAddrSpaceCast(Call, RetTy);
14932 }
14933
14934 // \p Index is 0, 1, and 2 for x, y, and z dimension, respectively.
14935 Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) {
14936 const unsigned XOffset = 4;
14937 auto *DP = EmitAMDGPUDispatchPtr(CGF);
14938 // Indexing the HSA kernel_dispatch_packet struct.
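  // Per the HSA packet layout, workgroup_size_x/y/z are consecutive uint16_t
  // fields starting at byte offset 4, hence the offset XOffset + Index * 2.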
14939 auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 2);
14940 auto *GEP = CGF.Builder.CreateGEP(DP, Offset);
14941 auto *DstTy =
14942 CGF.Int16Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
14943 auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
14944 auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(2)));
14945 llvm::MDBuilder MDHelper(CGF.getLLVMContext());
14946 llvm::MDNode *RNode = MDHelper.createRange(APInt(16, 1),
14947 APInt(16, CGF.getTarget().getMaxOpenCLWorkGroupSize() + 1));
14948 LD->setMetadata(llvm::LLVMContext::MD_range, RNode);
14949 LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
14950 llvm::MDNode::get(CGF.getLLVMContext(), None));
14951 return LD;
14952 }
14953
14954 // \p Index is 0, 1, and 2 for x, y, and z dimension, respectively.
14955 Value *EmitAMDGPUGridSize(CodeGenFunction &CGF, unsigned Index) {
14956 const unsigned XOffset = 12;
14957 auto *DP = EmitAMDGPUDispatchPtr(CGF);
14958 // Indexing the HSA kernel_dispatch_packet struct.
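  // Per the HSA packet layout, grid_size_x/y/z are consecutive uint32_t fields
  // starting at byte offset 12, hence the offset XOffset + Index * 4.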
14959 auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 4);
14960 auto *GEP = CGF.Builder.CreateGEP(DP, Offset);
14961 auto *DstTy =
14962 CGF.Int32Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
14963 auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
14964 auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(4)));
14965 LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
14966 llvm::MDNode::get(CGF.getLLVMContext(), None));
14967 return LD;
14968 }
14969 } // namespace
14970
14971 // For processing memory ordering and memory scope arguments of various
14972 // amdgcn builtins.
14973 // \p Order takes a C++11-compatible memory-ordering specifier and converts
14974 // it into LLVM's memory ordering specifier using the atomic C ABI, writing
14975 // the result to \p AO. \p Scope takes a const char * and converts it into an
14976 // AMDGCN-specific SyncScopeID, writing it to \p SSID.
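// Illustrative use: __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup")
// reaches this helper with Order == __ATOMIC_SEQ_CST and Scope pointing at the
// string "workgroup", yielding a sequentially consistent ordering in the
// "workgroup" sync scope.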
14977 bool CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
14978 llvm::AtomicOrdering &AO,
14979 llvm::SyncScope::ID &SSID) {
14980 if (isa<llvm::ConstantInt>(Order)) {
14981 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
14982
14983 // Map C11/C++11 memory ordering to LLVM memory ordering
14984 switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
14985 case llvm::AtomicOrderingCABI::acquire:
14986 AO = llvm::AtomicOrdering::Acquire;
14987 break;
14988 case llvm::AtomicOrderingCABI::release:
14989 AO = llvm::AtomicOrdering::Release;
14990 break;
14991 case llvm::AtomicOrderingCABI::acq_rel:
14992 AO = llvm::AtomicOrdering::AcquireRelease;
14993 break;
14994 case llvm::AtomicOrderingCABI::seq_cst:
14995 AO = llvm::AtomicOrdering::SequentiallyConsistent;
14996 break;
14997 case llvm::AtomicOrderingCABI::consume:
14998 case llvm::AtomicOrderingCABI::relaxed:
14999 break;
15000 }
15001
15002 StringRef scp;
15003 llvm::getConstantStringInfo(Scope, scp);
15004 SSID = getLLVMContext().getOrInsertSyncScopeID(scp);
15005 return true;
15006 }
15007 return false;
15008 }
15009
15010 Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
15011 const CallExpr *E) {
15012 llvm::AtomicOrdering AO = llvm::AtomicOrdering::SequentiallyConsistent;
15013 llvm::SyncScope::ID SSID;
15014 switch (BuiltinID) {
15015 case AMDGPU::BI__builtin_amdgcn_div_scale:
15016 case AMDGPU::BI__builtin_amdgcn_div_scalef: {
15017     // Translate from the intrinsic's struct return to the builtin's out
15018 // argument.
15019
15020 Address FlagOutPtr = EmitPointerWithAlignment(E->getArg(3));
15021
15022 llvm::Value *X = EmitScalarExpr(E->getArg(0));
15023 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
15024 llvm::Value *Z = EmitScalarExpr(E->getArg(2));
15025
15026 llvm::Function *Callee = CGM.getIntrinsic(Intrinsic::amdgcn_div_scale,
15027 X->getType());
15028
15029 llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z});
15030
15031 llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0);
15032 llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);
15033
15034 llvm::Type *RealFlagType
15035 = FlagOutPtr.getPointer()->getType()->getPointerElementType();
15036
15037 llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
15038 Builder.CreateStore(FlagExt, FlagOutPtr);
15039 return Result;
15040 }
15041 case AMDGPU::BI__builtin_amdgcn_div_fmas:
15042 case AMDGPU::BI__builtin_amdgcn_div_fmasf: {
15043 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
15044 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
15045 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
15046 llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
15047
15048 llvm::Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas,
15049 Src0->getType());
15050 llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3);
15051 return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool});
15052 }
15053
15054 case AMDGPU::BI__builtin_amdgcn_ds_swizzle:
15055 return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle);
15056 case AMDGPU::BI__builtin_amdgcn_mov_dpp8:
15057 return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_mov_dpp8);
15058 case AMDGPU::BI__builtin_amdgcn_mov_dpp:
15059 case AMDGPU::BI__builtin_amdgcn_update_dpp: {
15060 llvm::SmallVector<llvm::Value *, 6> Args;
15061 for (unsigned I = 0; I != E->getNumArgs(); ++I)
15062 Args.push_back(EmitScalarExpr(E->getArg(I)));
15063 assert(Args.size() == 5 || Args.size() == 6);
15064 if (Args.size() == 5)
15065 Args.insert(Args.begin(), llvm::UndefValue::get(Args[0]->getType()));
15066 Function *F =
15067 CGM.getIntrinsic(Intrinsic::amdgcn_update_dpp, Args[0]->getType());
15068 return Builder.CreateCall(F, Args);
15069 }
15070 case AMDGPU::BI__builtin_amdgcn_div_fixup:
15071 case AMDGPU::BI__builtin_amdgcn_div_fixupf:
15072 case AMDGPU::BI__builtin_amdgcn_div_fixuph:
15073 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_div_fixup);
15074 case AMDGPU::BI__builtin_amdgcn_trig_preop:
15075 case AMDGPU::BI__builtin_amdgcn_trig_preopf:
15076 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_trig_preop);
15077 case AMDGPU::BI__builtin_amdgcn_rcp:
15078 case AMDGPU::BI__builtin_amdgcn_rcpf:
15079 case AMDGPU::BI__builtin_amdgcn_rcph:
15080 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp);
15081 case AMDGPU::BI__builtin_amdgcn_sqrt:
15082 case AMDGPU::BI__builtin_amdgcn_sqrtf:
15083 case AMDGPU::BI__builtin_amdgcn_sqrth:
15084 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sqrt);
15085 case AMDGPU::BI__builtin_amdgcn_rsq:
15086 case AMDGPU::BI__builtin_amdgcn_rsqf:
15087 case AMDGPU::BI__builtin_amdgcn_rsqh:
15088 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq);
15089 case AMDGPU::BI__builtin_amdgcn_rsq_clamp:
15090 case AMDGPU::BI__builtin_amdgcn_rsq_clampf:
15091 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq_clamp);
15092 case AMDGPU::BI__builtin_amdgcn_sinf:
15093 case AMDGPU::BI__builtin_amdgcn_sinh:
15094 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sin);
15095 case AMDGPU::BI__builtin_amdgcn_cosf:
15096 case AMDGPU::BI__builtin_amdgcn_cosh:
15097 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos);
15098 case AMDGPU::BI__builtin_amdgcn_dispatch_ptr:
15099 return EmitAMDGPUDispatchPtr(*this, E);
15100 case AMDGPU::BI__builtin_amdgcn_log_clampf:
15101 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp);
15102 case AMDGPU::BI__builtin_amdgcn_ldexp:
15103 case AMDGPU::BI__builtin_amdgcn_ldexpf:
15104 case AMDGPU::BI__builtin_amdgcn_ldexph:
15105 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_ldexp);
15106 case AMDGPU::BI__builtin_amdgcn_frexp_mant:
15107 case AMDGPU::BI__builtin_amdgcn_frexp_mantf:
15108 case AMDGPU::BI__builtin_amdgcn_frexp_manth:
15109 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_mant);
15110 case AMDGPU::BI__builtin_amdgcn_frexp_exp:
15111 case AMDGPU::BI__builtin_amdgcn_frexp_expf: {
15112 Value *Src0 = EmitScalarExpr(E->getArg(0));
15113 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
15114 { Builder.getInt32Ty(), Src0->getType() });
15115 return Builder.CreateCall(F, Src0);
15116 }
15117 case AMDGPU::BI__builtin_amdgcn_frexp_exph: {
15118 Value *Src0 = EmitScalarExpr(E->getArg(0));
15119 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
15120 { Builder.getInt16Ty(), Src0->getType() });
15121 return Builder.CreateCall(F, Src0);
15122 }
15123 case AMDGPU::BI__builtin_amdgcn_fract:
15124 case AMDGPU::BI__builtin_amdgcn_fractf:
15125 case AMDGPU::BI__builtin_amdgcn_fracth:
15126 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_fract);
15127 case AMDGPU::BI__builtin_amdgcn_lerp:
15128 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_lerp);
15129 case AMDGPU::BI__builtin_amdgcn_ubfe:
15130 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_ubfe);
15131 case AMDGPU::BI__builtin_amdgcn_sbfe:
15132 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_sbfe);
15133 case AMDGPU::BI__builtin_amdgcn_uicmp:
15134 case AMDGPU::BI__builtin_amdgcn_uicmpl:
15135 case AMDGPU::BI__builtin_amdgcn_sicmp:
15136 case AMDGPU::BI__builtin_amdgcn_sicmpl: {
15137 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
15138 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
15139 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
15140
15141 // FIXME-GFX10: How should 32 bit mask be handled?
15142 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_icmp,
15143 { Builder.getInt64Ty(), Src0->getType() });
15144 return Builder.CreateCall(F, { Src0, Src1, Src2 });
15145 }
15146 case AMDGPU::BI__builtin_amdgcn_fcmp:
15147 case AMDGPU::BI__builtin_amdgcn_fcmpf: {
15148 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
15149 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
15150 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
15151
15152 // FIXME-GFX10: How should 32 bit mask be handled?
15153 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_fcmp,
15154 { Builder.getInt64Ty(), Src0->getType() });
15155 return Builder.CreateCall(F, { Src0, Src1, Src2 });
15156 }
15157 case AMDGPU::BI__builtin_amdgcn_class:
15158 case AMDGPU::BI__builtin_amdgcn_classf:
15159 case AMDGPU::BI__builtin_amdgcn_classh:
15160 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_class);
15161 case AMDGPU::BI__builtin_amdgcn_fmed3f:
15162 case AMDGPU::BI__builtin_amdgcn_fmed3h:
15163 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fmed3);
15164 case AMDGPU::BI__builtin_amdgcn_ds_append:
15165 case AMDGPU::BI__builtin_amdgcn_ds_consume: {
15166 Intrinsic::ID Intrin = BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_append ?
15167 Intrinsic::amdgcn_ds_append : Intrinsic::amdgcn_ds_consume;
15168 Value *Src0 = EmitScalarExpr(E->getArg(0));
15169 Function *F = CGM.getIntrinsic(Intrin, { Src0->getType() });
15170 return Builder.CreateCall(F, { Src0, Builder.getFalse() });
15171 }
15172 case AMDGPU::BI__builtin_amdgcn_ds_faddf:
15173 case AMDGPU::BI__builtin_amdgcn_ds_fminf:
15174 case AMDGPU::BI__builtin_amdgcn_ds_fmaxf: {
15175 Intrinsic::ID Intrin;
15176 switch (BuiltinID) {
15177 case AMDGPU::BI__builtin_amdgcn_ds_faddf:
15178 Intrin = Intrinsic::amdgcn_ds_fadd;
15179 break;
15180 case AMDGPU::BI__builtin_amdgcn_ds_fminf:
15181 Intrin = Intrinsic::amdgcn_ds_fmin;
15182 break;
15183 case AMDGPU::BI__builtin_amdgcn_ds_fmaxf:
15184 Intrin = Intrinsic::amdgcn_ds_fmax;
15185 break;
15186 }
15187 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
15188 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
15189 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
15190 llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
15191 llvm::Value *Src4 = EmitScalarExpr(E->getArg(4));
15192 llvm::Function *F = CGM.getIntrinsic(Intrin, { Src1->getType() });
15193 llvm::FunctionType *FTy = F->getFunctionType();
15194 llvm::Type *PTy = FTy->getParamType(0);
15195 Src0 = Builder.CreatePointerBitCastOrAddrSpaceCast(Src0, PTy);
15196 return Builder.CreateCall(F, { Src0, Src1, Src2, Src3, Src4 });
15197 }
15198 case AMDGPU::BI__builtin_amdgcn_read_exec: {
15199 CallInst *CI = cast<CallInst>(
15200 EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, NormalRead, "exec"));
15201 CI->setConvergent();
15202 return CI;
15203 }
15204 case AMDGPU::BI__builtin_amdgcn_read_exec_lo:
15205 case AMDGPU::BI__builtin_amdgcn_read_exec_hi: {
15206 StringRef RegName = BuiltinID == AMDGPU::BI__builtin_amdgcn_read_exec_lo ?
15207 "exec_lo" : "exec_hi";
15208 CallInst *CI = cast<CallInst>(
15209 EmitSpecialRegisterBuiltin(*this, E, Int32Ty, Int32Ty, NormalRead, RegName));
15210 CI->setConvergent();
15211 return CI;
15212 }
15213 // amdgcn workitem
15214 case AMDGPU::BI__builtin_amdgcn_workitem_id_x:
15215 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_x, 0, 1024);
15216 case AMDGPU::BI__builtin_amdgcn_workitem_id_y:
15217 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_y, 0, 1024);
15218 case AMDGPU::BI__builtin_amdgcn_workitem_id_z:
15219 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_z, 0, 1024);
15220
15221 // amdgcn workgroup size
15222 case AMDGPU::BI__builtin_amdgcn_workgroup_size_x:
15223 return EmitAMDGPUWorkGroupSize(*this, 0);
15224 case AMDGPU::BI__builtin_amdgcn_workgroup_size_y:
15225 return EmitAMDGPUWorkGroupSize(*this, 1);
15226 case AMDGPU::BI__builtin_amdgcn_workgroup_size_z:
15227 return EmitAMDGPUWorkGroupSize(*this, 2);
15228
15229 // amdgcn grid size
15230 case AMDGPU::BI__builtin_amdgcn_grid_size_x:
15231 return EmitAMDGPUGridSize(*this, 0);
15232 case AMDGPU::BI__builtin_amdgcn_grid_size_y:
15233 return EmitAMDGPUGridSize(*this, 1);
15234 case AMDGPU::BI__builtin_amdgcn_grid_size_z:
15235 return EmitAMDGPUGridSize(*this, 2);
15236
15237 // r600 intrinsics
15238 case AMDGPU::BI__builtin_r600_recipsqrt_ieee:
15239 case AMDGPU::BI__builtin_r600_recipsqrt_ieeef:
15240 return emitUnaryBuiltin(*this, E, Intrinsic::r600_recipsqrt_ieee);
15241 case AMDGPU::BI__builtin_r600_read_tidig_x:
15242 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_x, 0, 1024);
15243 case AMDGPU::BI__builtin_r600_read_tidig_y:
15244 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_y, 0, 1024);
15245 case AMDGPU::BI__builtin_r600_read_tidig_z:
15246 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_z, 0, 1024);
15247 case AMDGPU::BI__builtin_amdgcn_alignbit: {
15248 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
15249 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
15250 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
15251 Function *F = CGM.getIntrinsic(Intrinsic::fshr, Src0->getType());
15252 return Builder.CreateCall(F, { Src0, Src1, Src2 });
15253 }
15254
15255 case AMDGPU::BI__builtin_amdgcn_fence: {
15256 if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(0)),
15257 EmitScalarExpr(E->getArg(1)), AO, SSID))
15258 return Builder.CreateFence(AO, SSID);
15259 LLVM_FALLTHROUGH;
15260 }
15261 case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
15262 case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
15263 case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
15264 case AMDGPU::BI__builtin_amdgcn_atomic_dec64: {
15265 unsigned BuiltinAtomicOp;
15266 llvm::Type *ResultType = ConvertType(E->getType());
15267
15268 switch (BuiltinID) {
15269 case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
15270 case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
15271 BuiltinAtomicOp = Intrinsic::amdgcn_atomic_inc;
15272 break;
15273 case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
15274 case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
15275 BuiltinAtomicOp = Intrinsic::amdgcn_atomic_dec;
15276 break;
15277 }
15278
15279 Value *Ptr = EmitScalarExpr(E->getArg(0));
15280 Value *Val = EmitScalarExpr(E->getArg(1));
15281
15282 llvm::Function *F =
15283 CGM.getIntrinsic(BuiltinAtomicOp, {ResultType, Ptr->getType()});
15284
15285 if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(2)),
15286 EmitScalarExpr(E->getArg(3)), AO, SSID)) {
15287
15288       // llvm.amdgcn.atomic.inc and llvm.amdgcn.atomic.dec expect the ordering
15289       // and scope as unsigned values.
15290 Value *MemOrder = Builder.getInt32(static_cast<int>(AO));
15291 Value *MemScope = Builder.getInt32(static_cast<int>(SSID));
15292
15293 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
15294 bool Volatile =
15295 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
15296 Value *IsVolatile = Builder.getInt1(static_cast<bool>(Volatile));
15297
15298 return Builder.CreateCall(F, {Ptr, Val, MemOrder, MemScope, IsVolatile});
15299 }
15300 LLVM_FALLTHROUGH;
15301 }
15302 default:
15303 return nullptr;
15304 }
15305 }
15306
15307 /// Handle a SystemZ function in which the final argument is a pointer
15308 /// to an int that receives the post-instruction CC value. At the LLVM level
15309 /// this is represented as a function that returns a {result, cc} pair.
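/// For example, __builtin_s390_vpkshs(a, b, &cc) (illustrative) becomes a call
/// of llvm.s390.vpkshs; the second element of the returned pair is stored
/// through the trailing int pointer and the first element is returned.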
15310 static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF,
15311 unsigned IntrinsicID,
15312 const CallExpr *E) {
15313 unsigned NumArgs = E->getNumArgs() - 1;
15314 SmallVector<Value *, 8> Args(NumArgs);
15315 for (unsigned I = 0; I < NumArgs; ++I)
15316 Args[I] = CGF.EmitScalarExpr(E->getArg(I));
15317 Address CCPtr = CGF.EmitPointerWithAlignment(E->getArg(NumArgs));
15318 Function *F = CGF.CGM.getIntrinsic(IntrinsicID);
15319 Value *Call = CGF.Builder.CreateCall(F, Args);
15320 Value *CC = CGF.Builder.CreateExtractValue(Call, 1);
15321 CGF.Builder.CreateStore(CC, CCPtr);
15322 return CGF.Builder.CreateExtractValue(Call, 0);
15323 }
15324
15325 Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
15326 const CallExpr *E) {
15327 switch (BuiltinID) {
15328 case SystemZ::BI__builtin_tbegin: {
15329 Value *TDB = EmitScalarExpr(E->getArg(0));
15330 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
15331 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin);
15332 return Builder.CreateCall(F, {TDB, Control});
15333 }
15334 case SystemZ::BI__builtin_tbegin_nofloat: {
15335 Value *TDB = EmitScalarExpr(E->getArg(0));
15336 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
15337 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat);
15338 return Builder.CreateCall(F, {TDB, Control});
15339 }
15340 case SystemZ::BI__builtin_tbeginc: {
15341 Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy);
15342 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08);
15343 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc);
15344 return Builder.CreateCall(F, {TDB, Control});
15345 }
15346 case SystemZ::BI__builtin_tabort: {
15347 Value *Data = EmitScalarExpr(E->getArg(0));
15348 Function *F = CGM.getIntrinsic(Intrinsic::s390_tabort);
15349 return Builder.CreateCall(F, Builder.CreateSExt(Data, Int64Ty, "tabort"));
15350 }
15351 case SystemZ::BI__builtin_non_tx_store: {
15352 Value *Address = EmitScalarExpr(E->getArg(0));
15353 Value *Data = EmitScalarExpr(E->getArg(1));
15354 Function *F = CGM.getIntrinsic(Intrinsic::s390_ntstg);
15355 return Builder.CreateCall(F, {Data, Address});
15356 }
15357
15358 // Vector builtins. Note that most vector builtins are mapped automatically
15359 // to target-specific LLVM intrinsics. The ones handled specially here can
15360 // be represented via standard LLVM IR, which is preferable to enable common
15361 // LLVM optimizations.
15362
15363 case SystemZ::BI__builtin_s390_vpopctb:
15364 case SystemZ::BI__builtin_s390_vpopcth:
15365 case SystemZ::BI__builtin_s390_vpopctf:
15366 case SystemZ::BI__builtin_s390_vpopctg: {
15367 llvm::Type *ResultType = ConvertType(E->getType());
15368 Value *X = EmitScalarExpr(E->getArg(0));
15369 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
15370 return Builder.CreateCall(F, X);
15371 }
15372
15373 case SystemZ::BI__builtin_s390_vclzb:
15374 case SystemZ::BI__builtin_s390_vclzh:
15375 case SystemZ::BI__builtin_s390_vclzf:
15376 case SystemZ::BI__builtin_s390_vclzg: {
15377 llvm::Type *ResultType = ConvertType(E->getType());
15378 Value *X = EmitScalarExpr(E->getArg(0));
15379 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
15380 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
15381 return Builder.CreateCall(F, {X, Undef});
15382 }
15383
15384 case SystemZ::BI__builtin_s390_vctzb:
15385 case SystemZ::BI__builtin_s390_vctzh:
15386 case SystemZ::BI__builtin_s390_vctzf:
15387 case SystemZ::BI__builtin_s390_vctzg: {
15388 llvm::Type *ResultType = ConvertType(E->getType());
15389 Value *X = EmitScalarExpr(E->getArg(0));
15390 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
15391 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
15392 return Builder.CreateCall(F, {X, Undef});
15393 }
15394
15395 case SystemZ::BI__builtin_s390_vfsqsb:
15396 case SystemZ::BI__builtin_s390_vfsqdb: {
15397 llvm::Type *ResultType = ConvertType(E->getType());
15398 Value *X = EmitScalarExpr(E->getArg(0));
15399 if (Builder.getIsFPConstrained()) {
15400 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, ResultType);
15401 return Builder.CreateConstrainedFPCall(F, { X });
15402 } else {
15403 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
15404 return Builder.CreateCall(F, X);
15405 }
15406 }
15407 case SystemZ::BI__builtin_s390_vfmasb:
15408 case SystemZ::BI__builtin_s390_vfmadb: {
15409 llvm::Type *ResultType = ConvertType(E->getType());
15410 Value *X = EmitScalarExpr(E->getArg(0));
15411 Value *Y = EmitScalarExpr(E->getArg(1));
15412 Value *Z = EmitScalarExpr(E->getArg(2));
15413 if (Builder.getIsFPConstrained()) {
15414 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
15415 return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
15416 } else {
15417 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
15418 return Builder.CreateCall(F, {X, Y, Z});
15419 }
15420 }
15421 case SystemZ::BI__builtin_s390_vfmssb:
15422 case SystemZ::BI__builtin_s390_vfmsdb: {
15423 llvm::Type *ResultType = ConvertType(E->getType());
15424 Value *X = EmitScalarExpr(E->getArg(0));
15425 Value *Y = EmitScalarExpr(E->getArg(1));
15426 Value *Z = EmitScalarExpr(E->getArg(2));
15427 if (Builder.getIsFPConstrained()) {
15428 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
15429 return Builder.CreateConstrainedFPCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
15430 } else {
15431 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
15432 return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
15433 }
15434 }
15435 case SystemZ::BI__builtin_s390_vfnmasb:
15436 case SystemZ::BI__builtin_s390_vfnmadb: {
15437 llvm::Type *ResultType = ConvertType(E->getType());
15438 Value *X = EmitScalarExpr(E->getArg(0));
15439 Value *Y = EmitScalarExpr(E->getArg(1));
15440 Value *Z = EmitScalarExpr(E->getArg(2));
15441 if (Builder.getIsFPConstrained()) {
15442 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
15443 return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
15444 } else {
15445 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
15446 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
15447 }
15448 }
15449 case SystemZ::BI__builtin_s390_vfnmssb:
15450 case SystemZ::BI__builtin_s390_vfnmsdb: {
15451 llvm::Type *ResultType = ConvertType(E->getType());
15452 Value *X = EmitScalarExpr(E->getArg(0));
15453 Value *Y = EmitScalarExpr(E->getArg(1));
15454 Value *Z = EmitScalarExpr(E->getArg(2));
15455 if (Builder.getIsFPConstrained()) {
15456 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
15457 Value *NegZ = Builder.CreateFNeg(Z, "sub");
15458 return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, NegZ}));
15459 } else {
15460 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
15461 Value *NegZ = Builder.CreateFNeg(Z, "neg");
15462 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, NegZ}));
15463 }
15464 }
15465 case SystemZ::BI__builtin_s390_vflpsb:
15466 case SystemZ::BI__builtin_s390_vflpdb: {
15467 llvm::Type *ResultType = ConvertType(E->getType());
15468 Value *X = EmitScalarExpr(E->getArg(0));
15469 Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
15470 return Builder.CreateCall(F, X);
15471 }
15472 case SystemZ::BI__builtin_s390_vflnsb:
15473 case SystemZ::BI__builtin_s390_vflndb: {
15474 llvm::Type *ResultType = ConvertType(E->getType());
15475 Value *X = EmitScalarExpr(E->getArg(0));
15476 Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
15477 return Builder.CreateFNeg(Builder.CreateCall(F, X), "neg");
15478 }
15479 case SystemZ::BI__builtin_s390_vfisb:
15480 case SystemZ::BI__builtin_s390_vfidb: {
15481 llvm::Type *ResultType = ConvertType(E->getType());
15482 Value *X = EmitScalarExpr(E->getArg(0));
15483 // Constant-fold the M4 and M5 mask arguments.
15484 llvm::APSInt M4 = *E->getArg(1)->getIntegerConstantExpr(getContext());
15485 llvm::APSInt M5 = *E->getArg(2)->getIntegerConstantExpr(getContext());
15486     // Check whether this instance can be represented via an LLVM standard
15487 // intrinsic. We only support some combinations of M4 and M5.
15488 Intrinsic::ID ID = Intrinsic::not_intrinsic;
15489 Intrinsic::ID CI;
15490 switch (M4.getZExtValue()) {
15491 default: break;
15492 case 0: // IEEE-inexact exception allowed
15493 switch (M5.getZExtValue()) {
15494 default: break;
15495 case 0: ID = Intrinsic::rint;
15496 CI = Intrinsic::experimental_constrained_rint; break;
15497 }
15498 break;
15499 case 4: // IEEE-inexact exception suppressed
15500 switch (M5.getZExtValue()) {
15501 default: break;
15502 case 0: ID = Intrinsic::nearbyint;
15503 CI = Intrinsic::experimental_constrained_nearbyint; break;
15504 case 1: ID = Intrinsic::round;
15505 CI = Intrinsic::experimental_constrained_round; break;
15506 case 5: ID = Intrinsic::trunc;
15507 CI = Intrinsic::experimental_constrained_trunc; break;
15508 case 6: ID = Intrinsic::ceil;
15509 CI = Intrinsic::experimental_constrained_ceil; break;
15510 case 7: ID = Intrinsic::floor;
15511 CI = Intrinsic::experimental_constrained_floor; break;
15512 }
15513 break;
15514 }
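    // For example, M4 == 4 with M5 == 5 (truncate toward zero, inexact
    // suppressed) maps to llvm.trunc, or to the constrained variant when
    // strict FP is in effect.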
15515 if (ID != Intrinsic::not_intrinsic) {
15516 if (Builder.getIsFPConstrained()) {
15517 Function *F = CGM.getIntrinsic(CI, ResultType);
15518 return Builder.CreateConstrainedFPCall(F, X);
15519 } else {
15520 Function *F = CGM.getIntrinsic(ID, ResultType);
15521 return Builder.CreateCall(F, X);
15522 }
15523 }
15524 switch (BuiltinID) { // FIXME: constrained version?
15525 case SystemZ::BI__builtin_s390_vfisb: ID = Intrinsic::s390_vfisb; break;
15526 case SystemZ::BI__builtin_s390_vfidb: ID = Intrinsic::s390_vfidb; break;
15527 default: llvm_unreachable("Unknown BuiltinID");
15528 }
15529 Function *F = CGM.getIntrinsic(ID);
15530 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
15531 Value *M5Value = llvm::ConstantInt::get(getLLVMContext(), M5);
15532 return Builder.CreateCall(F, {X, M4Value, M5Value});
15533 }
15534 case SystemZ::BI__builtin_s390_vfmaxsb:
15535 case SystemZ::BI__builtin_s390_vfmaxdb: {
15536 llvm::Type *ResultType = ConvertType(E->getType());
15537 Value *X = EmitScalarExpr(E->getArg(0));
15538 Value *Y = EmitScalarExpr(E->getArg(1));
15539 // Constant-fold the M4 mask argument.
15540 llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext());
15541     // Check whether this instance can be represented via an LLVM standard
15542 // intrinsic. We only support some values of M4.
15543 Intrinsic::ID ID = Intrinsic::not_intrinsic;
15544 Intrinsic::ID CI;
15545 switch (M4.getZExtValue()) {
15546 default: break;
15547 case 4: ID = Intrinsic::maxnum;
15548 CI = Intrinsic::experimental_constrained_maxnum; break;
15549 }
15550 if (ID != Intrinsic::not_intrinsic) {
15551 if (Builder.getIsFPConstrained()) {
15552 Function *F = CGM.getIntrinsic(CI, ResultType);
15553 return Builder.CreateConstrainedFPCall(F, {X, Y});
15554 } else {
15555 Function *F = CGM.getIntrinsic(ID, ResultType);
15556 return Builder.CreateCall(F, {X, Y});
15557 }
15558 }
15559 switch (BuiltinID) {
15560 case SystemZ::BI__builtin_s390_vfmaxsb: ID = Intrinsic::s390_vfmaxsb; break;
15561 case SystemZ::BI__builtin_s390_vfmaxdb: ID = Intrinsic::s390_vfmaxdb; break;
15562 default: llvm_unreachable("Unknown BuiltinID");
15563 }
15564 Function *F = CGM.getIntrinsic(ID);
15565 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
15566 return Builder.CreateCall(F, {X, Y, M4Value});
15567 }
15568 case SystemZ::BI__builtin_s390_vfminsb:
15569 case SystemZ::BI__builtin_s390_vfmindb: {
15570 llvm::Type *ResultType = ConvertType(E->getType());
15571 Value *X = EmitScalarExpr(E->getArg(0));
15572 Value *Y = EmitScalarExpr(E->getArg(1));
15573 // Constant-fold the M4 mask argument.
15574 llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext());
15575     // Check whether this instance can be represented via an LLVM standard
15576 // intrinsic. We only support some values of M4.
15577 Intrinsic::ID ID = Intrinsic::not_intrinsic;
15578 Intrinsic::ID CI;
15579 switch (M4.getZExtValue()) {
15580 default: break;
15581 case 4: ID = Intrinsic::minnum;
15582 CI = Intrinsic::experimental_constrained_minnum; break;
15583 }
15584 if (ID != Intrinsic::not_intrinsic) {
15585 if (Builder.getIsFPConstrained()) {
15586 Function *F = CGM.getIntrinsic(CI, ResultType);
15587 return Builder.CreateConstrainedFPCall(F, {X, Y});
15588 } else {
15589 Function *F = CGM.getIntrinsic(ID, ResultType);
15590 return Builder.CreateCall(F, {X, Y});
15591 }
15592 }
15593 switch (BuiltinID) {
15594 case SystemZ::BI__builtin_s390_vfminsb: ID = Intrinsic::s390_vfminsb; break;
15595 case SystemZ::BI__builtin_s390_vfmindb: ID = Intrinsic::s390_vfmindb; break;
15596 default: llvm_unreachable("Unknown BuiltinID");
15597 }
15598 Function *F = CGM.getIntrinsic(ID);
15599 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
15600 return Builder.CreateCall(F, {X, Y, M4Value});
15601 }
15602
15603 case SystemZ::BI__builtin_s390_vlbrh:
15604 case SystemZ::BI__builtin_s390_vlbrf:
15605 case SystemZ::BI__builtin_s390_vlbrg: {
15606 llvm::Type *ResultType = ConvertType(E->getType());
15607 Value *X = EmitScalarExpr(E->getArg(0));
15608 Function *F = CGM.getIntrinsic(Intrinsic::bswap, ResultType);
15609 return Builder.CreateCall(F, X);
15610 }
15611
15612 // Vector intrinsics that output the post-instruction CC value.
15613
15614 #define INTRINSIC_WITH_CC(NAME) \
15615 case SystemZ::BI__builtin_##NAME: \
15616 return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E)
15617
15618 INTRINSIC_WITH_CC(s390_vpkshs);
15619 INTRINSIC_WITH_CC(s390_vpksfs);
15620 INTRINSIC_WITH_CC(s390_vpksgs);
15621
15622 INTRINSIC_WITH_CC(s390_vpklshs);
15623 INTRINSIC_WITH_CC(s390_vpklsfs);
15624 INTRINSIC_WITH_CC(s390_vpklsgs);
15625
15626 INTRINSIC_WITH_CC(s390_vceqbs);
15627 INTRINSIC_WITH_CC(s390_vceqhs);
15628 INTRINSIC_WITH_CC(s390_vceqfs);
15629 INTRINSIC_WITH_CC(s390_vceqgs);
15630
15631 INTRINSIC_WITH_CC(s390_vchbs);
15632 INTRINSIC_WITH_CC(s390_vchhs);
15633 INTRINSIC_WITH_CC(s390_vchfs);
15634 INTRINSIC_WITH_CC(s390_vchgs);
15635
15636 INTRINSIC_WITH_CC(s390_vchlbs);
15637 INTRINSIC_WITH_CC(s390_vchlhs);
15638 INTRINSIC_WITH_CC(s390_vchlfs);
15639 INTRINSIC_WITH_CC(s390_vchlgs);
15640
15641 INTRINSIC_WITH_CC(s390_vfaebs);
15642 INTRINSIC_WITH_CC(s390_vfaehs);
15643 INTRINSIC_WITH_CC(s390_vfaefs);
15644
15645 INTRINSIC_WITH_CC(s390_vfaezbs);
15646 INTRINSIC_WITH_CC(s390_vfaezhs);
15647 INTRINSIC_WITH_CC(s390_vfaezfs);
15648
15649 INTRINSIC_WITH_CC(s390_vfeebs);
15650 INTRINSIC_WITH_CC(s390_vfeehs);
15651 INTRINSIC_WITH_CC(s390_vfeefs);
15652
15653 INTRINSIC_WITH_CC(s390_vfeezbs);
15654 INTRINSIC_WITH_CC(s390_vfeezhs);
15655 INTRINSIC_WITH_CC(s390_vfeezfs);
15656
15657 INTRINSIC_WITH_CC(s390_vfenebs);
15658 INTRINSIC_WITH_CC(s390_vfenehs);
15659 INTRINSIC_WITH_CC(s390_vfenefs);
15660
15661 INTRINSIC_WITH_CC(s390_vfenezbs);
15662 INTRINSIC_WITH_CC(s390_vfenezhs);
15663 INTRINSIC_WITH_CC(s390_vfenezfs);
15664
15665 INTRINSIC_WITH_CC(s390_vistrbs);
15666 INTRINSIC_WITH_CC(s390_vistrhs);
15667 INTRINSIC_WITH_CC(s390_vistrfs);
15668
15669 INTRINSIC_WITH_CC(s390_vstrcbs);
15670 INTRINSIC_WITH_CC(s390_vstrchs);
15671 INTRINSIC_WITH_CC(s390_vstrcfs);
15672
15673 INTRINSIC_WITH_CC(s390_vstrczbs);
15674 INTRINSIC_WITH_CC(s390_vstrczhs);
15675 INTRINSIC_WITH_CC(s390_vstrczfs);
15676
15677 INTRINSIC_WITH_CC(s390_vfcesbs);
15678 INTRINSIC_WITH_CC(s390_vfcedbs);
15679 INTRINSIC_WITH_CC(s390_vfchsbs);
15680 INTRINSIC_WITH_CC(s390_vfchdbs);
15681 INTRINSIC_WITH_CC(s390_vfchesbs);
15682 INTRINSIC_WITH_CC(s390_vfchedbs);
15683
15684 INTRINSIC_WITH_CC(s390_vftcisb);
15685 INTRINSIC_WITH_CC(s390_vftcidb);
15686
15687 INTRINSIC_WITH_CC(s390_vstrsb);
15688 INTRINSIC_WITH_CC(s390_vstrsh);
15689 INTRINSIC_WITH_CC(s390_vstrsf);
15690
15691 INTRINSIC_WITH_CC(s390_vstrszb);
15692 INTRINSIC_WITH_CC(s390_vstrszh);
15693 INTRINSIC_WITH_CC(s390_vstrszf);
15694
15695 #undef INTRINSIC_WITH_CC
15696
15697 default:
15698 return nullptr;
15699 }
15700 }
15701
15702 namespace {
15703 // Helper classes for mapping MMA builtins to the corresponding LLVM intrinsic variants.
15704 struct NVPTXMmaLdstInfo {
15705 unsigned NumResults; // Number of elements to load/store
15706 // Intrinsic IDs for row/col variants. 0 if the particular layout is unsupported.
15707 unsigned IID_col;
15708 unsigned IID_row;
15709 };
15710
15711 #define MMA_INTR(geom_op_type, layout) \
15712 Intrinsic::nvvm_wmma_##geom_op_type##_##layout##_stride
15713 #define MMA_LDST(n, geom_op_type) \
15714 { n, MMA_INTR(geom_op_type, col), MMA_INTR(geom_op_type, row) }
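// For illustration, MMA_LDST(8, m16n16k16_load_a_f16) expands to:
//   { 8, Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride,
//        Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride }
// i.e. 8 result elements plus the col- and row-major intrinsic IDs.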
15715
15716 static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) {
15717 switch (BuiltinID) {
15718 // FP MMA loads
15719 case NVPTX::BI__hmma_m16n16k16_ld_a:
15720 return MMA_LDST(8, m16n16k16_load_a_f16);
15721 case NVPTX::BI__hmma_m16n16k16_ld_b:
15722 return MMA_LDST(8, m16n16k16_load_b_f16);
15723 case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
15724 return MMA_LDST(4, m16n16k16_load_c_f16);
15725 case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
15726 return MMA_LDST(8, m16n16k16_load_c_f32);
15727 case NVPTX::BI__hmma_m32n8k16_ld_a:
15728 return MMA_LDST(8, m32n8k16_load_a_f16);
15729 case NVPTX::BI__hmma_m32n8k16_ld_b:
15730 return MMA_LDST(8, m32n8k16_load_b_f16);
15731 case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
15732 return MMA_LDST(4, m32n8k16_load_c_f16);
15733 case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
15734 return MMA_LDST(8, m32n8k16_load_c_f32);
15735 case NVPTX::BI__hmma_m8n32k16_ld_a:
15736 return MMA_LDST(8, m8n32k16_load_a_f16);
15737 case NVPTX::BI__hmma_m8n32k16_ld_b:
15738 return MMA_LDST(8, m8n32k16_load_b_f16);
15739 case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
15740 return MMA_LDST(4, m8n32k16_load_c_f16);
15741 case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
15742 return MMA_LDST(8, m8n32k16_load_c_f32);
15743
15744 // Integer MMA loads
15745 case NVPTX::BI__imma_m16n16k16_ld_a_s8:
15746 return MMA_LDST(2, m16n16k16_load_a_s8);
15747 case NVPTX::BI__imma_m16n16k16_ld_a_u8:
15748 return MMA_LDST(2, m16n16k16_load_a_u8);
15749 case NVPTX::BI__imma_m16n16k16_ld_b_s8:
15750 return MMA_LDST(2, m16n16k16_load_b_s8);
15751 case NVPTX::BI__imma_m16n16k16_ld_b_u8:
15752 return MMA_LDST(2, m16n16k16_load_b_u8);
15753 case NVPTX::BI__imma_m16n16k16_ld_c:
15754 return MMA_LDST(8, m16n16k16_load_c_s32);
15755 case NVPTX::BI__imma_m32n8k16_ld_a_s8:
15756 return MMA_LDST(4, m32n8k16_load_a_s8);
15757 case NVPTX::BI__imma_m32n8k16_ld_a_u8:
15758 return MMA_LDST(4, m32n8k16_load_a_u8);
15759 case NVPTX::BI__imma_m32n8k16_ld_b_s8:
15760 return MMA_LDST(1, m32n8k16_load_b_s8);
15761 case NVPTX::BI__imma_m32n8k16_ld_b_u8:
15762 return MMA_LDST(1, m32n8k16_load_b_u8);
15763 case NVPTX::BI__imma_m32n8k16_ld_c:
15764 return MMA_LDST(8, m32n8k16_load_c_s32);
15765 case NVPTX::BI__imma_m8n32k16_ld_a_s8:
15766 return MMA_LDST(1, m8n32k16_load_a_s8);
15767 case NVPTX::BI__imma_m8n32k16_ld_a_u8:
15768 return MMA_LDST(1, m8n32k16_load_a_u8);
15769 case NVPTX::BI__imma_m8n32k16_ld_b_s8:
15770 return MMA_LDST(4, m8n32k16_load_b_s8);
15771 case NVPTX::BI__imma_m8n32k16_ld_b_u8:
15772 return MMA_LDST(4, m8n32k16_load_b_u8);
15773 case NVPTX::BI__imma_m8n32k16_ld_c:
15774 return MMA_LDST(8, m8n32k16_load_c_s32);
15775
15776 // Sub-integer MMA loads.
15777 // Only row/col layout is supported by A/B fragments.
15778 case NVPTX::BI__imma_m8n8k32_ld_a_s4:
15779 return {1, 0, MMA_INTR(m8n8k32_load_a_s4, row)};
15780 case NVPTX::BI__imma_m8n8k32_ld_a_u4:
15781 return {1, 0, MMA_INTR(m8n8k32_load_a_u4, row)};
15782 case NVPTX::BI__imma_m8n8k32_ld_b_s4:
15783 return {1, MMA_INTR(m8n8k32_load_b_s4, col), 0};
15784 case NVPTX::BI__imma_m8n8k32_ld_b_u4:
15785 return {1, MMA_INTR(m8n8k32_load_b_u4, col), 0};
15786 case NVPTX::BI__imma_m8n8k32_ld_c:
15787 return MMA_LDST(2, m8n8k32_load_c_s32);
15788 case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
15789 return {1, 0, MMA_INTR(m8n8k128_load_a_b1, row)};
15790 case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
15791 return {1, MMA_INTR(m8n8k128_load_b_b1, col), 0};
15792 case NVPTX::BI__bmma_m8n8k128_ld_c:
15793 return MMA_LDST(2, m8n8k128_load_c_s32);
15794
15795 // NOTE: We need to follow the inconsistent naming scheme used by NVCC. Unlike
15796 // PTX and LLVM IR, where stores always use fragment D, NVCC builtins always
15797 // use fragment C for both loads and stores.
15798 // FP MMA stores.
15799 case NVPTX::BI__hmma_m16n16k16_st_c_f16:
15800 return MMA_LDST(4, m16n16k16_store_d_f16);
15801 case NVPTX::BI__hmma_m16n16k16_st_c_f32:
15802 return MMA_LDST(8, m16n16k16_store_d_f32);
15803 case NVPTX::BI__hmma_m32n8k16_st_c_f16:
15804 return MMA_LDST(4, m32n8k16_store_d_f16);
15805 case NVPTX::BI__hmma_m32n8k16_st_c_f32:
15806 return MMA_LDST(8, m32n8k16_store_d_f32);
15807 case NVPTX::BI__hmma_m8n32k16_st_c_f16:
15808 return MMA_LDST(4, m8n32k16_store_d_f16);
15809 case NVPTX::BI__hmma_m8n32k16_st_c_f32:
15810 return MMA_LDST(8, m8n32k16_store_d_f32);
15811
15812 // Integer and sub-integer MMA stores.
15813 // Another naming quirk. Unlike other MMA builtins that use PTX types in the
15814 // name, integer loads/stores use LLVM's i32.
15815 case NVPTX::BI__imma_m16n16k16_st_c_i32:
15816 return MMA_LDST(8, m16n16k16_store_d_s32);
15817 case NVPTX::BI__imma_m32n8k16_st_c_i32:
15818 return MMA_LDST(8, m32n8k16_store_d_s32);
15819 case NVPTX::BI__imma_m8n32k16_st_c_i32:
15820 return MMA_LDST(8, m8n32k16_store_d_s32);
15821 case NVPTX::BI__imma_m8n8k32_st_c_i32:
15822 return MMA_LDST(2, m8n8k32_store_d_s32);
15823 case NVPTX::BI__bmma_m8n8k128_st_c_i32:
15824 return MMA_LDST(2, m8n8k128_store_d_s32);
15825
15826 default:
15827 llvm_unreachable("Unknown MMA builtin");
15828 }
15829 }
15830 #undef MMA_LDST
15831 #undef MMA_INTR
15832
15833
15834 struct NVPTXMmaInfo {
15835 unsigned NumEltsA;
15836 unsigned NumEltsB;
15837 unsigned NumEltsC;
15838 unsigned NumEltsD;
15839 std::array<unsigned, 8> Variants;
15840
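// Variants is indexed as Layout * 2 + Satf, matching the ordering of the
// MMA_VARIANTS* tables below (row_row, row_row_satfinite, row_col,
// row_col_satfinite, col_row, ...). For example, Layout == 1 with
// Satf == true selects index 3, i.e. the *_row_col_*_satfinite intrinsic.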
15841 unsigned getMMAIntrinsic(int Layout, bool Satf) {
15842 unsigned Index = Layout * 2 + Satf;
15843 if (Index >= Variants.size())
15844 return 0;
15845 return Variants[Index];
15846 }
15847 };
15848
15849 // Returns the intrinsic that matches Layout and Satf when the combination is
15850 // valid, 0 otherwise.
15851 static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) {
15852 // clang-format off
15853 #define MMA_VARIANTS(geom, type) {{ \
15854 Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type, \
15855 Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \
15856 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
15857 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
15858 Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type, \
15859 Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \
15860 Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type, \
15861 Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite \
15862 }}
15863 // Sub-integer MMA only supports row.col layout.
15864 #define MMA_VARIANTS_I4(geom, type) {{ \
15865 0, \
15866 0, \
15867 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
15868 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
15869 0, \
15870 0, \
15871 0, \
15872 0 \
15873 }}
15874 // b1 MMA does not support .satfinite.
15875 #define MMA_VARIANTS_B1(geom, type) {{ \
15876 0, \
15877 0, \
15878 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
15879 0, \
15880 0, \
15881 0, \
15882 0, \
15883 0 \
15884 }}
15885 // clang-format on
15886 switch (BuiltinID) {
15887 // FP MMA
15888 // Note that the 'type' argument of MMA_VARIANTS uses D_C notation, while
15889 // the NumEltsN fields of the return value are ordered as A,B,C,D.
15890 case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
15891 return {8, 8, 4, 4, MMA_VARIANTS(m16n16k16, f16_f16)};
15892 case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
15893 return {8, 8, 4, 8, MMA_VARIANTS(m16n16k16, f32_f16)};
15894 case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
15895 return {8, 8, 8, 4, MMA_VARIANTS(m16n16k16, f16_f32)};
15896 case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
15897 return {8, 8, 8, 8, MMA_VARIANTS(m16n16k16, f32_f32)};
15898 case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
15899 return {8, 8, 4, 4, MMA_VARIANTS(m32n8k16, f16_f16)};
15900 case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
15901 return {8, 8, 4, 8, MMA_VARIANTS(m32n8k16, f32_f16)};
15902 case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
15903 return {8, 8, 8, 4, MMA_VARIANTS(m32n8k16, f16_f32)};
15904 case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
15905 return {8, 8, 8, 8, MMA_VARIANTS(m32n8k16, f32_f32)};
15906 case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
15907 return {8, 8, 4, 4, MMA_VARIANTS(m8n32k16, f16_f16)};
15908 case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
15909 return {8, 8, 4, 8, MMA_VARIANTS(m8n32k16, f32_f16)};
15910 case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
15911 return {8, 8, 8, 4, MMA_VARIANTS(m8n32k16, f16_f32)};
15912 case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
15913 return {8, 8, 8, 8, MMA_VARIANTS(m8n32k16, f32_f32)};
15914
15915 // Integer MMA
15916 case NVPTX::BI__imma_m16n16k16_mma_s8:
15917 return {2, 2, 8, 8, MMA_VARIANTS(m16n16k16, s8)};
15918 case NVPTX::BI__imma_m16n16k16_mma_u8:
15919 return {2, 2, 8, 8, MMA_VARIANTS(m16n16k16, u8)};
15920 case NVPTX::BI__imma_m32n8k16_mma_s8:
15921 return {4, 1, 8, 8, MMA_VARIANTS(m32n8k16, s8)};
15922 case NVPTX::BI__imma_m32n8k16_mma_u8:
15923 return {4, 1, 8, 8, MMA_VARIANTS(m32n8k16, u8)};
15924 case NVPTX::BI__imma_m8n32k16_mma_s8:
15925 return {1, 4, 8, 8, MMA_VARIANTS(m8n32k16, s8)};
15926 case NVPTX::BI__imma_m8n32k16_mma_u8:
15927 return {1, 4, 8, 8, MMA_VARIANTS(m8n32k16, u8)};
15928
15929 // Sub-integer MMA
15930 case NVPTX::BI__imma_m8n8k32_mma_s4:
15931 return {1, 1, 2, 2, MMA_VARIANTS_I4(m8n8k32, s4)};
15932 case NVPTX::BI__imma_m8n8k32_mma_u4:
15933 return {1, 1, 2, 2, MMA_VARIANTS_I4(m8n8k32, u4)};
15934 case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
15935 return {1, 1, 2, 2, MMA_VARIANTS_B1(m8n8k128, b1)};
15936 default:
15937 llvm_unreachable("Unexpected builtin ID.");
15938 }
15939 #undef MMA_VARIANTS
15940 #undef MMA_VARIANTS_I4
15941 #undef MMA_VARIANTS_B1
15942 }
15943
15944 } // namespace
15945
15946 Value *
15947 CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
15948 auto MakeLdg = [&](unsigned IntrinsicID) {
15949 Value *Ptr = EmitScalarExpr(E->getArg(0));
15950 clang::CharUnits Align =
15951 CGM.getNaturalPointeeTypeAlignment(E->getArg(0)->getType());
15952 return Builder.CreateCall(
15953 CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
15954 Ptr->getType()}),
15955 {Ptr, ConstantInt::get(Builder.getInt32Ty(), Align.getQuantity())});
15956 };
15957 auto MakeScopedAtomic = [&](unsigned IntrinsicID) {
15958 Value *Ptr = EmitScalarExpr(E->getArg(0));
15959 return Builder.CreateCall(
15960 CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
15961 Ptr->getType()}),
15962 {Ptr, EmitScalarExpr(E->getArg(1))});
15963 };
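// Both helpers overload the chosen intrinsic on the pointee type and the
// pointer type of their first argument. As a rough sketch, __nvvm_ldg_i(p)
// becomes a call to llvm.nvvm.ldg.global.i (overloaded on i32 and i32*)
// passing the pointer and its natural alignment as an i32, while the scoped
// atomics simply forward the pointer and the value operand.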
15964 switch (BuiltinID) {
15965 case NVPTX::BI__nvvm_atom_add_gen_i:
15966 case NVPTX::BI__nvvm_atom_add_gen_l:
15967 case NVPTX::BI__nvvm_atom_add_gen_ll:
15968 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Add, E);
15969
15970 case NVPTX::BI__nvvm_atom_sub_gen_i:
15971 case NVPTX::BI__nvvm_atom_sub_gen_l:
15972 case NVPTX::BI__nvvm_atom_sub_gen_ll:
15973 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Sub, E);
15974
15975 case NVPTX::BI__nvvm_atom_and_gen_i:
15976 case NVPTX::BI__nvvm_atom_and_gen_l:
15977 case NVPTX::BI__nvvm_atom_and_gen_ll:
15978 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::And, E);
15979
15980 case NVPTX::BI__nvvm_atom_or_gen_i:
15981 case NVPTX::BI__nvvm_atom_or_gen_l:
15982 case NVPTX::BI__nvvm_atom_or_gen_ll:
15983 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Or, E);
15984
15985 case NVPTX::BI__nvvm_atom_xor_gen_i:
15986 case NVPTX::BI__nvvm_atom_xor_gen_l:
15987 case NVPTX::BI__nvvm_atom_xor_gen_ll:
15988 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xor, E);
15989
15990 case NVPTX::BI__nvvm_atom_xchg_gen_i:
15991 case NVPTX::BI__nvvm_atom_xchg_gen_l:
15992 case NVPTX::BI__nvvm_atom_xchg_gen_ll:
15993 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xchg, E);
15994
15995 case NVPTX::BI__nvvm_atom_max_gen_i:
15996 case NVPTX::BI__nvvm_atom_max_gen_l:
15997 case NVPTX::BI__nvvm_atom_max_gen_ll:
15998 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E);
15999
16000 case NVPTX::BI__nvvm_atom_max_gen_ui:
16001 case NVPTX::BI__nvvm_atom_max_gen_ul:
16002 case NVPTX::BI__nvvm_atom_max_gen_ull:
16003 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMax, E);
16004
16005 case NVPTX::BI__nvvm_atom_min_gen_i:
16006 case NVPTX::BI__nvvm_atom_min_gen_l:
16007 case NVPTX::BI__nvvm_atom_min_gen_ll:
16008 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E);
16009
16010 case NVPTX::BI__nvvm_atom_min_gen_ui:
16011 case NVPTX::BI__nvvm_atom_min_gen_ul:
16012 case NVPTX::BI__nvvm_atom_min_gen_ull:
16013 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMin, E);
16014
16015 case NVPTX::BI__nvvm_atom_cas_gen_i:
16016 case NVPTX::BI__nvvm_atom_cas_gen_l:
16017 case NVPTX::BI__nvvm_atom_cas_gen_ll:
16018 // __nvvm_atom_cas_gen_* should return the old value rather than the
16019 // success flag.
16020 return MakeAtomicCmpXchgValue(*this, E, /*ReturnBool=*/false);
16021
16022 case NVPTX::BI__nvvm_atom_add_gen_f:
16023 case NVPTX::BI__nvvm_atom_add_gen_d: {
16024 Value *Ptr = EmitScalarExpr(E->getArg(0));
16025 Value *Val = EmitScalarExpr(E->getArg(1));
16026 return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::FAdd, Ptr, Val,
16027 AtomicOrdering::SequentiallyConsistent);
16028 }
16029
16030 case NVPTX::BI__nvvm_atom_inc_gen_ui: {
16031 Value *Ptr = EmitScalarExpr(E->getArg(0));
16032 Value *Val = EmitScalarExpr(E->getArg(1));
16033 Function *FnALI32 =
16034 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_inc_32, Ptr->getType());
16035 return Builder.CreateCall(FnALI32, {Ptr, Val});
16036 }
16037
16038 case NVPTX::BI__nvvm_atom_dec_gen_ui: {
16039 Value *Ptr = EmitScalarExpr(E->getArg(0));
16040 Value *Val = EmitScalarExpr(E->getArg(1));
16041 Function *FnALD32 =
16042 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_dec_32, Ptr->getType());
16043 return Builder.CreateCall(FnALD32, {Ptr, Val});
16044 }
16045
16046 case NVPTX::BI__nvvm_ldg_c:
16047 case NVPTX::BI__nvvm_ldg_c2:
16048 case NVPTX::BI__nvvm_ldg_c4:
16049 case NVPTX::BI__nvvm_ldg_s:
16050 case NVPTX::BI__nvvm_ldg_s2:
16051 case NVPTX::BI__nvvm_ldg_s4:
16052 case NVPTX::BI__nvvm_ldg_i:
16053 case NVPTX::BI__nvvm_ldg_i2:
16054 case NVPTX::BI__nvvm_ldg_i4:
16055 case NVPTX::BI__nvvm_ldg_l:
16056 case NVPTX::BI__nvvm_ldg_ll:
16057 case NVPTX::BI__nvvm_ldg_ll2:
16058 case NVPTX::BI__nvvm_ldg_uc:
16059 case NVPTX::BI__nvvm_ldg_uc2:
16060 case NVPTX::BI__nvvm_ldg_uc4:
16061 case NVPTX::BI__nvvm_ldg_us:
16062 case NVPTX::BI__nvvm_ldg_us2:
16063 case NVPTX::BI__nvvm_ldg_us4:
16064 case NVPTX::BI__nvvm_ldg_ui:
16065 case NVPTX::BI__nvvm_ldg_ui2:
16066 case NVPTX::BI__nvvm_ldg_ui4:
16067 case NVPTX::BI__nvvm_ldg_ul:
16068 case NVPTX::BI__nvvm_ldg_ull:
16069 case NVPTX::BI__nvvm_ldg_ull2:
16070 // PTX Interoperability section 2.2: "For a vector with an even number of
16071 // elements, its alignment is set to number of elements times the alignment
16072 // of its member: n*alignof(t)."
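// For example, a four-element float vector loaded via __nvvm_ldg_f4 is
// therefore given 4 * alignof(float) = 16-byte alignment.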
16073 return MakeLdg(Intrinsic::nvvm_ldg_global_i);
16074 case NVPTX::BI__nvvm_ldg_f:
16075 case NVPTX::BI__nvvm_ldg_f2:
16076 case NVPTX::BI__nvvm_ldg_f4:
16077 case NVPTX::BI__nvvm_ldg_d:
16078 case NVPTX::BI__nvvm_ldg_d2:
16079 return MakeLdg(Intrinsic::nvvm_ldg_global_f);
16080
16081 case NVPTX::BI__nvvm_atom_cta_add_gen_i:
16082 case NVPTX::BI__nvvm_atom_cta_add_gen_l:
16083 case NVPTX::BI__nvvm_atom_cta_add_gen_ll:
16084 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta);
16085 case NVPTX::BI__nvvm_atom_sys_add_gen_i:
16086 case NVPTX::BI__nvvm_atom_sys_add_gen_l:
16087 case NVPTX::BI__nvvm_atom_sys_add_gen_ll:
16088 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys);
16089 case NVPTX::BI__nvvm_atom_cta_add_gen_f:
16090 case NVPTX::BI__nvvm_atom_cta_add_gen_d:
16091 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta);
16092 case NVPTX::BI__nvvm_atom_sys_add_gen_f:
16093 case NVPTX::BI__nvvm_atom_sys_add_gen_d:
16094 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys);
16095 case NVPTX::BI__nvvm_atom_cta_xchg_gen_i:
16096 case NVPTX::BI__nvvm_atom_cta_xchg_gen_l:
16097 case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll:
16098 return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta);
16099 case NVPTX::BI__nvvm_atom_sys_xchg_gen_i:
16100 case NVPTX::BI__nvvm_atom_sys_xchg_gen_l:
16101 case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll:
16102 return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys);
16103 case NVPTX::BI__nvvm_atom_cta_max_gen_i:
16104 case NVPTX::BI__nvvm_atom_cta_max_gen_ui:
16105 case NVPTX::BI__nvvm_atom_cta_max_gen_l:
16106 case NVPTX::BI__nvvm_atom_cta_max_gen_ul:
16107 case NVPTX::BI__nvvm_atom_cta_max_gen_ll:
16108 case NVPTX::BI__nvvm_atom_cta_max_gen_ull:
16109 return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta);
16110 case NVPTX::BI__nvvm_atom_sys_max_gen_i:
16111 case NVPTX::BI__nvvm_atom_sys_max_gen_ui:
16112 case NVPTX::BI__nvvm_atom_sys_max_gen_l:
16113 case NVPTX::BI__nvvm_atom_sys_max_gen_ul:
16114 case NVPTX::BI__nvvm_atom_sys_max_gen_ll:
16115 case NVPTX::BI__nvvm_atom_sys_max_gen_ull:
16116 return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys);
16117 case NVPTX::BI__nvvm_atom_cta_min_gen_i:
16118 case NVPTX::BI__nvvm_atom_cta_min_gen_ui:
16119 case NVPTX::BI__nvvm_atom_cta_min_gen_l:
16120 case NVPTX::BI__nvvm_atom_cta_min_gen_ul:
16121 case NVPTX::BI__nvvm_atom_cta_min_gen_ll:
16122 case NVPTX::BI__nvvm_atom_cta_min_gen_ull:
16123 return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta);
16124 case NVPTX::BI__nvvm_atom_sys_min_gen_i:
16125 case NVPTX::BI__nvvm_atom_sys_min_gen_ui:
16126 case NVPTX::BI__nvvm_atom_sys_min_gen_l:
16127 case NVPTX::BI__nvvm_atom_sys_min_gen_ul:
16128 case NVPTX::BI__nvvm_atom_sys_min_gen_ll:
16129 case NVPTX::BI__nvvm_atom_sys_min_gen_ull:
16130 return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys);
16131 case NVPTX::BI__nvvm_atom_cta_inc_gen_ui:
16132 return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta);
16133 case NVPTX::BI__nvvm_atom_cta_dec_gen_ui:
16134 return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta);
16135 case NVPTX::BI__nvvm_atom_sys_inc_gen_ui:
16136 return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys);
16137 case NVPTX::BI__nvvm_atom_sys_dec_gen_ui:
16138 return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys);
16139 case NVPTX::BI__nvvm_atom_cta_and_gen_i:
16140 case NVPTX::BI__nvvm_atom_cta_and_gen_l:
16141 case NVPTX::BI__nvvm_atom_cta_and_gen_ll:
16142 return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta);
16143 case NVPTX::BI__nvvm_atom_sys_and_gen_i:
16144 case NVPTX::BI__nvvm_atom_sys_and_gen_l:
16145 case NVPTX::BI__nvvm_atom_sys_and_gen_ll:
16146 return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys);
16147 case NVPTX::BI__nvvm_atom_cta_or_gen_i:
16148 case NVPTX::BI__nvvm_atom_cta_or_gen_l:
16149 case NVPTX::BI__nvvm_atom_cta_or_gen_ll:
16150 return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta);
16151 case NVPTX::BI__nvvm_atom_sys_or_gen_i:
16152 case NVPTX::BI__nvvm_atom_sys_or_gen_l:
16153 case NVPTX::BI__nvvm_atom_sys_or_gen_ll:
16154 return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys);
16155 case NVPTX::BI__nvvm_atom_cta_xor_gen_i:
16156 case NVPTX::BI__nvvm_atom_cta_xor_gen_l:
16157 case NVPTX::BI__nvvm_atom_cta_xor_gen_ll:
16158 return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta);
16159 case NVPTX::BI__nvvm_atom_sys_xor_gen_i:
16160 case NVPTX::BI__nvvm_atom_sys_xor_gen_l:
16161 case NVPTX::BI__nvvm_atom_sys_xor_gen_ll:
16162 return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys);
16163 case NVPTX::BI__nvvm_atom_cta_cas_gen_i:
16164 case NVPTX::BI__nvvm_atom_cta_cas_gen_l:
16165 case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: {
16166 Value *Ptr = EmitScalarExpr(E->getArg(0));
16167 return Builder.CreateCall(
16168 CGM.getIntrinsic(
16169 Intrinsic::nvvm_atomic_cas_gen_i_cta,
16170 {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
16171 {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
16172 }
16173 case NVPTX::BI__nvvm_atom_sys_cas_gen_i:
16174 case NVPTX::BI__nvvm_atom_sys_cas_gen_l:
16175 case NVPTX::BI__nvvm_atom_sys_cas_gen_ll: {
16176 Value *Ptr = EmitScalarExpr(E->getArg(0));
16177 return Builder.CreateCall(
16178 CGM.getIntrinsic(
16179 Intrinsic::nvvm_atomic_cas_gen_i_sys,
16180 {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
16181 {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
16182 }
16183 case NVPTX::BI__nvvm_match_all_sync_i32p:
16184 case NVPTX::BI__nvvm_match_all_sync_i64p: {
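// The match.all.sync intrinsics return a {value, predicate} pair: the matched
// value is the call result, and the predicate is widened and stored through
// the third builtin argument.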
16185 Value *Mask = EmitScalarExpr(E->getArg(0));
16186 Value *Val = EmitScalarExpr(E->getArg(1));
16187 Address PredOutPtr = EmitPointerWithAlignment(E->getArg(2));
16188 Value *ResultPair = Builder.CreateCall(
16189 CGM.getIntrinsic(BuiltinID == NVPTX::BI__nvvm_match_all_sync_i32p
16190 ? Intrinsic::nvvm_match_all_sync_i32p
16191 : Intrinsic::nvvm_match_all_sync_i64p),
16192 {Mask, Val});
16193 Value *Pred = Builder.CreateZExt(Builder.CreateExtractValue(ResultPair, 1),
16194 PredOutPtr.getElementType());
16195 Builder.CreateStore(Pred, PredOutPtr);
16196 return Builder.CreateExtractValue(ResultPair, 0);
16197 }
16198
16199 // FP MMA loads
16200 case NVPTX::BI__hmma_m16n16k16_ld_a:
16201 case NVPTX::BI__hmma_m16n16k16_ld_b:
16202 case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
16203 case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
16204 case NVPTX::BI__hmma_m32n8k16_ld_a:
16205 case NVPTX::BI__hmma_m32n8k16_ld_b:
16206 case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
16207 case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
16208 case NVPTX::BI__hmma_m8n32k16_ld_a:
16209 case NVPTX::BI__hmma_m8n32k16_ld_b:
16210 case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
16211 case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
16212 // Integer MMA loads.
16213 case NVPTX::BI__imma_m16n16k16_ld_a_s8:
16214 case NVPTX::BI__imma_m16n16k16_ld_a_u8:
16215 case NVPTX::BI__imma_m16n16k16_ld_b_s8:
16216 case NVPTX::BI__imma_m16n16k16_ld_b_u8:
16217 case NVPTX::BI__imma_m16n16k16_ld_c:
16218 case NVPTX::BI__imma_m32n8k16_ld_a_s8:
16219 case NVPTX::BI__imma_m32n8k16_ld_a_u8:
16220 case NVPTX::BI__imma_m32n8k16_ld_b_s8:
16221 case NVPTX::BI__imma_m32n8k16_ld_b_u8:
16222 case NVPTX::BI__imma_m32n8k16_ld_c:
16223 case NVPTX::BI__imma_m8n32k16_ld_a_s8:
16224 case NVPTX::BI__imma_m8n32k16_ld_a_u8:
16225 case NVPTX::BI__imma_m8n32k16_ld_b_s8:
16226 case NVPTX::BI__imma_m8n32k16_ld_b_u8:
16227 case NVPTX::BI__imma_m8n32k16_ld_c:
16228 // Sub-integer MMA loads.
16229 case NVPTX::BI__imma_m8n8k32_ld_a_s4:
16230 case NVPTX::BI__imma_m8n8k32_ld_a_u4:
16231 case NVPTX::BI__imma_m8n8k32_ld_b_s4:
16232 case NVPTX::BI__imma_m8n8k32_ld_b_u4:
16233 case NVPTX::BI__imma_m8n8k32_ld_c:
16234 case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
16235 case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
16236 case NVPTX::BI__bmma_m8n8k128_ld_c:
16237 {
16238 Address Dst = EmitPointerWithAlignment(E->getArg(0));
16239 Value *Src = EmitScalarExpr(E->getArg(1));
16240 Value *Ldm = EmitScalarExpr(E->getArg(2));
16241 Optional<llvm::APSInt> isColMajorArg =
16242 E->getArg(3)->getIntegerConstantExpr(getContext());
16243 if (!isColMajorArg)
16244 return nullptr;
16245 bool isColMajor = isColMajorArg->getSExtValue();
16246 NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
16247 unsigned IID = isColMajor ? II.IID_col : II.IID_row;
16248 if (IID == 0)
16249 return nullptr;
16250
16251 Value *Result =
16252 Builder.CreateCall(CGM.getIntrinsic(IID, Src->getType()), {Src, Ldm});
16253
16254 // Save returned values.
16255 assert(II.NumResults);
16256 if (II.NumResults == 1) {
16257 Builder.CreateAlignedStore(Result, Dst.getPointer(),
16258 CharUnits::fromQuantity(4));
16259 } else {
16260 for (unsigned i = 0; i < II.NumResults; ++i) {
16261 Builder.CreateAlignedStore(
16262 Builder.CreateBitCast(Builder.CreateExtractValue(Result, i),
16263 Dst.getElementType()),
16264 Builder.CreateGEP(Dst.getPointer(),
16265 llvm::ConstantInt::get(IntTy, i)),
16266 CharUnits::fromQuantity(4));
16267 }
16268 }
16269 return Result;
16270 }
16271
16272 case NVPTX::BI__hmma_m16n16k16_st_c_f16:
16273 case NVPTX::BI__hmma_m16n16k16_st_c_f32:
16274 case NVPTX::BI__hmma_m32n8k16_st_c_f16:
16275 case NVPTX::BI__hmma_m32n8k16_st_c_f32:
16276 case NVPTX::BI__hmma_m8n32k16_st_c_f16:
16277 case NVPTX::BI__hmma_m8n32k16_st_c_f32:
16278 case NVPTX::BI__imma_m16n16k16_st_c_i32:
16279 case NVPTX::BI__imma_m32n8k16_st_c_i32:
16280 case NVPTX::BI__imma_m8n32k16_st_c_i32:
16281 case NVPTX::BI__imma_m8n8k32_st_c_i32:
16282 case NVPTX::BI__bmma_m8n8k128_st_c_i32: {
16283 Value *Dst = EmitScalarExpr(E->getArg(0));
16284 Address Src = EmitPointerWithAlignment(E->getArg(1));
16285 Value *Ldm = EmitScalarExpr(E->getArg(2));
16286 Optional<llvm::APSInt> isColMajorArg =
16287 E->getArg(3)->getIntegerConstantExpr(getContext());
16288 if (!isColMajorArg)
16289 return nullptr;
16290 bool isColMajor = isColMajorArg->getSExtValue();
16291 NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
16292 unsigned IID = isColMajor ? II.IID_col : II.IID_row;
16293 if (IID == 0)
16294 return nullptr;
16295 Function *Intrinsic =
16296 CGM.getIntrinsic(IID, Dst->getType());
16297 llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1);
16298 SmallVector<Value *, 10> Values = {Dst};
16299 for (unsigned i = 0; i < II.NumResults; ++i) {
16300 Value *V = Builder.CreateAlignedLoad(
16301 Builder.CreateGEP(Src.getPointer(), llvm::ConstantInt::get(IntTy, i)),
16302 CharUnits::fromQuantity(4));
16303 Values.push_back(Builder.CreateBitCast(V, ParamType));
16304 }
16305 Values.push_back(Ldm);
16306 Value *Result = Builder.CreateCall(Intrinsic, Values);
16307 return Result;
16308 }
16309
16310 // BI__hmma_m16n16k16_mma_<Dtype><CType>(d, a, b, c, layout, satf) -->
16311 // Intrinsic::nvvm_wmma_m16n16k16_mma_sync<layout A,B><DType><CType><Satf>
16312 case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
16313 case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
16314 case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
16315 case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
16316 case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
16317 case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
16318 case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
16319 case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
16320 case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
16321 case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
16322 case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
16323 case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
16324 case NVPTX::BI__imma_m16n16k16_mma_s8:
16325 case NVPTX::BI__imma_m16n16k16_mma_u8:
16326 case NVPTX::BI__imma_m32n8k16_mma_s8:
16327 case NVPTX::BI__imma_m32n8k16_mma_u8:
16328 case NVPTX::BI__imma_m8n32k16_mma_s8:
16329 case NVPTX::BI__imma_m8n32k16_mma_u8:
16330 case NVPTX::BI__imma_m8n8k32_mma_s4:
16331 case NVPTX::BI__imma_m8n8k32_mma_u4:
16332 case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1: {
16333 Address Dst = EmitPointerWithAlignment(E->getArg(0));
16334 Address SrcA = EmitPointerWithAlignment(E->getArg(1));
16335 Address SrcB = EmitPointerWithAlignment(E->getArg(2));
16336 Address SrcC = EmitPointerWithAlignment(E->getArg(3));
16337 Optional<llvm::APSInt> LayoutArg =
16338 E->getArg(4)->getIntegerConstantExpr(getContext());
16339 if (!LayoutArg)
16340 return nullptr;
16341 int Layout = LayoutArg->getSExtValue();
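// Layout encodes the row/col-major combination of the A and B fragments in
// the same order as the MMA_VARIANTS tables: 0 = row.row, 1 = row.col,
// 2 = col.row, 3 = col.col.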
16342 if (Layout < 0 || Layout > 3)
16343 return nullptr;
16344 llvm::APSInt SatfArg;
16345 if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1)
16346 SatfArg = 0; // .b1 does not have satf argument.
16347 else if (Optional<llvm::APSInt> OptSatfArg =
16348 E->getArg(5)->getIntegerConstantExpr(getContext()))
16349 SatfArg = *OptSatfArg;
16350 else
16351 return nullptr;
16352 bool Satf = SatfArg.getSExtValue();
16353 NVPTXMmaInfo MI = getNVPTXMmaInfo(BuiltinID);
16354 unsigned IID = MI.getMMAIntrinsic(Layout, Satf);
16355 if (IID == 0) // Unsupported combination of Layout/Satf.
16356 return nullptr;
16357
16358 SmallVector<Value *, 24> Values;
16359 Function *Intrinsic = CGM.getIntrinsic(IID);
16360 llvm::Type *AType = Intrinsic->getFunctionType()->getParamType(0);
16361 // Load A
16362 for (unsigned i = 0; i < MI.NumEltsA; ++i) {
16363 Value *V = Builder.CreateAlignedLoad(
16364 Builder.CreateGEP(SrcA.getPointer(),
16365 llvm::ConstantInt::get(IntTy, i)),
16366 CharUnits::fromQuantity(4));
16367 Values.push_back(Builder.CreateBitCast(V, AType));
16368 }
16369 // Load B
16370 llvm::Type *BType = Intrinsic->getFunctionType()->getParamType(MI.NumEltsA);
16371 for (unsigned i = 0; i < MI.NumEltsB; ++i) {
16372 Value *V = Builder.CreateAlignedLoad(
16373 Builder.CreateGEP(SrcB.getPointer(),
16374 llvm::ConstantInt::get(IntTy, i)),
16375 CharUnits::fromQuantity(4));
16376 Values.push_back(Builder.CreateBitCast(V, BType));
16377 }
16378 // Load C
16379 llvm::Type *CType =
16380 Intrinsic->getFunctionType()->getParamType(MI.NumEltsA + MI.NumEltsB);
16381 for (unsigned i = 0; i < MI.NumEltsC; ++i) {
16382 Value *V = Builder.CreateAlignedLoad(
16383 Builder.CreateGEP(SrcC.getPointer(),
16384 llvm::ConstantInt::get(IntTy, i)),
16385 CharUnits::fromQuantity(4));
16386 Values.push_back(Builder.CreateBitCast(V, CType));
16387 }
16388 Value *Result = Builder.CreateCall(Intrinsic, Values);
16389 llvm::Type *DType = Dst.getElementType();
16390 for (unsigned i = 0; i < MI.NumEltsD; ++i)
16391 Builder.CreateAlignedStore(
16392 Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType),
16393 Builder.CreateGEP(Dst.getPointer(), llvm::ConstantInt::get(IntTy, i)),
16394 CharUnits::fromQuantity(4));
16395 return Result;
16396 }
16397 default:
16398 return nullptr;
16399 }
16400 }
16401
16402 namespace {
16403 struct BuiltinAlignArgs {
16404 llvm::Value *Src = nullptr;
16405 llvm::Type *SrcType = nullptr;
16406 llvm::Value *Alignment = nullptr;
16407 llvm::Value *Mask = nullptr;
16408 llvm::IntegerType *IntType = nullptr;
16409
16410 BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) {
16411 QualType AstType = E->getArg(0)->getType();
16412 if (AstType->isArrayType())
16413 Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).getPointer();
16414 else
16415 Src = CGF.EmitScalarExpr(E->getArg(0));
16416 SrcType = Src->getType();
16417 if (SrcType->isPointerTy()) {
16418 IntType = IntegerType::get(
16419 CGF.getLLVMContext(),
16420 CGF.CGM.getDataLayout().getIndexTypeSizeInBits(SrcType));
16421 } else {
16422 assert(SrcType->isIntegerTy());
16423 IntType = cast<llvm::IntegerType>(SrcType);
16424 }
16425 Alignment = CGF.EmitScalarExpr(E->getArg(1));
16426 Alignment = CGF.Builder.CreateZExtOrTrunc(Alignment, IntType, "alignment");
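// The mask is Alignment - 1; e.g. an alignment of 16 yields the mask 0xf used
// by the is_aligned / align_up / align_down emitters below.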
16427 auto *One = llvm::ConstantInt::get(IntType, 1);
16428 Mask = CGF.Builder.CreateSub(Alignment, One, "mask");
16429 }
16430 };
16431 } // namespace
16432
16433 /// Generate (x & (y-1)) == 0.
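/// For example (illustrative), __builtin_is_aligned(p, 8) is emitted as the
/// equivalent of ((uintptr_t)p & 7) == 0.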
16434 RValue CodeGenFunction::EmitBuiltinIsAligned(const CallExpr *E) {
16435 BuiltinAlignArgs Args(E, *this);
16436 llvm::Value *SrcAddress = Args.Src;
16437 if (Args.SrcType->isPointerTy())
16438 SrcAddress =
16439 Builder.CreateBitOrPointerCast(Args.Src, Args.IntType, "src_addr");
16440 return RValue::get(Builder.CreateICmpEQ(
16441 Builder.CreateAnd(SrcAddress, Args.Mask, "set_bits"),
16442 llvm::Constant::getNullValue(Args.IntType), "is_aligned"));
16443 }
16444
16445 /// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up.
16446 /// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the
16447 /// llvm.ptrmask intrinsic (with a preceding GEP in the align_up case).
16448 /// TODO: actually use ptrmask once most optimization passes know about it.
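/// A minimal usage sketch (illustrative, assuming a char array 'buf'):
///   char *p = __builtin_align_up(buf, 64);   // rounds buf up to (buf + 63) & ~63
///   bool ok = __builtin_is_aligned(p, 64);   // true by construction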
16449 RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) {
16450 BuiltinAlignArgs Args(E, *this);
16451 llvm::Value *SrcAddr = Args.Src;
16452 if (Args.Src->getType()->isPointerTy())
16453 SrcAddr = Builder.CreatePtrToInt(Args.Src, Args.IntType, "intptr");
16454 llvm::Value *SrcForMask = SrcAddr;
16455 if (AlignUp) {
16456 // When aligning up we have to first add the mask to ensure we go over the
16457 // next alignment value and then align down to the next valid multiple.
16458 // By adding the mask, we ensure that align_up on an already aligned
16459 // value will not change the value.
16460 SrcForMask = Builder.CreateAdd(SrcForMask, Args.Mask, "over_boundary");
16461 }
16462 // Invert the mask to only clear the lower bits.
16463 llvm::Value *InvertedMask = Builder.CreateNot(Args.Mask, "inverted_mask");
16464 llvm::Value *Result =
16465 Builder.CreateAnd(SrcForMask, InvertedMask, "aligned_result");
16466 if (Args.Src->getType()->isPointerTy()) {
16467 /// TODO: Use ptrmask instead of ptrtoint+gep once it is optimized well.
16468 // Result = Builder.CreateIntrinsic(
16469 // Intrinsic::ptrmask, {Args.SrcType, SrcForMask->getType(), Args.IntType},
16470 // {SrcForMask, InvertedMask}, nullptr, "aligned_result");
16471 Result->setName("aligned_intptr");
16472 llvm::Value *Difference = Builder.CreateSub(Result, SrcAddr, "diff");
16473 // The result must point to the same underlying allocation. This means we
16474 // can use an inbounds GEP to enable better optimization.
16475 Value *Base = EmitCastToVoidPtr(Args.Src);
16476 if (getLangOpts().isSignedOverflowDefined())
16477 Result = Builder.CreateGEP(Base, Difference, "aligned_result");
16478 else
16479 Result = EmitCheckedInBoundsGEP(Base, Difference,
16480 /*SignedIndices=*/true,
16481 /*isSubtraction=*/!AlignUp,
16482 E->getExprLoc(), "aligned_result");
16483 Result = Builder.CreatePointerCast(Result, Args.SrcType);
16484 // Emit an alignment assumption to ensure that the new alignment is
16485 // propagated to loads/stores, etc.
16486 emitAlignmentAssumption(Result, E, E->getExprLoc(), Args.Alignment);
16487 }
16488 assert(Result->getType() == Args.SrcType);
16489 return RValue::get(Result);
16490 }
16491
16492 Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
16493 const CallExpr *E) {
16494 switch (BuiltinID) {
16495 case WebAssembly::BI__builtin_wasm_memory_size: {
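// Usage sketch (illustrative): __builtin_wasm_memory_size(0) returns the
// current size of memory 0 in wasm pages by calling llvm.wasm.memory.size,
// overloaded on the result type.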
16496 llvm::Type *ResultType = ConvertType(E->getType());
16497 Value *I = EmitScalarExpr(E->getArg(0));
16498 Function *Callee =
16499 CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType);
16500 return Builder.CreateCall(Callee, I);
16501 }
16502 case WebAssembly::BI__builtin_wasm_memory_grow: {
16503 llvm::Type *ResultType = ConvertType(E->getType());
16504 Value *Args[] = {EmitScalarExpr(E->getArg(0)),
16505 EmitScalarExpr(E->getArg(1))};
16506 Function *Callee =
16507 CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType);
16508 return Builder.CreateCall(Callee, Args);
16509 }
16510 case WebAssembly::BI__builtin_wasm_tls_size: {
16511 llvm::Type *ResultType = ConvertType(E->getType());
16512 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_size, ResultType);
16513 return Builder.CreateCall(Callee);
16514 }
16515 case WebAssembly::BI__builtin_wasm_tls_align: {
16516 llvm::Type *ResultType = ConvertType(E->getType());
16517 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_align, ResultType);
16518 return Builder.CreateCall(Callee);
16519 }
16520 case WebAssembly::BI__builtin_wasm_tls_base: {
16521 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_base);
16522 return Builder.CreateCall(Callee);
16523 }
16524 case WebAssembly::BI__builtin_wasm_throw: {
16525 Value *Tag = EmitScalarExpr(E->getArg(0));
16526 Value *Obj = EmitScalarExpr(E->getArg(1));
16527 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_throw);
16528 return Builder.CreateCall(Callee, {Tag, Obj});
16529 }
16530 case WebAssembly::BI__builtin_wasm_rethrow_in_catch: {
16531 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_rethrow_in_catch);
16532 return Builder.CreateCall(Callee);
16533 }
16534 case WebAssembly::BI__builtin_wasm_memory_atomic_wait32: {
16535 Value *Addr = EmitScalarExpr(E->getArg(0));
16536 Value *Expected = EmitScalarExpr(E->getArg(1));
16537 Value *Timeout = EmitScalarExpr(E->getArg(2));
16538 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_wait32);
16539 return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
16540 }
16541 case WebAssembly::BI__builtin_wasm_memory_atomic_wait64: {
16542 Value *Addr = EmitScalarExpr(E->getArg(0));
16543 Value *Expected = EmitScalarExpr(E->getArg(1));
16544 Value *Timeout = EmitScalarExpr(E->getArg(2));
16545 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_wait64);
16546 return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
16547 }
16548 case WebAssembly::BI__builtin_wasm_memory_atomic_notify: {
16549 Value *Addr = EmitScalarExpr(E->getArg(0));
16550 Value *Count = EmitScalarExpr(E->getArg(1));
16551 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_notify);
16552 return Builder.CreateCall(Callee, {Addr, Count});
16553 }
16554 case WebAssembly::BI__builtin_wasm_trunc_s_i32_f32:
16555 case WebAssembly::BI__builtin_wasm_trunc_s_i32_f64:
16556 case WebAssembly::BI__builtin_wasm_trunc_s_i64_f32:
16557 case WebAssembly::BI__builtin_wasm_trunc_s_i64_f64: {
16558 Value *Src = EmitScalarExpr(E->getArg(0));
16559 llvm::Type *ResT = ConvertType(E->getType());
16560 Function *Callee =
16561 CGM.getIntrinsic(Intrinsic::wasm_trunc_signed, {ResT, Src->getType()});
16562 return Builder.CreateCall(Callee, {Src});
16563 }
16564 case WebAssembly::BI__builtin_wasm_trunc_u_i32_f32:
16565 case WebAssembly::BI__builtin_wasm_trunc_u_i32_f64:
16566 case WebAssembly::BI__builtin_wasm_trunc_u_i64_f32:
16567 case WebAssembly::BI__builtin_wasm_trunc_u_i64_f64: {
16568 Value *Src = EmitScalarExpr(E->getArg(0));
16569 llvm::Type *ResT = ConvertType(E->getType());
16570 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_unsigned,
16571 {ResT, Src->getType()});
16572 return Builder.CreateCall(Callee, {Src});
16573 }
16574 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f32:
16575 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f64:
16576 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f32:
16577 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f64:
16578 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4: {
16579 Value *Src = EmitScalarExpr(E->getArg(0));
16580 llvm::Type *ResT = ConvertType(E->getType());
16581 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_signed,
16582 {ResT, Src->getType()});
16583 return Builder.CreateCall(Callee, {Src});
16584 }
16585 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f32:
16586 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f64:
16587 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f32:
16588 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f64:
16589 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4: {
16590 Value *Src = EmitScalarExpr(E->getArg(0));
16591 llvm::Type *ResT = ConvertType(E->getType());
16592 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_unsigned,
16593 {ResT, Src->getType()});
16594 return Builder.CreateCall(Callee, {Src});
16595 }
16596 case WebAssembly::BI__builtin_wasm_min_f32:
16597 case WebAssembly::BI__builtin_wasm_min_f64:
16598 case WebAssembly::BI__builtin_wasm_min_f32x4:
16599 case WebAssembly::BI__builtin_wasm_min_f64x2: {
16600 Value *LHS = EmitScalarExpr(E->getArg(0));
16601 Value *RHS = EmitScalarExpr(E->getArg(1));
16602 Function *Callee =
16603 CGM.getIntrinsic(Intrinsic::minimum, ConvertType(E->getType()));
16604 return Builder.CreateCall(Callee, {LHS, RHS});
16605 }
16606 case WebAssembly::BI__builtin_wasm_max_f32:
16607 case WebAssembly::BI__builtin_wasm_max_f64:
16608 case WebAssembly::BI__builtin_wasm_max_f32x4:
16609 case WebAssembly::BI__builtin_wasm_max_f64x2: {
16610 Value *LHS = EmitScalarExpr(E->getArg(0));
16611 Value *RHS = EmitScalarExpr(E->getArg(1));
16612 Function *Callee =
16613 CGM.getIntrinsic(Intrinsic::maximum, ConvertType(E->getType()));
16614 return Builder.CreateCall(Callee, {LHS, RHS});
16615 }
16616 case WebAssembly::BI__builtin_wasm_pmin_f32x4:
16617 case WebAssembly::BI__builtin_wasm_pmin_f64x2: {
16618 Value *LHS = EmitScalarExpr(E->getArg(0));
16619 Value *RHS = EmitScalarExpr(E->getArg(1));
16620 Function *Callee =
16621 CGM.getIntrinsic(Intrinsic::wasm_pmin, ConvertType(E->getType()));
16622 return Builder.CreateCall(Callee, {LHS, RHS});
16623 }
16624 case WebAssembly::BI__builtin_wasm_pmax_f32x4:
16625 case WebAssembly::BI__builtin_wasm_pmax_f64x2: {
16626 Value *LHS = EmitScalarExpr(E->getArg(0));
16627 Value *RHS = EmitScalarExpr(E->getArg(1));
16628 Function *Callee =
16629 CGM.getIntrinsic(Intrinsic::wasm_pmax, ConvertType(E->getType()));
16630 return Builder.CreateCall(Callee, {LHS, RHS});
16631 }
16632 case WebAssembly::BI__builtin_wasm_ceil_f32x4:
16633 case WebAssembly::BI__builtin_wasm_floor_f32x4:
16634 case WebAssembly::BI__builtin_wasm_trunc_f32x4:
16635 case WebAssembly::BI__builtin_wasm_nearest_f32x4:
16636 case WebAssembly::BI__builtin_wasm_ceil_f64x2:
16637 case WebAssembly::BI__builtin_wasm_floor_f64x2:
16638 case WebAssembly::BI__builtin_wasm_trunc_f64x2:
16639 case WebAssembly::BI__builtin_wasm_nearest_f64x2: {
16640 unsigned IntNo;
16641 switch (BuiltinID) {
16642 case WebAssembly::BI__builtin_wasm_ceil_f32x4:
16643 case WebAssembly::BI__builtin_wasm_ceil_f64x2:
16644 IntNo = Intrinsic::wasm_ceil;
16645 break;
16646 case WebAssembly::BI__builtin_wasm_floor_f32x4:
16647 case WebAssembly::BI__builtin_wasm_floor_f64x2:
16648 IntNo = Intrinsic::wasm_floor;
16649 break;
16650 case WebAssembly::BI__builtin_wasm_trunc_f32x4:
16651 case WebAssembly::BI__builtin_wasm_trunc_f64x2:
16652 IntNo = Intrinsic::wasm_trunc;
16653 break;
16654 case WebAssembly::BI__builtin_wasm_nearest_f32x4:
16655 case WebAssembly::BI__builtin_wasm_nearest_f64x2:
16656 IntNo = Intrinsic::wasm_nearest;
16657 break;
16658 default:
16659 llvm_unreachable("unexpected builtin ID");
16660 }
16661 Value *Value = EmitScalarExpr(E->getArg(0));
16662 Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
16663 return Builder.CreateCall(Callee, Value);
16664 }
16665 case WebAssembly::BI__builtin_wasm_swizzle_v8x16: {
16666 Value *Src = EmitScalarExpr(E->getArg(0));
16667 Value *Indices = EmitScalarExpr(E->getArg(1));
16668 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_swizzle);
16669 return Builder.CreateCall(Callee, {Src, Indices});
16670 }
16671 case WebAssembly::BI__builtin_wasm_extract_lane_s_i8x16:
16672 case WebAssembly::BI__builtin_wasm_extract_lane_u_i8x16:
16673 case WebAssembly::BI__builtin_wasm_extract_lane_s_i16x8:
16674 case WebAssembly::BI__builtin_wasm_extract_lane_u_i16x8:
16675 case WebAssembly::BI__builtin_wasm_extract_lane_i32x4:
16676 case WebAssembly::BI__builtin_wasm_extract_lane_i64x2:
16677 case WebAssembly::BI__builtin_wasm_extract_lane_f32x4:
16678 case WebAssembly::BI__builtin_wasm_extract_lane_f64x2: {
16679 llvm::APSInt LaneConst =
16680 *E->getArg(1)->getIntegerConstantExpr(getContext());
16681 Value *Vec = EmitScalarExpr(E->getArg(0));
16682 Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
16683 Value *Extract = Builder.CreateExtractElement(Vec, Lane);
16684 switch (BuiltinID) {
16685 case WebAssembly::BI__builtin_wasm_extract_lane_s_i8x16:
16686 case WebAssembly::BI__builtin_wasm_extract_lane_s_i16x8:
16687 return Builder.CreateSExt(Extract, ConvertType(E->getType()));
16688 case WebAssembly::BI__builtin_wasm_extract_lane_u_i8x16:
16689 case WebAssembly::BI__builtin_wasm_extract_lane_u_i16x8:
16690 return Builder.CreateZExt(Extract, ConvertType(E->getType()));
16691 case WebAssembly::BI__builtin_wasm_extract_lane_i32x4:
16692 case WebAssembly::BI__builtin_wasm_extract_lane_i64x2:
16693 case WebAssembly::BI__builtin_wasm_extract_lane_f32x4:
16694 case WebAssembly::BI__builtin_wasm_extract_lane_f64x2:
16695 return Extract;
16696 default:
16697 llvm_unreachable("unexpected builtin ID");
16698 }
16699 }
16700 case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
16701 case WebAssembly::BI__builtin_wasm_replace_lane_i16x8:
16702 case WebAssembly::BI__builtin_wasm_replace_lane_i32x4:
16703 case WebAssembly::BI__builtin_wasm_replace_lane_i64x2:
16704 case WebAssembly::BI__builtin_wasm_replace_lane_f32x4:
16705 case WebAssembly::BI__builtin_wasm_replace_lane_f64x2: {
16706 llvm::APSInt LaneConst =
16707 *E->getArg(1)->getIntegerConstantExpr(getContext());
16708 Value *Vec = EmitScalarExpr(E->getArg(0));
16709 Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
16710 Value *Val = EmitScalarExpr(E->getArg(2));
16711 switch (BuiltinID) {
16712 case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
16713 case WebAssembly::BI__builtin_wasm_replace_lane_i16x8: {
16714 llvm::Type *ElemType =
16715 cast<llvm::VectorType>(ConvertType(E->getType()))->getElementType();
16716 Value *Trunc = Builder.CreateTrunc(Val, ElemType);
16717 return Builder.CreateInsertElement(Vec, Trunc, Lane);
16718 }
16719 case WebAssembly::BI__builtin_wasm_replace_lane_i32x4:
16720 case WebAssembly::BI__builtin_wasm_replace_lane_i64x2:
16721 case WebAssembly::BI__builtin_wasm_replace_lane_f32x4:
16722 case WebAssembly::BI__builtin_wasm_replace_lane_f64x2:
16723 return Builder.CreateInsertElement(Vec, Val, Lane);
16724 default:
16725 llvm_unreachable("unexpected builtin ID");
16726 }
16727 }
16728 case WebAssembly::BI__builtin_wasm_add_saturate_s_i8x16:
16729 case WebAssembly::BI__builtin_wasm_add_saturate_u_i8x16:
16730 case WebAssembly::BI__builtin_wasm_add_saturate_s_i16x8:
16731 case WebAssembly::BI__builtin_wasm_add_saturate_u_i16x8:
16732 case WebAssembly::BI__builtin_wasm_sub_saturate_s_i8x16:
16733 case WebAssembly::BI__builtin_wasm_sub_saturate_u_i8x16:
16734 case WebAssembly::BI__builtin_wasm_sub_saturate_s_i16x8:
16735 case WebAssembly::BI__builtin_wasm_sub_saturate_u_i16x8: {
16736 unsigned IntNo;
16737 switch (BuiltinID) {
16738 case WebAssembly::BI__builtin_wasm_add_saturate_s_i8x16:
16739 case WebAssembly::BI__builtin_wasm_add_saturate_s_i16x8:
16740 IntNo = Intrinsic::sadd_sat;
16741 break;
16742 case WebAssembly::BI__builtin_wasm_add_saturate_u_i8x16:
16743 case WebAssembly::BI__builtin_wasm_add_saturate_u_i16x8:
16744 IntNo = Intrinsic::uadd_sat;
16745 break;
16746 case WebAssembly::BI__builtin_wasm_sub_saturate_s_i8x16:
16747 case WebAssembly::BI__builtin_wasm_sub_saturate_s_i16x8:
16748 IntNo = Intrinsic::wasm_sub_saturate_signed;
16749 break;
16750 case WebAssembly::BI__builtin_wasm_sub_saturate_u_i8x16:
16751 case WebAssembly::BI__builtin_wasm_sub_saturate_u_i16x8:
16752 IntNo = Intrinsic::wasm_sub_saturate_unsigned;
16753 break;
16754 default:
16755 llvm_unreachable("unexpected builtin ID");
16756 }
16757 Value *LHS = EmitScalarExpr(E->getArg(0));
16758 Value *RHS = EmitScalarExpr(E->getArg(1));
16759 Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
16760 return Builder.CreateCall(Callee, {LHS, RHS});
16761 }
16762 case WebAssembly::BI__builtin_wasm_abs_i8x16:
16763 case WebAssembly::BI__builtin_wasm_abs_i16x8:
16764 case WebAssembly::BI__builtin_wasm_abs_i32x4: {
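// Integer abs is lowered as a compare-and-select: select(v < 0, -v, v).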
16765 Value *Vec = EmitScalarExpr(E->getArg(0));
16766 Value *Neg = Builder.CreateNeg(Vec, "neg");
16767 Constant *Zero = llvm::Constant::getNullValue(Vec->getType());
16768 Value *ICmp = Builder.CreateICmpSLT(Vec, Zero, "abscond");
16769 return Builder.CreateSelect(ICmp, Neg, Vec, "abs");
16770 }
16771 case WebAssembly::BI__builtin_wasm_min_s_i8x16:
16772 case WebAssembly::BI__builtin_wasm_min_u_i8x16:
16773 case WebAssembly::BI__builtin_wasm_max_s_i8x16:
16774 case WebAssembly::BI__builtin_wasm_max_u_i8x16:
16775 case WebAssembly::BI__builtin_wasm_min_s_i16x8:
16776 case WebAssembly::BI__builtin_wasm_min_u_i16x8:
16777 case WebAssembly::BI__builtin_wasm_max_s_i16x8:
16778 case WebAssembly::BI__builtin_wasm_max_u_i16x8:
16779 case WebAssembly::BI__builtin_wasm_min_s_i32x4:
16780 case WebAssembly::BI__builtin_wasm_min_u_i32x4:
16781 case WebAssembly::BI__builtin_wasm_max_s_i32x4:
16782 case WebAssembly::BI__builtin_wasm_max_u_i32x4: {
16783 Value *LHS = EmitScalarExpr(E->getArg(0));
16784 Value *RHS = EmitScalarExpr(E->getArg(1));
16785 Value *ICmp;
16786 switch (BuiltinID) {
16787 case WebAssembly::BI__builtin_wasm_min_s_i8x16:
16788 case WebAssembly::BI__builtin_wasm_min_s_i16x8:
16789 case WebAssembly::BI__builtin_wasm_min_s_i32x4:
16790 ICmp = Builder.CreateICmpSLT(LHS, RHS);
16791 break;
16792 case WebAssembly::BI__builtin_wasm_min_u_i8x16:
16793 case WebAssembly::BI__builtin_wasm_min_u_i16x8:
16794 case WebAssembly::BI__builtin_wasm_min_u_i32x4:
16795 ICmp = Builder.CreateICmpULT(LHS, RHS);
16796 break;
16797 case WebAssembly::BI__builtin_wasm_max_s_i8x16:
16798 case WebAssembly::BI__builtin_wasm_max_s_i16x8:
16799 case WebAssembly::BI__builtin_wasm_max_s_i32x4:
16800 ICmp = Builder.CreateICmpSGT(LHS, RHS);
16801 break;
16802 case WebAssembly::BI__builtin_wasm_max_u_i8x16:
16803 case WebAssembly::BI__builtin_wasm_max_u_i16x8:
16804 case WebAssembly::BI__builtin_wasm_max_u_i32x4:
16805 ICmp = Builder.CreateICmpUGT(LHS, RHS);
16806 break;
16807 default:
16808 llvm_unreachable("unexpected builtin ID");
16809 }
16810 return Builder.CreateSelect(ICmp, LHS, RHS);
16811 }
16812 case WebAssembly::BI__builtin_wasm_avgr_u_i8x16:
16813 case WebAssembly::BI__builtin_wasm_avgr_u_i16x8: {
16814 Value *LHS = EmitScalarExpr(E->getArg(0));
16815 Value *RHS = EmitScalarExpr(E->getArg(1));
16816 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_avgr_unsigned,
16817 ConvertType(E->getType()));
16818 return Builder.CreateCall(Callee, {LHS, RHS});
16819 }
16820 case WebAssembly::BI__builtin_wasm_q15mulr_saturate_s_i16x8: {
16821 Value *LHS = EmitScalarExpr(E->getArg(0));
16822 Value *RHS = EmitScalarExpr(E->getArg(1));
16823 Function *Callee =
16824 CGM.getIntrinsic(Intrinsic::wasm_q15mulr_saturate_signed);
16825 return Builder.CreateCall(Callee, {LHS, RHS});
16826 }
16827 case WebAssembly::BI__builtin_wasm_extmul_low_i8x16_s_i16x8:
16828 case WebAssembly::BI__builtin_wasm_extmul_high_i8x16_s_i16x8:
16829 case WebAssembly::BI__builtin_wasm_extmul_low_i8x16_u_i16x8:
16830 case WebAssembly::BI__builtin_wasm_extmul_high_i8x16_u_i16x8:
16831 case WebAssembly::BI__builtin_wasm_extmul_low_i16x8_s_i32x4:
16832 case WebAssembly::BI__builtin_wasm_extmul_high_i16x8_s_i32x4:
16833 case WebAssembly::BI__builtin_wasm_extmul_low_i16x8_u_i32x4:
16834 case WebAssembly::BI__builtin_wasm_extmul_high_i16x8_u_i32x4:
16835 case WebAssembly::BI__builtin_wasm_extmul_low_i32x4_s_i64x2:
16836 case WebAssembly::BI__builtin_wasm_extmul_high_i32x4_s_i64x2:
16837 case WebAssembly::BI__builtin_wasm_extmul_low_i32x4_u_i64x2:
16838 case WebAssembly::BI__builtin_wasm_extmul_high_i32x4_u_i64x2: {
16839 Value *LHS = EmitScalarExpr(E->getArg(0));
16840 Value *RHS = EmitScalarExpr(E->getArg(1));
16841 unsigned IntNo;
16842 switch (BuiltinID) {
16843 case WebAssembly::BI__builtin_wasm_extmul_low_i8x16_s_i16x8:
16844 case WebAssembly::BI__builtin_wasm_extmul_low_i16x8_s_i32x4:
16845 case WebAssembly::BI__builtin_wasm_extmul_low_i32x4_s_i64x2:
16846 IntNo = Intrinsic::wasm_extmul_low_signed;
16847 break;
16848 case WebAssembly::BI__builtin_wasm_extmul_low_i8x16_u_i16x8:
16849 case WebAssembly::BI__builtin_wasm_extmul_low_i16x8_u_i32x4:
16850 case WebAssembly::BI__builtin_wasm_extmul_low_i32x4_u_i64x2:
16851 IntNo = Intrinsic::wasm_extmul_low_unsigned;
16852 break;
16853 case WebAssembly::BI__builtin_wasm_extmul_high_i8x16_s_i16x8:
16854 case WebAssembly::BI__builtin_wasm_extmul_high_i16x8_s_i32x4:
16855 case WebAssembly::BI__builtin_wasm_extmul_high_i32x4_s_i64x2:
16856 IntNo = Intrinsic::wasm_extmul_high_signed;
16857 break;
16858 case WebAssembly::BI__builtin_wasm_extmul_high_i8x16_u_i16x8:
16859 case WebAssembly::BI__builtin_wasm_extmul_high_i16x8_u_i32x4:
16860 case WebAssembly::BI__builtin_wasm_extmul_high_i32x4_u_i64x2:
16861 IntNo = Intrinsic::wasm_extmul_high_unsigned;
16862 break;
16863 default:
16864 llvm_unreachable("unexptected builtin ID");
16865 }
16866
16867 Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
16868 return Builder.CreateCall(Callee, {LHS, RHS});
16869 }
16870 case WebAssembly::BI__builtin_wasm_bitselect: {
16871 Value *V1 = EmitScalarExpr(E->getArg(0));
16872 Value *V2 = EmitScalarExpr(E->getArg(1));
16873 Value *C = EmitScalarExpr(E->getArg(2));
16874 Function *Callee =
16875 CGM.getIntrinsic(Intrinsic::wasm_bitselect, ConvertType(E->getType()));
16876 return Builder.CreateCall(Callee, {V1, V2, C});
16877 }
16878 case WebAssembly::BI__builtin_wasm_signselect_i8x16:
16879 case WebAssembly::BI__builtin_wasm_signselect_i16x8:
16880 case WebAssembly::BI__builtin_wasm_signselect_i32x4:
16881 case WebAssembly::BI__builtin_wasm_signselect_i64x2: {
16882 Value *V1 = EmitScalarExpr(E->getArg(0));
16883 Value *V2 = EmitScalarExpr(E->getArg(1));
16884 Value *C = EmitScalarExpr(E->getArg(2));
16885 Function *Callee =
16886 CGM.getIntrinsic(Intrinsic::wasm_signselect, ConvertType(E->getType()));
16887 return Builder.CreateCall(Callee, {V1, V2, C});
16888 }
16889 case WebAssembly::BI__builtin_wasm_dot_s_i32x4_i16x8: {
16890 Value *LHS = EmitScalarExpr(E->getArg(0));
16891 Value *RHS = EmitScalarExpr(E->getArg(1));
16892 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_dot);
16893 return Builder.CreateCall(Callee, {LHS, RHS});
16894 }
16895 case WebAssembly::BI__builtin_wasm_popcnt_i8x16: {
16896 Value *Vec = EmitScalarExpr(E->getArg(0));
16897 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_popcnt);
16898 return Builder.CreateCall(Callee, {Vec});
16899 }
16900 case WebAssembly::BI__builtin_wasm_eq_i64x2: {
16901 Value *LHS = EmitScalarExpr(E->getArg(0));
16902 Value *RHS = EmitScalarExpr(E->getArg(1));
16903 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_eq);
16904 return Builder.CreateCall(Callee, {LHS, RHS});
16905 }
16906 case WebAssembly::BI__builtin_wasm_any_true_i8x16:
16907 case WebAssembly::BI__builtin_wasm_any_true_i16x8:
16908 case WebAssembly::BI__builtin_wasm_any_true_i32x4:
16909 case WebAssembly::BI__builtin_wasm_any_true_i64x2:
16910 case WebAssembly::BI__builtin_wasm_all_true_i8x16:
16911 case WebAssembly::BI__builtin_wasm_all_true_i16x8:
16912 case WebAssembly::BI__builtin_wasm_all_true_i32x4:
16913 case WebAssembly::BI__builtin_wasm_all_true_i64x2: {
16914 unsigned IntNo;
16915 switch (BuiltinID) {
16916 case WebAssembly::BI__builtin_wasm_any_true_i8x16:
16917 case WebAssembly::BI__builtin_wasm_any_true_i16x8:
16918 case WebAssembly::BI__builtin_wasm_any_true_i32x4:
16919 case WebAssembly::BI__builtin_wasm_any_true_i64x2:
16920 IntNo = Intrinsic::wasm_anytrue;
16921 break;
16922 case WebAssembly::BI__builtin_wasm_all_true_i8x16:
16923 case WebAssembly::BI__builtin_wasm_all_true_i16x8:
16924 case WebAssembly::BI__builtin_wasm_all_true_i32x4:
16925 case WebAssembly::BI__builtin_wasm_all_true_i64x2:
16926 IntNo = Intrinsic::wasm_alltrue;
16927 break;
16928 default:
16929 llvm_unreachable("unexpected builtin ID");
16930 }
16931 Value *Vec = EmitScalarExpr(E->getArg(0));
16932 Function *Callee = CGM.getIntrinsic(IntNo, Vec->getType());
16933 return Builder.CreateCall(Callee, {Vec});
16934 }
16935 case WebAssembly::BI__builtin_wasm_bitmask_i8x16:
16936 case WebAssembly::BI__builtin_wasm_bitmask_i16x8:
16937 case WebAssembly::BI__builtin_wasm_bitmask_i32x4:
16938 case WebAssembly::BI__builtin_wasm_bitmask_i64x2: {
16939 Value *Vec = EmitScalarExpr(E->getArg(0));
16940 Function *Callee =
16941 CGM.getIntrinsic(Intrinsic::wasm_bitmask, Vec->getType());
16942 return Builder.CreateCall(Callee, {Vec});
16943 }
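  // Vector abs/sqrt lower to the target-independent llvm.fabs/llvm.sqrt
  // intrinsics rather than WebAssembly-specific ones.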
16944 case WebAssembly::BI__builtin_wasm_abs_f32x4:
16945 case WebAssembly::BI__builtin_wasm_abs_f64x2: {
16946 Value *Vec = EmitScalarExpr(E->getArg(0));
16947 Function *Callee = CGM.getIntrinsic(Intrinsic::fabs, Vec->getType());
16948 return Builder.CreateCall(Callee, {Vec});
16949 }
16950 case WebAssembly::BI__builtin_wasm_sqrt_f32x4:
16951 case WebAssembly::BI__builtin_wasm_sqrt_f64x2: {
16952 Value *Vec = EmitScalarExpr(E->getArg(0));
16953 Function *Callee = CGM.getIntrinsic(Intrinsic::sqrt, Vec->getType());
16954 return Builder.CreateCall(Callee, {Vec});
16955 }
16956 case WebAssembly::BI__builtin_wasm_qfma_f32x4:
16957 case WebAssembly::BI__builtin_wasm_qfms_f32x4:
16958 case WebAssembly::BI__builtin_wasm_qfma_f64x2:
16959 case WebAssembly::BI__builtin_wasm_qfms_f64x2: {
16960 Value *A = EmitScalarExpr(E->getArg(0));
16961 Value *B = EmitScalarExpr(E->getArg(1));
16962 Value *C = EmitScalarExpr(E->getArg(2));
16963 unsigned IntNo;
16964 switch (BuiltinID) {
16965 case WebAssembly::BI__builtin_wasm_qfma_f32x4:
16966 case WebAssembly::BI__builtin_wasm_qfma_f64x2:
16967 IntNo = Intrinsic::wasm_qfma;
16968 break;
16969 case WebAssembly::BI__builtin_wasm_qfms_f32x4:
16970 case WebAssembly::BI__builtin_wasm_qfms_f64x2:
16971 IntNo = Intrinsic::wasm_qfms;
16972 break;
16973 default:
16974 llvm_unreachable("unexpected builtin ID");
16975 }
16976 Function *Callee = CGM.getIntrinsic(IntNo, A->getType());
16977 return Builder.CreateCall(Callee, {A, B, C});
16978 }
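  // Saturating narrow: the intrinsic is overloaded on both the narrowed
  // result type and the wider input lane type.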
16979 case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
16980 case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
16981 case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
16982 case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4: {
16983 Value *Low = EmitScalarExpr(E->getArg(0));
16984 Value *High = EmitScalarExpr(E->getArg(1));
16985 unsigned IntNo;
16986 switch (BuiltinID) {
16987 case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
16988 case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
16989 IntNo = Intrinsic::wasm_narrow_signed;
16990 break;
16991 case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
16992 case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4:
16993 IntNo = Intrinsic::wasm_narrow_unsigned;
16994 break;
16995 default:
16996 llvm_unreachable("unexpected builtin ID");
16997 }
16998 Function *Callee =
16999 CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Low->getType()});
17000 return Builder.CreateCall(Callee, {Low, High});
17001 }
17002 case WebAssembly::BI__builtin_wasm_widen_low_s_i32x4_i64x2:
17003 case WebAssembly::BI__builtin_wasm_widen_high_s_i32x4_i64x2:
17004 case WebAssembly::BI__builtin_wasm_widen_low_u_i32x4_i64x2:
17005 case WebAssembly::BI__builtin_wasm_widen_high_u_i32x4_i64x2: {
17006 Value *Vec = EmitScalarExpr(E->getArg(0));
17007 unsigned IntNo;
17008 switch (BuiltinID) {
17009 case WebAssembly::BI__builtin_wasm_widen_low_s_i32x4_i64x2:
17010 IntNo = Intrinsic::wasm_widen_low_signed;
17011 break;
17012 case WebAssembly::BI__builtin_wasm_widen_high_s_i32x4_i64x2:
17013 IntNo = Intrinsic::wasm_widen_high_signed;
17014 break;
17015 case WebAssembly::BI__builtin_wasm_widen_low_u_i32x4_i64x2:
17016 IntNo = Intrinsic::wasm_widen_low_unsigned;
17017 break;
17018 case WebAssembly::BI__builtin_wasm_widen_high_u_i32x4_i64x2:
17019 IntNo = Intrinsic::wasm_widen_high_unsigned;
17020 break;
17021 }
17022 Function *Callee = CGM.getIntrinsic(IntNo);
17023 return Builder.CreateCall(Callee, Vec);
17024 }
17025 case WebAssembly::BI__builtin_wasm_load32_zero: {
17026 Value *Ptr = EmitScalarExpr(E->getArg(0));
17027 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_load32_zero);
17028 return Builder.CreateCall(Callee, {Ptr});
17029 }
17030 case WebAssembly::BI__builtin_wasm_load64_zero: {
17031 Value *Ptr = EmitScalarExpr(E->getArg(0));
17032 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_load64_zero);
17033 return Builder.CreateCall(Callee, {Ptr});
17034 }
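  // Lane loads/stores take a lane index that must be an integer constant
  // expression, so getIntegerConstantExpr below is expected to succeed.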
17035 case WebAssembly::BI__builtin_wasm_load8_lane:
17036 case WebAssembly::BI__builtin_wasm_load16_lane:
17037 case WebAssembly::BI__builtin_wasm_load32_lane:
17038 case WebAssembly::BI__builtin_wasm_load64_lane:
17039 case WebAssembly::BI__builtin_wasm_store8_lane:
17040 case WebAssembly::BI__builtin_wasm_store16_lane:
17041 case WebAssembly::BI__builtin_wasm_store32_lane:
17042 case WebAssembly::BI__builtin_wasm_store64_lane: {
17043 Value *Ptr = EmitScalarExpr(E->getArg(0));
17044 Value *Vec = EmitScalarExpr(E->getArg(1));
17045 Optional<llvm::APSInt> LaneIdxConst =
17046 E->getArg(2)->getIntegerConstantExpr(getContext());
17047 assert(LaneIdxConst && "Constant arg isn't actually constant?");
17048 Value *LaneIdx = llvm::ConstantInt::get(getLLVMContext(), *LaneIdxConst);
17049 unsigned IntNo;
17050 switch (BuiltinID) {
17051 case WebAssembly::BI__builtin_wasm_load8_lane:
17052 IntNo = Intrinsic::wasm_load8_lane;
17053 break;
17054 case WebAssembly::BI__builtin_wasm_load16_lane:
17055 IntNo = Intrinsic::wasm_load16_lane;
17056 break;
17057 case WebAssembly::BI__builtin_wasm_load32_lane:
17058 IntNo = Intrinsic::wasm_load32_lane;
17059 break;
17060 case WebAssembly::BI__builtin_wasm_load64_lane:
17061 IntNo = Intrinsic::wasm_load64_lane;
17062 break;
17063 case WebAssembly::BI__builtin_wasm_store8_lane:
17064 IntNo = Intrinsic::wasm_store8_lane;
17065 break;
17066 case WebAssembly::BI__builtin_wasm_store16_lane:
17067 IntNo = Intrinsic::wasm_store16_lane;
17068 break;
17069 case WebAssembly::BI__builtin_wasm_store32_lane:
17070 IntNo = Intrinsic::wasm_store32_lane;
17071 break;
17072 case WebAssembly::BI__builtin_wasm_store64_lane:
17073 IntNo = Intrinsic::wasm_store64_lane;
17074 break;
17075 default:
17076 llvm_unreachable("unexpected builtin ID");
17077 }
17078 Function *Callee = CGM.getIntrinsic(IntNo);
17079 return Builder.CreateCall(Callee, {Ptr, Vec, LaneIdx});
17080 }
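  // The shuffle builtin takes two input vectors followed by 16 constant lane
  // indices; all 18 operands are forwarded unchanged to wasm.shuffle.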
17081 case WebAssembly::BI__builtin_wasm_shuffle_v8x16: {
17082 Value *Ops[18];
17083 size_t OpIdx = 0;
17084 Ops[OpIdx++] = EmitScalarExpr(E->getArg(0));
17085 Ops[OpIdx++] = EmitScalarExpr(E->getArg(1));
17086 while (OpIdx < 18) {
17087 Optional<llvm::APSInt> LaneConst =
17088 E->getArg(OpIdx)->getIntegerConstantExpr(getContext());
17089 assert(LaneConst && "Constant arg isn't actually constant?");
17090 Ops[OpIdx++] = llvm::ConstantInt::get(getLLVMContext(), *LaneConst);
17091 }
17092 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_shuffle);
17093 return Builder.CreateCall(Callee, Ops);
17094 }
17095 default:
17096 return nullptr;
17097 }
17098 }
17099
17100 static std::pair<Intrinsic::ID, unsigned>
17101 getIntrinsicForHexagonNonGCCBuiltin(unsigned BuiltinID) {
17102 struct Info {
17103 unsigned BuiltinID;
17104 Intrinsic::ID IntrinsicID;
17105 unsigned VecLen;
17106 };
17107 Info Infos[] = {
17108 #define CUSTOM_BUILTIN_MAPPING(x,s) \
17109 { Hexagon::BI__builtin_HEXAGON_##x, Intrinsic::hexagon_##x, s },
17110 CUSTOM_BUILTIN_MAPPING(L2_loadrub_pci, 0)
17111 CUSTOM_BUILTIN_MAPPING(L2_loadrb_pci, 0)
17112 CUSTOM_BUILTIN_MAPPING(L2_loadruh_pci, 0)
17113 CUSTOM_BUILTIN_MAPPING(L2_loadrh_pci, 0)
17114 CUSTOM_BUILTIN_MAPPING(L2_loadri_pci, 0)
17115 CUSTOM_BUILTIN_MAPPING(L2_loadrd_pci, 0)
17116 CUSTOM_BUILTIN_MAPPING(L2_loadrub_pcr, 0)
17117 CUSTOM_BUILTIN_MAPPING(L2_loadrb_pcr, 0)
17118 CUSTOM_BUILTIN_MAPPING(L2_loadruh_pcr, 0)
17119 CUSTOM_BUILTIN_MAPPING(L2_loadrh_pcr, 0)
17120 CUSTOM_BUILTIN_MAPPING(L2_loadri_pcr, 0)
17121 CUSTOM_BUILTIN_MAPPING(L2_loadrd_pcr, 0)
17122 CUSTOM_BUILTIN_MAPPING(S2_storerb_pci, 0)
17123 CUSTOM_BUILTIN_MAPPING(S2_storerh_pci, 0)
17124 CUSTOM_BUILTIN_MAPPING(S2_storerf_pci, 0)
17125 CUSTOM_BUILTIN_MAPPING(S2_storeri_pci, 0)
17126 CUSTOM_BUILTIN_MAPPING(S2_storerd_pci, 0)
17127 CUSTOM_BUILTIN_MAPPING(S2_storerb_pcr, 0)
17128 CUSTOM_BUILTIN_MAPPING(S2_storerh_pcr, 0)
17129 CUSTOM_BUILTIN_MAPPING(S2_storerf_pcr, 0)
17130 CUSTOM_BUILTIN_MAPPING(S2_storeri_pcr, 0)
17131 CUSTOM_BUILTIN_MAPPING(S2_storerd_pcr, 0)
17132 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq, 64)
17133 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq, 64)
17134 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq, 64)
17135 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq, 64)
17136 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq_128B, 128)
17137 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq_128B, 128)
17138 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq_128B, 128)
17139 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq_128B, 128)
17140 #include "clang/Basic/BuiltinsHexagonMapCustomDep.def"
17141 #undef CUSTOM_BUILTIN_MAPPING
17142 };
17143
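  // Sort the mapping table on first use so the binary search below via
  // std::lower_bound is valid.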
17144 auto CmpInfo = [] (Info A, Info B) { return A.BuiltinID < B.BuiltinID; };
17145 static const bool SortOnce = (llvm::sort(Infos, CmpInfo), true);
17146 (void)SortOnce;
17147
17148 const Info *F = std::lower_bound(std::begin(Infos), std::end(Infos),
17149 Info{BuiltinID, 0, 0}, CmpInfo);
17150 if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
17151 return {Intrinsic::not_intrinsic, 0};
17152
17153 return {F->IntrinsicID, F->VecLen};
17154 }
17155
17156 Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
17157 const CallExpr *E) {
17158 Intrinsic::ID ID;
17159 unsigned VecLen;
17160 std::tie(ID, VecLen) = getIntrinsicForHexagonNonGCCBuiltin(BuiltinID);
17161
17162 auto MakeCircOp = [this, E](unsigned IntID, bool IsLoad) {
17163 // The base pointer is passed by address, so it needs to be loaded.
17164 Address A = EmitPointerWithAlignment(E->getArg(0));
17165 Address BP = Address(
17166 Builder.CreateBitCast(A.getPointer(), Int8PtrPtrTy), A.getAlignment());
17167 llvm::Value *Base = Builder.CreateLoad(BP);
17168 // The treatment of both loads and stores is the same: the arguments for
17169 // the builtin are the same as the arguments for the intrinsic.
17170 // Load:
17171 // builtin(Base, Inc, Mod, Start) -> intr(Base, Inc, Mod, Start)
17172 // builtin(Base, Mod, Start) -> intr(Base, Mod, Start)
17173 // Store:
17174 // builtin(Base, Inc, Mod, Val, Start) -> intr(Base, Inc, Mod, Val, Start)
17175 // builtin(Base, Mod, Val, Start) -> intr(Base, Mod, Val, Start)
17176 SmallVector<llvm::Value*,5> Ops = { Base };
17177 for (unsigned i = 1, e = E->getNumArgs(); i != e; ++i)
17178 Ops.push_back(EmitScalarExpr(E->getArg(i)));
17179
17180 llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
17181 // The load intrinsics generate two results (Value, NewBase), stores
17182 // generate one (NewBase). The new base address needs to be stored.
17183 llvm::Value *NewBase = IsLoad ? Builder.CreateExtractValue(Result, 1)
17184 : Result;
17185 llvm::Value *LV = Builder.CreateBitCast(
17186 EmitScalarExpr(E->getArg(0)), NewBase->getType()->getPointerTo());
17187 Address Dest = EmitPointerWithAlignment(E->getArg(0));
17188 llvm::Value *RetVal =
17189 Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
17190 if (IsLoad)
17191 RetVal = Builder.CreateExtractValue(Result, 0);
17192 return RetVal;
17193 };
17194
17195   // Handle the conversion of bit-reverse load builtins to LLVM IR.
17196   // The intrinsic call emitted by this lambda only reads from memory; the
17197   // write to memory is handled by the store instruction it also emits.
17198 auto MakeBrevLd = [this, E](unsigned IntID, llvm::Type *DestTy) {
17199     // The builtin returns the updated base pointer, taken from the second
17200     // element of the intrinsic's result. The loaded value itself is passed
17201     // back through the builtin's pointer argument, so it must be stored.
17202 llvm::Value *BaseAddress =
17203 Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int8PtrTy);
17204
17205     // Expressions like &(*pt++) are incremented on every evaluation, and
17206     // EmitPointerWithAlignment and EmitScalarExpr each evaluate the
17207     // expression anew, so the destination argument is emitted only once.
17208 Address DestAddr = EmitPointerWithAlignment(E->getArg(1));
17209 DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), Int8PtrTy),
17210 DestAddr.getAlignment());
17211 llvm::Value *DestAddress = DestAddr.getPointer();
17212
17213 // Operands are Base, Dest, Modifier.
17214 // The intrinsic format in LLVM IR is defined as
17215 // { ValueType, i8* } (i8*, i32).
17216 llvm::Value *Result = Builder.CreateCall(
17217 CGM.getIntrinsic(IntID), {BaseAddress, EmitScalarExpr(E->getArg(2))});
17218
17219 // The value needs to be stored as the variable is passed by reference.
17220 llvm::Value *DestVal = Builder.CreateExtractValue(Result, 0);
17221
17222     // The stored value needs to be truncated to fit the destination type.
17223     // While i32 and i64 are natively supported on Hexagon, i8 and i16 need
17224     // to be handled with stores of the respective destination type.
17225 DestVal = Builder.CreateTrunc(DestVal, DestTy);
17226
17227 llvm::Value *DestForStore =
17228 Builder.CreateBitCast(DestAddress, DestVal->getType()->getPointerTo());
17229 Builder.CreateAlignedStore(DestVal, DestForStore, DestAddr.getAlignment());
17230 // The updated value of the base pointer is returned.
17231 return Builder.CreateExtractValue(Result, 1);
17232 };
17233
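  // V2Q and Q2V convert between HVX vector values and the boolean-vector
  // (predicate) type some intrinsics use, via vandvrt/vandqrt with an
  // all-ones scalar mask.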
17234 auto V2Q = [this, VecLen] (llvm::Value *Vec) {
17235 Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandvrt_128B
17236 : Intrinsic::hexagon_V6_vandvrt;
17237 return Builder.CreateCall(CGM.getIntrinsic(ID),
17238 {Vec, Builder.getInt32(-1)});
17239 };
17240 auto Q2V = [this, VecLen] (llvm::Value *Pred) {
17241 Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandqrt_128B
17242 : Intrinsic::hexagon_V6_vandqrt;
17243 return Builder.CreateCall(CGM.getIntrinsic(ID),
17244 {Pred, Builder.getInt32(-1)});
17245 };
17246
17247 switch (BuiltinID) {
17248 // These intrinsics return a tuple {Vector, VectorPred} in LLVM IR,
17249 // and the corresponding C/C++ builtins use loads/stores to update
17250 // the predicate.
17251 case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry:
17252 case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B:
17253 case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry:
17254 case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: {
17255 // Get the type from the 0-th argument.
17256 llvm::Type *VecType = ConvertType(E->getArg(0)->getType());
17257 Address PredAddr = Builder.CreateBitCast(
17258 EmitPointerWithAlignment(E->getArg(2)), VecType->getPointerTo(0));
17259 llvm::Value *PredIn = V2Q(Builder.CreateLoad(PredAddr));
17260 llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID),
17261 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), PredIn});
17262
17263 llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1);
17264 Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.getPointer(),
17265 PredAddr.getAlignment());
17266 return Builder.CreateExtractValue(Result, 0);
17267 }
17268
17269 case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci:
17270 case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci:
17271 case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci:
17272 case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci:
17273 case Hexagon::BI__builtin_HEXAGON_L2_loadri_pci:
17274 case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci:
17275 case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pcr:
17276 case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pcr:
17277 case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pcr:
17278 case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pcr:
17279 case Hexagon::BI__builtin_HEXAGON_L2_loadri_pcr:
17280 case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pcr:
17281 return MakeCircOp(ID, /*IsLoad=*/true);
17282 case Hexagon::BI__builtin_HEXAGON_S2_storerb_pci:
17283 case Hexagon::BI__builtin_HEXAGON_S2_storerh_pci:
17284 case Hexagon::BI__builtin_HEXAGON_S2_storerf_pci:
17285 case Hexagon::BI__builtin_HEXAGON_S2_storeri_pci:
17286 case Hexagon::BI__builtin_HEXAGON_S2_storerd_pci:
17287 case Hexagon::BI__builtin_HEXAGON_S2_storerb_pcr:
17288 case Hexagon::BI__builtin_HEXAGON_S2_storerh_pcr:
17289 case Hexagon::BI__builtin_HEXAGON_S2_storerf_pcr:
17290 case Hexagon::BI__builtin_HEXAGON_S2_storeri_pcr:
17291 case Hexagon::BI__builtin_HEXAGON_S2_storerd_pcr:
17292 return MakeCircOp(ID, /*IsLoad=*/false);
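  // Bit-reverse loads. For example, __builtin_brev_ldw(Base, &Dst, Mod) maps
  // to llvm.hexagon.L2.loadri.pbr: the loaded value is stored through Dst and
  // the updated base pointer is returned.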
17293 case Hexagon::BI__builtin_brev_ldub:
17294 return MakeBrevLd(Intrinsic::hexagon_L2_loadrub_pbr, Int8Ty);
17295 case Hexagon::BI__builtin_brev_ldb:
17296 return MakeBrevLd(Intrinsic::hexagon_L2_loadrb_pbr, Int8Ty);
17297 case Hexagon::BI__builtin_brev_lduh:
17298 return MakeBrevLd(Intrinsic::hexagon_L2_loadruh_pbr, Int16Ty);
17299 case Hexagon::BI__builtin_brev_ldh:
17300 return MakeBrevLd(Intrinsic::hexagon_L2_loadrh_pbr, Int16Ty);
17301 case Hexagon::BI__builtin_brev_ldw:
17302 return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty);
17303 case Hexagon::BI__builtin_brev_ldd:
17304 return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty);
17305
17306 default: {
17307 if (ID == Intrinsic::not_intrinsic)
17308 return nullptr;
17309
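    // Generic case: emit each argument according to the intrinsic's parameter
    // types, converting to and from the i1-vector predicate representation
    // where the intrinsic expects or produces one.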
17310 auto IsVectorPredTy = [](llvm::Type *T) {
17311 return T->isVectorTy() &&
17312 cast<llvm::VectorType>(T)->getElementType()->isIntegerTy(1);
17313 };
17314
17315 llvm::Function *IntrFn = CGM.getIntrinsic(ID);
17316 llvm::FunctionType *IntrTy = IntrFn->getFunctionType();
17317 SmallVector<llvm::Value*,4> Ops;
17318 for (unsigned i = 0, e = IntrTy->getNumParams(); i != e; ++i) {
17319 llvm::Type *T = IntrTy->getParamType(i);
17320 const Expr *A = E->getArg(i);
17321 if (IsVectorPredTy(T)) {
17322 // There will be an implicit cast to a boolean vector. Strip it.
17323 if (auto *Cast = dyn_cast<ImplicitCastExpr>(A)) {
17324 if (Cast->getCastKind() == CK_BitCast)
17325 A = Cast->getSubExpr();
17326 }
17327 Ops.push_back(V2Q(EmitScalarExpr(A)));
17328 } else {
17329 Ops.push_back(EmitScalarExpr(A));
17330 }
17331 }
17332
17333 llvm::Value *Call = Builder.CreateCall(IntrFn, Ops);
17334 if (IsVectorPredTy(IntrTy->getReturnType()))
17335 Call = Q2V(Call);
17336
17337 return Call;
17338 } // default
17339 } // switch
17340
17341 return nullptr;
17342 }
17343