Lines Matching +full:get +full:- +full:intrinsic

1 //===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
8 //===----------------------------------------------------------------------===//
12 //===----------------------------------------------------------------------===//
36 /// getBuiltinLibFunction - Given a builtin id for a function like
42 // Get the name, skip over the __builtin_ prefix (if necessary). in getBuiltinLibFunction()
49 if (FD->hasAttr<AsmLabelAttr>()) in getBuiltinLibFunction()
55 cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType())); in getBuiltinLibFunction()
66 if (V->getType()->isPointerTy()) in EmitToInt()
69 assert(V->getType() == IntType); in EmitToInt()
77 if (ResultType->isPointerTy()) in EmitFromInt()
80 assert(V->getType() == ResultType); in EmitFromInt()
89 QualType T = E->getType(); in MakeBinaryAtomicValue()
90 assert(E->getArg(0)->getType()->isPointerType()); in MakeBinaryAtomicValue()
92 E->getArg(0)->getType()->getPointeeType())); in MakeBinaryAtomicValue()
93 assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType())); in MakeBinaryAtomicValue()
95 llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); in MakeBinaryAtomicValue()
96 unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); in MakeBinaryAtomicValue()
99 llvm::IntegerType::get(CGF.getLLVMContext(), in MakeBinaryAtomicValue()
101 llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); in MakeBinaryAtomicValue()
105 Args[1] = CGF.EmitScalarExpr(E->getArg(1)); in MakeBinaryAtomicValue()
106 llvm::Type *ValueType = Args[1]->getType(); in MakeBinaryAtomicValue()
115 Value *Val = CGF.EmitScalarExpr(E->getArg(0)); in EmitNontemporalStore()
116 Value *Address = CGF.EmitScalarExpr(E->getArg(1)); in EmitNontemporalStore()
119 Val = CGF.EmitToMemory(Val, E->getArg(0)->getType()); in EmitNontemporalStore()
121 Address, llvm::PointerType::getUnqual(Val->getType()), "cast"); in EmitNontemporalStore()
122 LValue LV = CGF.MakeNaturalAlignAddrLValue(BC, E->getArg(0)->getType()); in EmitNontemporalStore()
129 Value *Address = CGF.EmitScalarExpr(E->getArg(0)); in EmitNontemporalLoad()
131 LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType()); in EmitNontemporalLoad()
133 return CGF.EmitLoadOfScalar(LV, E->getExprLoc()); in EmitNontemporalLoad()
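The two helpers above lower Clang's nontemporal builtins to ordinary loads and stores tagged with !nontemporal metadata. A minimal caller-side sketch (function and buffer names are illustrative only):

    // Streaming copy using the builtins handled by EmitNontemporalStore /
    // EmitNontemporalLoad above.
    void stream_copy(float *dst, float *src, int n) {
      for (int i = 0; i < n; ++i) {
        float v = __builtin_nontemporal_load(&src[i]);  // arg0 = address
        __builtin_nontemporal_store(v, &dst[i]);        // arg0 = value, arg1 = address
      }
    }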
139 return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E)); in EmitBinaryAtomic()
150 QualType T = E->getType(); in EmitBinaryAtomicPost()
151 assert(E->getArg(0)->getType()->isPointerType()); in EmitBinaryAtomicPost()
153 E->getArg(0)->getType()->getPointeeType())); in EmitBinaryAtomicPost()
154 assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType())); in EmitBinaryAtomicPost()
156 llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); in EmitBinaryAtomicPost()
157 unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); in EmitBinaryAtomicPost()
160 llvm::IntegerType::get(CGF.getLLVMContext(), in EmitBinaryAtomicPost()
162 llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); in EmitBinaryAtomicPost()
165 Args[1] = CGF.EmitScalarExpr(E->getArg(1)); in EmitBinaryAtomicPost()
166 llvm::Type *ValueType = Args[1]->getType(); in EmitBinaryAtomicPost()
175 llvm::ConstantInt::get(IntType, -1)); in EmitBinaryAtomicPost()
177 return RValue::get(Result); in EmitBinaryAtomicPost()
184 /// arg0 - address to operate on
185 /// arg1 - value to compare with
186 /// arg2 - new value
193 QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType(); in MakeAtomicCmpXchgValue()
194 llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); in MakeAtomicCmpXchgValue()
195 unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); in MakeAtomicCmpXchgValue()
197 llvm::IntegerType *IntType = llvm::IntegerType::get( in MakeAtomicCmpXchgValue()
199 llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); in MakeAtomicCmpXchgValue()
203 Args[1] = CGF.EmitScalarExpr(E->getArg(1)); in MakeAtomicCmpXchgValue()
204 llvm::Type *ValueType = Args[1]->getType(); in MakeAtomicCmpXchgValue()
206 Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType); in MakeAtomicCmpXchgValue()
214 CGF.ConvertType(E->getType())); in MakeAtomicCmpXchgValue()
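MakeAtomicCmpXchgValue serves the GCC-style compare-and-swap builtins; ReturnBool selects between the value-returning and flag-returning forms. A small usage sketch:

    // __sync_val_compare_and_swap returns the previous contents of *p,
    // __sync_bool_compare_and_swap returns whether the exchange happened.
    int cas_old(int *p, int expected, int desired) {
      return __sync_val_compare_and_swap(p, expected, desired);
    }
    bool cas_ok(int *p, int expected, int desired) {
      return __sync_bool_compare_and_swap(p, expected, desired);
    }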
221 // Emit a simple mangled intrinsic that has 1 argument and a return type
226 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); in emitUnaryBuiltin()
228 Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); in emitUnaryBuiltin()
232 // Emit an intrinsic that has 2 operands of the same type as its result.
236 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); in emitBinaryBuiltin()
237 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); in emitBinaryBuiltin()
239 Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); in emitBinaryBuiltin()
243 // Emit an intrinsic that has 3 operands of the same type as its result.
247 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); in emitTernaryBuiltin()
248 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); in emitTernaryBuiltin()
249 llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2)); in emitTernaryBuiltin()
251 Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); in emitTernaryBuiltin()
255 // Emit an intrinsic that has 1 float or double operand, and 1 integer.
259 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); in emitFPIntBuiltin()
260 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); in emitFPIntBuiltin()
262 Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); in emitFPIntBuiltin()
266 /// EmitFAbs - Emit a call to @llvm.fabs().
268 Value *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType()); in EmitFAbs()
270 Call->setDoesNotAccessMemory(); in EmitFAbs()
279 llvm::Type *Ty = V->getType(); in EmitSignBit()
280 int Width = Ty->getPrimitiveSizeInBits(); in EmitSignBit()
281 llvm::Type *IntTy = llvm::IntegerType::get(C, Width); in EmitSignBit()
283 if (Ty->isPPC_FP128Ty()) { in EmitSignBit()
284 // We want the sign bit of the higher-order double. The bitcast we just in EmitSignBit()
285 // did works as if the double-double was stored to memory and then in EmitSignBit()
286 // read as an i128. The "store" will put the higher-order double in the in EmitSignBit()
287 // lower address in both little- and big-Endian modes, but the "load" in EmitSignBit()
289 // little-Endian, the high bits in big-Endian. Therefore, on big-Endian in EmitSignBit()
293 Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width); in EmitSignBit()
296 // We are truncating the value in order to extract the higher-order in EmitSignBit()
298 IntTy = llvm::IntegerType::get(C, Width); in EmitSignBit()
307 return CGF.EmitCall(E->getCallee()->getType(), calleeValue, E, in emitLibraryCall()
315 /// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
319 /// \returns The result (i.e. sum/product) returned by the intrinsic.
321 const llvm::Intrinsic::ID IntrinsicID, in EmitOverflowIntrinsic()
325 assert(X->getType() == Y->getType() && in EmitOverflowIntrinsic()
329 llvm::Value *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType()); in EmitOverflowIntrinsic()
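EmitOverflowIntrinsic wraps the llvm.*.with.overflow intrinsics that back Clang's checked-arithmetic builtins; each intrinsic returns a {result, i1 overflow} pair. Caller-side sketch:

    // Returns true if the signed add overflowed; the wrapped result is
    // still written to *out.
    bool checked_add(int a, int b, int *out) {
      return __builtin_sadd_overflow(a, b, out);
    }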
342 Call->setMetadata(llvm::LLVMContext::MD_range, RNode); in emitRangedBuiltin()
356 assert(Type->isIntegerType() && "Given type is not an integer."); in getIntegerWidthAndSignedness()
357 unsigned Width = Type->isBooleanType() ? 1 : context.getTypeInfo(Type).Width; in getIntegerWidthAndSignedness()
358 bool Signed = Type->isSignedIntegerType(); in getIntegerWidthAndSignedness()
392 if (ArgValue->getType() != DestType) in EmitVAStartEnd()
394 Builder.CreateBitCast(ArgValue, DestType, ArgValue->getName().data()); in EmitVAStartEnd()
396 Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend; in EmitVAStartEnd()
411 return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true); in getDefaultBuiltinObjectSizeResult()
418 if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type)) in evaluateOrEmitBuiltinObjectSize()
420 return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true); in evaluateOrEmitBuiltinObjectSize()
425 /// - A llvm::Argument (if E is a param with the pass_object_size attribute on
427 /// - A call to the @llvm.objectsize intrinsic
433 if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) { in emitBuiltinObjectSize()
434 auto *Param = dyn_cast<ParmVarDecl>(D->getDecl()); in emitBuiltinObjectSize()
435 auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>(); in emitBuiltinObjectSize()
437 areBOSTypesCompatible(PS->getType(), Type)) { in emitBuiltinObjectSize()
441 const ImplicitParamDecl *D = Iter->second; in emitBuiltinObjectSize()
445 return EmitLoadOfScalar(DIter->second, /*volatile=*/false, in emitBuiltinObjectSize()
446 getContext().getSizeType(), E->getLocStart()); in emitBuiltinObjectSize()
451 // evaluate E for side-effects. In either case, we shouldn't lower to in emitBuiltinObjectSize()
453 if (Type == 3 || E->HasSideEffects(getContext())) in emitBuiltinObjectSize()
458 auto *CI = ConstantInt::get(Builder.getInt1Ty(), (Type & 2) >> 1); in emitBuiltinObjectSize()
459 // FIXME: Get right address space. in emitBuiltinObjectSize()
461 Value *F = CGM.getIntrinsic(Intrinsic::objectsize, Tys); in emitBuiltinObjectSize()
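emitBuiltinObjectSize implements __builtin_object_size; the second argument (0-3) selects maximum vs. minimum estimates, and types 2/3 report 0 instead of -1 when the size is unknown, which is what the (Type & 2) check in getDefaultBuiltinObjectSizeResult encodes. Usage sketch:

    #include <cstddef>
    #include <cstring>
    void safe_zero(char *buf) {
      std::size_t n = __builtin_object_size(buf, 0);  // type 0: max size, -1 if unknown
      if (n != (std::size_t)-1)
        std::memset(buf, 0, n);
    }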
470 if (E->EvaluateAsRValue(Result, CGM.getContext()) && in EmitBuiltinExpr()
473 return RValue::get(llvm::ConstantInt::get(getLLVMContext(), in EmitBuiltinExpr()
476 return RValue::get(llvm::ConstantFP::get(getLLVMContext(), in EmitBuiltinExpr()
484 return RValue::get(CGM.EmitConstantExpr(E, E->getType(), nullptr)); in EmitBuiltinExpr()
489 return RValue::get( in EmitBuiltinExpr()
491 ? EmitScalarExpr(E->getArg(0)) in EmitBuiltinExpr()
492 : EmitVAListRef(E->getArg(0)).getPointer(), in EmitBuiltinExpr()
495 Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer(); in EmitBuiltinExpr()
496 Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer(); in EmitBuiltinExpr()
502 return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy), in EmitBuiltinExpr()
508 Value *ArgValue = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
513 llvm::Constant::getNullValue(ArgValue->getType()), in EmitBuiltinExpr()
518 return RValue::get(Result); in EmitBuiltinExpr()
523 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::fabs)); in EmitBuiltinExpr()
528 Value *Arg1 = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
529 Value *Arg2 = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
531 return RValue::get(Result); in EmitBuiltinExpr()
536 return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::copysign)); in EmitBuiltinExpr()
541 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::ceil)); in EmitBuiltinExpr()
546 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::floor)); in EmitBuiltinExpr()
551 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::trunc)); in EmitBuiltinExpr()
556 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::rint)); in EmitBuiltinExpr()
561 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::nearbyint)); in EmitBuiltinExpr()
566 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::round)); in EmitBuiltinExpr()
571 return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::minnum)); in EmitBuiltinExpr()
576 return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::maxnum)); in EmitBuiltinExpr()
581 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); in EmitBuiltinExpr()
585 Imag->getType()->isFPOrFPVectorTy() in EmitBuiltinExpr()
586 ? llvm::ConstantFP::getZeroValueForNegation(Imag->getType()) in EmitBuiltinExpr()
587 : llvm::Constant::getNullValue(Imag->getType()); in EmitBuiltinExpr()
598 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); in EmitBuiltinExpr()
599 return RValue::get(ComplexVal.first); in EmitBuiltinExpr()
608 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); in EmitBuiltinExpr()
609 return RValue::get(ComplexVal.second); in EmitBuiltinExpr()
616 Value *ArgValue = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
618 llvm::Type *ArgType = ArgValue->getType(); in EmitBuiltinExpr()
619 Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType); in EmitBuiltinExpr()
621 llvm::Type *ResultType = ConvertType(E->getType()); in EmitBuiltinExpr()
624 if (Result->getType() != ResultType) in EmitBuiltinExpr()
627 return RValue::get(Result); in EmitBuiltinExpr()
633 Value *ArgValue = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
635 llvm::Type *ArgType = ArgValue->getType(); in EmitBuiltinExpr()
636 Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType); in EmitBuiltinExpr()
638 llvm::Type *ResultType = ConvertType(E->getType()); in EmitBuiltinExpr()
641 if (Result->getType() != ResultType) in EmitBuiltinExpr()
644 return RValue::get(Result); in EmitBuiltinExpr()
649 // ffs(x) -> x ? cttz(x) + 1 : 0 in EmitBuiltinExpr()
650 Value *ArgValue = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
652 llvm::Type *ArgType = ArgValue->getType(); in EmitBuiltinExpr()
653 Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType); in EmitBuiltinExpr()
655 llvm::Type *ResultType = ConvertType(E->getType()); in EmitBuiltinExpr()
658 llvm::ConstantInt::get(ArgType, 1)); in EmitBuiltinExpr()
662 if (Result->getType() != ResultType) in EmitBuiltinExpr()
665 return RValue::get(Result); in EmitBuiltinExpr()
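The block above implements the identity from its leading comment, ffs(x) -> x ? cttz(x) + 1 : 0. The same relationship written against the ctz builtin:

    // Source-level equivalent of the __builtin_ffs lowering; the guard is
    // needed because cttz/ctz is undefined at zero.
    int ffs_equiv(unsigned x) {
      return x ? __builtin_ctz(x) + 1 : 0;
    }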
670 // parity(x) -> ctpop(x) & 1 in EmitBuiltinExpr()
671 Value *ArgValue = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
673 llvm::Type *ArgType = ArgValue->getType(); in EmitBuiltinExpr()
674 Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType); in EmitBuiltinExpr()
676 llvm::Type *ResultType = ConvertType(E->getType()); in EmitBuiltinExpr()
678 Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1)); in EmitBuiltinExpr()
679 if (Result->getType() != ResultType) in EmitBuiltinExpr()
682 return RValue::get(Result); in EmitBuiltinExpr()
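Likewise, __builtin_parity is emitted as ctpop(x) & 1:

    // 1 if an odd number of bits are set, 0 otherwise.
    int parity_equiv(unsigned x) {
      return __builtin_popcount(x) & 1;
    }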
687 Value *ArgValue = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
689 llvm::Type *ArgType = ArgValue->getType(); in EmitBuiltinExpr()
690 Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType); in EmitBuiltinExpr()
692 llvm::Type *ResultType = ConvertType(E->getType()); in EmitBuiltinExpr()
694 if (Result->getType() != ResultType) in EmitBuiltinExpr()
697 return RValue::get(Result); in EmitBuiltinExpr()
703 return RValue::get(EmitScalarExpr(E->getArg(0))); in EmitBuiltinExpr()
706 Value *ArgValue = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
707 llvm::Type *ArgType = ArgValue->getType(); in EmitBuiltinExpr()
709 Value *ExpectedValue = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
710 // Don't generate llvm.expect on -O0 as the backend won't use it for in EmitBuiltinExpr()
712 // Note, we still IRGen ExpectedValue because it could have side-effects. in EmitBuiltinExpr()
714 return RValue::get(ArgValue); in EmitBuiltinExpr()
716 Value *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType); in EmitBuiltinExpr()
719 return RValue::get(Result); in EmitBuiltinExpr()
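llvm.expect is only emitted when optimizing (the -O0 check above), but the expected-value expression is still evaluated for side effects. Typical use of the builtin:

    int parse(int err) {
      if (__builtin_expect(err != 0, 0))   // hint: the error path is unlikely
        return -1;
      return 0;
    }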
722 Value *PtrValue = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
724 (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr; in EmitBuiltinExpr()
726 Value *AlignmentValue = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
728 unsigned Alignment = (unsigned) AlignmentCI->getZExtValue(); in EmitBuiltinExpr()
731 return RValue::get(PtrValue); in EmitBuiltinExpr()
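__builtin_assume_aligned returns its pointer argument wrapped in an alignment assumption (arg1 is the alignment, the optional arg2 an offset). For example:

    float sum4(const float *p, int n) {
      // Promise the optimizer that p is 16-byte aligned; the builtin
      // returns void*, hence the cast back.
      const float *q = (const float *)__builtin_assume_aligned(p, 16);
      float s = 0;
      for (int i = 0; i < n; ++i) s += q[i];
      return s;
    }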
735 if (E->getArg(0)->HasSideEffects(getContext())) in EmitBuiltinExpr()
736 return RValue::get(nullptr); in EmitBuiltinExpr()
738 Value *ArgValue = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
739 Value *FnAssume = CGM.getIntrinsic(Intrinsic::assume); in EmitBuiltinExpr()
740 return RValue::get(Builder.CreateCall(FnAssume, ArgValue)); in EmitBuiltinExpr()
745 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap)); in EmitBuiltinExpr()
751 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse)); in EmitBuiltinExpr()
755 E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue(); in EmitBuiltinExpr()
756 auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType())); in EmitBuiltinExpr()
760 return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType)); in EmitBuiltinExpr()
763 Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
765 RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) : in EmitBuiltinExpr()
766 llvm::ConstantInt::get(Int32Ty, 0); in EmitBuiltinExpr()
767 Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : in EmitBuiltinExpr()
768 llvm::ConstantInt::get(Int32Ty, 3); in EmitBuiltinExpr()
769 Value *Data = llvm::ConstantInt::get(Int32Ty, 1); in EmitBuiltinExpr()
770 Value *F = CGM.getIntrinsic(Intrinsic::prefetch); in EmitBuiltinExpr()
771 return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data})); in EmitBuiltinExpr()
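The prefetch case fills in the documented defaults when arguments are omitted (rw = 0 for read, locality = 3 for maximum temporal locality):

    void warm(const int *p) {
      __builtin_prefetch(p, /*rw=*/0, /*locality=*/3);  // same as __builtin_prefetch(p)
    }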
774 Value *F = CGM.getIntrinsic(Intrinsic::readcyclecounter); in EmitBuiltinExpr()
775 return RValue::get(Builder.CreateCall(F)); in EmitBuiltinExpr()
778 Value *Begin = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
779 Value *End = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
780 Value *F = CGM.getIntrinsic(Intrinsic::clear_cache); in EmitBuiltinExpr()
781 return RValue::get(Builder.CreateCall(F, {Begin, End})); in EmitBuiltinExpr()
784 return RValue::get(EmitTrapCall(Intrinsic::trap)); in EmitBuiltinExpr()
786 return RValue::get(EmitTrapCall(Intrinsic::debugtrap)); in EmitBuiltinExpr()
792 "builtin_unreachable", EmitCheckSourceLocation(E->getExprLoc()), in EmitBuiltinExpr()
800 return RValue::get(nullptr); in EmitBuiltinExpr()
806 Value *Base = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
807 Value *Exponent = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
808 llvm::Type *ArgType = Base->getType(); in EmitBuiltinExpr()
809 Value *F = CGM.getIntrinsic(Intrinsic::powi, ArgType); in EmitBuiltinExpr()
810 return RValue::get(Builder.CreateCall(F, {Base, Exponent})); in EmitBuiltinExpr()
821 Value *LHS = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
822 Value *RHS = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
846 return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()))); in EmitBuiltinExpr()
849 Value *V = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
851 return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()))); in EmitBuiltinExpr()
856 // isinf(x) --> fabs(x) == infinity in EmitBuiltinExpr()
857 // isfinite(x) --> fabs(x) != infinity in EmitBuiltinExpr()
859 Value *V = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
861 Constant *Infinity = ConstantFP::getInfinity(V->getType()); in EmitBuiltinExpr()
866 return RValue::get(Builder.CreateZExt(FCmp, ConvertType(E->getType()))); in EmitBuiltinExpr()
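Both isinf and isfinite reduce to a fabs-against-infinity compare, as the comments note. A source-level restatement (the emitted isfinite compare is ordered, so NaN reports as not finite; the explicit isnan test mirrors that here):

    bool isinf_equiv(double x)    { return __builtin_fabs(x) == __builtin_inf(); }
    bool isfinite_equiv(double x) { return !__builtin_isnan(x) &&
                                           __builtin_fabs(x) != __builtin_inf(); }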
870 // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0 in EmitBuiltinExpr()
871 Value *Arg = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
874 AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf"); in EmitBuiltinExpr()
877 llvm::Type *IntTy = ConvertType(E->getType()); in EmitBuiltinExpr()
879 Value *One = ConstantInt::get(IntTy, 1); in EmitBuiltinExpr()
880 Value *NegativeOne = ConstantInt::get(IntTy, -1); in EmitBuiltinExpr()
883 return RValue::get(Result); in EmitBuiltinExpr()
887 // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min in EmitBuiltinExpr()
888 Value *V = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
893 Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf"); in EmitBuiltinExpr()
895 getContext().getFloatTypeSemantics(E->getArg(0)->getType())); in EmitBuiltinExpr()
897 Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest), in EmitBuiltinExpr()
901 return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()))); in EmitBuiltinExpr()
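The isnormal path checks the three conditions from its comment: not NaN, finite, and magnitude at least the type's smallest normal value. A hedged restatement for double:

    #include <cfloat>
    bool isnormal_equiv(double x) {
      double a = __builtin_fabs(x);
      return x == x                     // not NaN
          && a != __builtin_inf()       // finite
          && a >= DBL_MIN;              // zero and subnormals fail this test
    }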
905 Value *V = EmitScalarExpr(E->getArg(5)); in EmitBuiltinExpr()
906 llvm::Type *Ty = ConvertType(E->getArg(5)->getType()); in EmitBuiltinExpr()
910 BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn); in EmitBuiltinExpr()
913 Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4, in EmitBuiltinExpr()
920 Value *ZeroLiteral = EmitScalarExpr(E->getArg(4)); in EmitBuiltinExpr()
921 BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn); in EmitBuiltinExpr()
923 Result->addIncoming(ZeroLiteral, Begin); in EmitBuiltinExpr()
928 Value *NanLiteral = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
929 BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn); in EmitBuiltinExpr()
931 Result->addIncoming(NanLiteral, NotZero); in EmitBuiltinExpr()
937 Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()), in EmitBuiltinExpr()
939 Value *InfLiteral = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
940 BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn); in EmitBuiltinExpr()
942 Result->addIncoming(InfLiteral, NotNan); in EmitBuiltinExpr()
947 getContext().getFloatTypeSemantics(E->getArg(5)->getType())); in EmitBuiltinExpr()
949 Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest), in EmitBuiltinExpr()
952 Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)), in EmitBuiltinExpr()
953 EmitScalarExpr(E->getArg(3))); in EmitBuiltinExpr()
955 Result->addIncoming(NormalResult, NotInf); in EmitBuiltinExpr()
959 return RValue::get(Result); in EmitBuiltinExpr()
965 Value *Size = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
966 return RValue::get(Builder.CreateAlloca(Builder.getInt8Ty(), Size)); in EmitBuiltinExpr()
970 Address Dest = EmitPointerWithAlignment(E->getArg(0)); in EmitBuiltinExpr()
971 Value *SizeVal = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
972 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), in EmitBuiltinExpr()
973 E->getArg(0)->getExprLoc(), FD, 0); in EmitBuiltinExpr()
975 return RValue::get(Dest.getPointer()); in EmitBuiltinExpr()
979 Address Dest = EmitPointerWithAlignment(E->getArg(0)); in EmitBuiltinExpr()
980 Address Src = EmitPointerWithAlignment(E->getArg(1)); in EmitBuiltinExpr()
981 Value *SizeVal = EmitScalarExpr(E->getArg(2)); in EmitBuiltinExpr()
982 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), in EmitBuiltinExpr()
983 E->getArg(0)->getExprLoc(), FD, 0); in EmitBuiltinExpr()
984 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), in EmitBuiltinExpr()
985 E->getArg(1)->getExprLoc(), FD, 1); in EmitBuiltinExpr()
987 return RValue::get(Dest.getPointer()); in EmitBuiltinExpr()
993 if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) || in EmitBuiltinExpr()
994 !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext())) in EmitBuiltinExpr()
998 Address Dest = EmitPointerWithAlignment(E->getArg(0)); in EmitBuiltinExpr()
999 Address Src = EmitPointerWithAlignment(E->getArg(1)); in EmitBuiltinExpr()
1000 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size); in EmitBuiltinExpr()
1002 return RValue::get(Dest.getPointer()); in EmitBuiltinExpr()
1006 Address DestAddr = EmitPointerWithAlignment(E->getArg(0)); in EmitBuiltinExpr()
1007 Address SrcAddr = EmitPointerWithAlignment(E->getArg(1)); in EmitBuiltinExpr()
1008 Value *SizeVal = EmitScalarExpr(E->getArg(2)); in EmitBuiltinExpr()
1011 return RValue::get(DestAddr.getPointer()); in EmitBuiltinExpr()
1017 if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) || in EmitBuiltinExpr()
1018 !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext())) in EmitBuiltinExpr()
1022 Address Dest = EmitPointerWithAlignment(E->getArg(0)); in EmitBuiltinExpr()
1023 Address Src = EmitPointerWithAlignment(E->getArg(1)); in EmitBuiltinExpr()
1024 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size); in EmitBuiltinExpr()
1026 return RValue::get(Dest.getPointer()); in EmitBuiltinExpr()
1031 Address Dest = EmitPointerWithAlignment(E->getArg(0)); in EmitBuiltinExpr()
1032 Address Src = EmitPointerWithAlignment(E->getArg(1)); in EmitBuiltinExpr()
1033 Value *SizeVal = EmitScalarExpr(E->getArg(2)); in EmitBuiltinExpr()
1034 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), in EmitBuiltinExpr()
1035 E->getArg(0)->getExprLoc(), FD, 0); in EmitBuiltinExpr()
1036 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), in EmitBuiltinExpr()
1037 E->getArg(1)->getExprLoc(), FD, 1); in EmitBuiltinExpr()
1039 return RValue::get(Dest.getPointer()); in EmitBuiltinExpr()
1043 Address Dest = EmitPointerWithAlignment(E->getArg(0)); in EmitBuiltinExpr()
1044 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), in EmitBuiltinExpr()
1046 Value *SizeVal = EmitScalarExpr(E->getArg(2)); in EmitBuiltinExpr()
1047 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), in EmitBuiltinExpr()
1048 E->getArg(0)->getExprLoc(), FD, 0); in EmitBuiltinExpr()
1050 return RValue::get(Dest.getPointer()); in EmitBuiltinExpr()
1055 if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) || in EmitBuiltinExpr()
1056 !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext())) in EmitBuiltinExpr()
1060 Address Dest = EmitPointerWithAlignment(E->getArg(0)); in EmitBuiltinExpr()
1061 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), in EmitBuiltinExpr()
1063 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size); in EmitBuiltinExpr()
1065 return RValue::get(Dest.getPointer()); in EmitBuiltinExpr()
1075 // this instead of hard-coding 0, which is correct for most targets. in EmitBuiltinExpr()
1078 Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa); in EmitBuiltinExpr()
1079 return RValue::get(Builder.CreateCall(F, in EmitBuiltinExpr()
1080 llvm::ConstantInt::get(Int32Ty, Offset))); in EmitBuiltinExpr()
1084 CGM.EmitConstantExpr(E->getArg(0), getContext().UnsignedIntTy, this); in EmitBuiltinExpr()
1085 Value *F = CGM.getIntrinsic(Intrinsic::returnaddress); in EmitBuiltinExpr()
1086 return RValue::get(Builder.CreateCall(F, Depth)); in EmitBuiltinExpr()
1090 CGM.EmitConstantExpr(E->getArg(0), getContext().UnsignedIntTy, this); in EmitBuiltinExpr()
1091 Value *F = CGM.getIntrinsic(Intrinsic::frameaddress); in EmitBuiltinExpr()
1092 return RValue::get(Builder.CreateCall(F, Depth)); in EmitBuiltinExpr()
1095 Value *Address = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
1097 return RValue::get(Result); in EmitBuiltinExpr()
1100 Value *Address = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
1102 return RValue::get(Result); in EmitBuiltinExpr()
1106 = cast<llvm::IntegerType>(ConvertType(E->getType())); in EmitBuiltinExpr()
1108 if (Column == -1) { in EmitBuiltinExpr()
1110 return RValue::get(llvm::UndefValue::get(Ty)); in EmitBuiltinExpr()
1112 return RValue::get(llvm::ConstantInt::get(Ty, Column, true)); in EmitBuiltinExpr()
1115 Value *Address = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
1118 return RValue::get(llvm::UndefValue::get(ConvertType(E->getType()))); in EmitBuiltinExpr()
1121 Value *Int = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
1122 Value *Ptr = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
1124 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType()); in EmitBuiltinExpr()
1125 assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) && in EmitBuiltinExpr()
1126 "LLVM's __builtin_eh_return only supports 32- and 64-bit variants"); in EmitBuiltinExpr()
1127 Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32 in EmitBuiltinExpr()
1128 ? Intrinsic::eh_return_i32 in EmitBuiltinExpr()
1129 : Intrinsic::eh_return_i64); in EmitBuiltinExpr()
1136 return RValue::get(nullptr); in EmitBuiltinExpr()
1139 Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init); in EmitBuiltinExpr()
1140 return RValue::get(Builder.CreateCall(F)); in EmitBuiltinExpr()
1147 // doesn't implicitly ignore high-order bits when doing in EmitBuiltinExpr()
1151 // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html in EmitBuiltinExpr()
1154 Value *Ptr = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
1158 if (IntPtrTy->getBitWidth() == 64) in EmitBuiltinExpr()
1159 return RValue::get(Result); in EmitBuiltinExpr()
1163 return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext")); in EmitBuiltinExpr()
1165 return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext")); in EmitBuiltinExpr()
1169 Address Buf = EmitPointerWithAlignment(E->getArg(0)); in EmitBuiltinExpr()
1173 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress), in EmitBuiltinExpr()
1174 ConstantInt::get(Int32Ty, 0)); in EmitBuiltinExpr()
1179 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave)); in EmitBuiltinExpr()
1185 Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp); in EmitBuiltinExpr()
1187 return RValue::get(Builder.CreateCall(F, Buf.getPointer())); in EmitBuiltinExpr()
1190 Value *Buf = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
1194 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf); in EmitBuiltinExpr()
1202 return RValue::get(nullptr); in EmitBuiltinExpr()
1317 return RValue::get(MakeAtomicCmpXchgValue(*this, E, false)); in EmitBuiltinExpr()
1324 return RValue::get(MakeAtomicCmpXchgValue(*this, E, true)); in EmitBuiltinExpr()
1345 Value *Ptr = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
1346 QualType ElTy = E->getArg(0)->getType()->getPointeeType(); in EmitBuiltinExpr()
1348 llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(), in EmitBuiltinExpr()
1350 Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo()); in EmitBuiltinExpr()
1354 Store->setAtomic(llvm::AtomicOrdering::Release); in EmitBuiltinExpr()
1355 return RValue::get(nullptr); in EmitBuiltinExpr()
1359 // We assume this is supposed to correspond to a C++0x-style in EmitBuiltinExpr()
1360 // sequentially-consistent fence (i.e. this is only usable for in EmitBuiltinExpr()
1361 // synchronization, not device I/O or anything like that). This intrinsic in EmitBuiltinExpr()
1364 // to use it with non-atomic loads and stores to get acquire/release in EmitBuiltinExpr()
1367 return RValue::get(nullptr); in EmitBuiltinExpr()
1371 return RValue::get(EmitNontemporalLoad(*this, E)); in EmitBuiltinExpr()
1373 return RValue::get(EmitNontemporalStore(*this, E)); in EmitBuiltinExpr()
1377 // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since in EmitBuiltinExpr()
1378 // _Atomic(T) is always properly-aligned. in EmitBuiltinExpr()
1381 Args.add(RValue::get(EmitScalarExpr(E->getArg(0))), in EmitBuiltinExpr()
1384 Args.add(RValue::get(EmitScalarExpr(E->getArg(1))), in EmitBuiltinExpr()
1387 Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)), in EmitBuiltinExpr()
1390 CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args); in EmitBuiltinExpr()
1399 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType(); in EmitBuiltinExpr()
1401 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified(); in EmitBuiltinExpr()
1403 Value *Ptr = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
1404 unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace(); in EmitBuiltinExpr()
1405 Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace)); in EmitBuiltinExpr()
1407 Value *Order = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
1409 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); in EmitBuiltinExpr()
1437 Result->setVolatile(Volatile); in EmitBuiltinExpr()
1438 return RValue::get(Builder.CreateIsNotNull(Result, "tobool")); in EmitBuiltinExpr()
1465 RMW->setVolatile(Volatile); in EmitBuiltinExpr()
1466 Result->addIncoming(RMW, BBs[i]); in EmitBuiltinExpr()
1470 SI->addCase(Builder.getInt32(0), BBs[0]); in EmitBuiltinExpr()
1471 SI->addCase(Builder.getInt32(1), BBs[1]); in EmitBuiltinExpr()
1472 SI->addCase(Builder.getInt32(2), BBs[1]); in EmitBuiltinExpr()
1473 SI->addCase(Builder.getInt32(3), BBs[2]); in EmitBuiltinExpr()
1474 SI->addCase(Builder.getInt32(4), BBs[3]); in EmitBuiltinExpr()
1475 SI->addCase(Builder.getInt32(5), BBs[4]); in EmitBuiltinExpr()
1478 return RValue::get(Builder.CreateIsNotNull(Result, "tobool")); in EmitBuiltinExpr()
1482 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType(); in EmitBuiltinExpr()
1484 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified(); in EmitBuiltinExpr()
1486 Address Ptr = EmitPointerWithAlignment(E->getArg(0)); in EmitBuiltinExpr()
1487 unsigned AddrSpace = Ptr.getPointer()->getType()->getPointerAddressSpace(); in EmitBuiltinExpr()
1488 Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace)); in EmitBuiltinExpr()
1490 Value *Order = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
1492 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); in EmitBuiltinExpr()
1497 Store->setOrdering(llvm::AtomicOrdering::Monotonic); in EmitBuiltinExpr()
1500 Store->setOrdering(llvm::AtomicOrdering::Release); in EmitBuiltinExpr()
1503 Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent); in EmitBuiltinExpr()
1506 return RValue::get(nullptr); in EmitBuiltinExpr()
1526 Store->setOrdering(Orders[i]); in EmitBuiltinExpr()
1530 SI->addCase(Builder.getInt32(0), BBs[0]); in EmitBuiltinExpr()
1531 SI->addCase(Builder.getInt32(3), BBs[1]); in EmitBuiltinExpr()
1532 SI->addCase(Builder.getInt32(5), BBs[2]); in EmitBuiltinExpr()
1535 return RValue::get(nullptr); in EmitBuiltinExpr()
1548 Value *Order = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
1550 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); in EmitBuiltinExpr()
1570 return RValue::get(nullptr); in EmitBuiltinExpr()
1586 SI->addCase(Builder.getInt32(1), AcquireBB); in EmitBuiltinExpr()
1587 SI->addCase(Builder.getInt32(2), AcquireBB); in EmitBuiltinExpr()
1592 SI->addCase(Builder.getInt32(3), ReleaseBB); in EmitBuiltinExpr()
1597 SI->addCase(Builder.getInt32(4), AcqRelBB); in EmitBuiltinExpr()
1602 SI->addCase(Builder.getInt32(5), SeqCstBB); in EmitBuiltinExpr()
1605 return RValue::get(nullptr); in EmitBuiltinExpr()
1612 // Transform a call to sqrt* into a @llvm.sqrt.* intrinsic call, but only in EmitBuiltinExpr()
1613 // in finite- or unsafe-math mode (the intrinsic has different semantics in EmitBuiltinExpr()
1615 // -fmath-errno=0 is not enough). in EmitBuiltinExpr()
1616 if (!FD->hasAttr<ConstAttr>()) in EmitBuiltinExpr()
1621 Value *Arg0 = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
1622 llvm::Type *ArgType = Arg0->getType(); in EmitBuiltinExpr()
1623 Value *F = CGM.getIntrinsic(Intrinsic::sqrt, ArgType); in EmitBuiltinExpr()
1624 return RValue::get(Builder.CreateCall(F, Arg0)); in EmitBuiltinExpr()
1633 // Transform a call to pow* into a @llvm.pow.* intrinsic call. in EmitBuiltinExpr()
1634 if (!FD->hasAttr<ConstAttr>()) in EmitBuiltinExpr()
1636 Value *Base = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
1637 Value *Exponent = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
1638 llvm::Type *ArgType = Base->getType(); in EmitBuiltinExpr()
1639 Value *F = CGM.getIntrinsic(Intrinsic::pow, ArgType); in EmitBuiltinExpr()
1640 return RValue::get(Builder.CreateCall(F, {Base, Exponent})); in EmitBuiltinExpr()
1649 // Rewrite fma to intrinsic. in EmitBuiltinExpr()
1650 Value *FirstArg = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
1651 llvm::Type *ArgType = FirstArg->getType(); in EmitBuiltinExpr()
1652 Value *F = CGM.getIntrinsic(Intrinsic::fma, ArgType); in EmitBuiltinExpr()
1653 return RValue::get( in EmitBuiltinExpr()
1654 Builder.CreateCall(F, {FirstArg, EmitScalarExpr(E->getArg(1)), in EmitBuiltinExpr()
1655 EmitScalarExpr(E->getArg(2))})); in EmitBuiltinExpr()
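The fma case forwards all three operands to llvm.fma, preserving the builtin's single-rounding (fused) semantics:

    double fma_example(double a, double b, double c) {
      return __builtin_fma(a, b, c);   // a*b + c rounded once
    }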
1661 return RValue::get( in EmitBuiltinExpr()
1662 Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))), in EmitBuiltinExpr()
1663 ConvertType(E->getType()))); in EmitBuiltinExpr()
1666 llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
1667 llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::annotation, in EmitBuiltinExpr()
1668 AnnVal->getType()); in EmitBuiltinExpr()
1670 // Get the annotation string, go through casts. Sema requires this to be a in EmitBuiltinExpr()
1671 // non-wide string literal, potentially cast, so the cast<> is safe. in EmitBuiltinExpr()
1672 const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts(); in EmitBuiltinExpr()
1673 StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString(); in EmitBuiltinExpr()
1674 return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc())); in EmitBuiltinExpr()
1705 llvm::Value *X = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
1706 llvm::Value *Y = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
1707 llvm::Value *Carryin = EmitScalarExpr(E->getArg(2)); in EmitBuiltinExpr()
1708 Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3)); in EmitBuiltinExpr()
1711 llvm::Intrinsic::ID IntrinsicId; in EmitBuiltinExpr()
1719 IntrinsicId = llvm::Intrinsic::uadd_with_overflow; in EmitBuiltinExpr()
1726 IntrinsicId = llvm::Intrinsic::usub_with_overflow; in EmitBuiltinExpr()
1738 X->getType()); in EmitBuiltinExpr()
1740 return RValue::get(Sum2); in EmitBuiltinExpr()
1746 const clang::Expr *LeftArg = E->getArg(0); in EmitBuiltinExpr()
1747 const clang::Expr *RightArg = E->getArg(1); in EmitBuiltinExpr()
1748 const clang::Expr *ResultArg = E->getArg(2); in EmitBuiltinExpr()
1751 ResultArg->getType()->castAs<PointerType>()->getPointeeType(); in EmitBuiltinExpr()
1754 getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType()); in EmitBuiltinExpr()
1756 getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType()); in EmitBuiltinExpr()
1763 llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width); in EmitBuiltinExpr()
1767 llvm::Intrinsic::ID IntrinsicId; in EmitBuiltinExpr()
1773 ? llvm::Intrinsic::sadd_with_overflow in EmitBuiltinExpr()
1774 : llvm::Intrinsic::uadd_with_overflow; in EmitBuiltinExpr()
1778 ? llvm::Intrinsic::ssub_with_overflow in EmitBuiltinExpr()
1779 : llvm::Intrinsic::usub_with_overflow; in EmitBuiltinExpr()
1783 ? llvm::Intrinsic::smul_with_overflow in EmitBuiltinExpr()
1784 : llvm::Intrinsic::umul_with_overflow; in EmitBuiltinExpr()
1818 ResultArg->getType()->getPointeeType().isVolatileQualified(); in EmitBuiltinExpr()
1821 return RValue::get(Overflow); in EmitBuiltinExpr()
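This block handles the generic __builtin_{add,sub,mul}_overflow forms, which accept mixed operand and result types: the operands are widened to an "encompassing" integer type, checked there, and overflow also accounts for truncation back to the result type. For example (the mixed widths are only to illustrate that logic):

    bool mixed_add(long a, unsigned b, int *out) {
      return __builtin_add_overflow(a, b, out);  // true if *out cannot hold a + b exactly
    }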
1846 llvm::Value *X = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
1847 llvm::Value *Y = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
1848 Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2)); in EmitBuiltinExpr()
1851 llvm::Intrinsic::ID IntrinsicId; in EmitBuiltinExpr()
1857 IntrinsicId = llvm::Intrinsic::uadd_with_overflow; in EmitBuiltinExpr()
1862 IntrinsicId = llvm::Intrinsic::usub_with_overflow; in EmitBuiltinExpr()
1867 IntrinsicId = llvm::Intrinsic::umul_with_overflow; in EmitBuiltinExpr()
1872 IntrinsicId = llvm::Intrinsic::sadd_with_overflow; in EmitBuiltinExpr()
1877 IntrinsicId = llvm::Intrinsic::ssub_with_overflow; in EmitBuiltinExpr()
1882 IntrinsicId = llvm::Intrinsic::smul_with_overflow; in EmitBuiltinExpr()
1891 return RValue::get(Carry); in EmitBuiltinExpr()
1894 return RValue::get(EmitLValue(E->getArg(0)).getPointer()); in EmitBuiltinExpr()
1896 return EmitBuiltinNewDeleteCall(FD->getType()->castAs<FunctionProtoType>(), in EmitBuiltinExpr()
1897 E->getArg(0), false); in EmitBuiltinExpr()
1899 return EmitBuiltinNewDeleteCall(FD->getType()->castAs<FunctionProtoType>(), in EmitBuiltinExpr()
1900 E->getArg(0), true); in EmitBuiltinExpr()
1903 return RValue::get(ConstantInt::get(IntTy, 0)); in EmitBuiltinExpr()
1905 const CallExpr *Call = cast<CallExpr>(E->getArg(0)); in EmitBuiltinExpr()
1906 const Expr *Chain = E->getArg(1); in EmitBuiltinExpr()
1907 return EmitCall(Call->getCallee()->getType(), in EmitBuiltinExpr()
1908 EmitScalarExpr(Call->getCallee()), Call, ReturnValue, in EmitBuiltinExpr()
1909 Call->getCalleeDecl(), EmitScalarExpr(Chain)); in EmitBuiltinExpr()
1917 IntegerType::get(getLLVMContext(), in EmitBuiltinExpr()
1918 getContext().getTypeSize(E->getType())); in EmitBuiltinExpr()
1919 llvm::Type *IntPtrType = IntType->getPointerTo(); in EmitBuiltinExpr()
1922 Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType); in EmitBuiltinExpr()
1924 llvm::Value *Exchange = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
1925 RTy = Exchange->getType(); in EmitBuiltinExpr()
1929 Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType); in EmitBuiltinExpr()
1935 Result->setVolatile(true); in EmitBuiltinExpr()
1937 return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result, in EmitBuiltinExpr()
1943 EmitScalarExpr(E->getArg(0)), in EmitBuiltinExpr()
1944 EmitScalarExpr(E->getArg(2)), in EmitBuiltinExpr()
1945 EmitScalarExpr(E->getArg(1)), in EmitBuiltinExpr()
1948 CXI->setVolatile(true); in EmitBuiltinExpr()
1949 return RValue::get(Builder.CreateExtractValue(CXI, 0)); in EmitBuiltinExpr()
1952 llvm::Type *IntTy = ConvertType(E->getType()); in EmitBuiltinExpr()
1955 EmitScalarExpr(E->getArg(0)), in EmitBuiltinExpr()
1956 ConstantInt::get(IntTy, 1), in EmitBuiltinExpr()
1958 RMWI->setVolatile(true); in EmitBuiltinExpr()
1959 return RValue::get(Builder.CreateAdd(RMWI, ConstantInt::get(IntTy, 1))); in EmitBuiltinExpr()
1962 llvm::Type *IntTy = ConvertType(E->getType()); in EmitBuiltinExpr()
1965 EmitScalarExpr(E->getArg(0)), in EmitBuiltinExpr()
1966 ConstantInt::get(IntTy, 1), in EmitBuiltinExpr()
1968 RMWI->setVolatile(true); in EmitBuiltinExpr()
1969 return RValue::get(Builder.CreateSub(RMWI, ConstantInt::get(IntTy, 1))); in EmitBuiltinExpr()
1974 EmitScalarExpr(E->getArg(0)), in EmitBuiltinExpr()
1975 EmitScalarExpr(E->getArg(1)), in EmitBuiltinExpr()
1977 RMWI->setVolatile(true); in EmitBuiltinExpr()
1978 return RValue::get(RMWI); in EmitBuiltinExpr()
1981 llvm::Type *IntTy = ConvertType(E->getType()); in EmitBuiltinExpr()
1983 Builder.CreateIntToPtr(EmitScalarExpr(E->getArg(0)), in EmitBuiltinExpr()
1984 llvm::PointerType::get(IntTy, 257)); in EmitBuiltinExpr()
1987 return RValue::get(Load); in EmitBuiltinExpr()
1992 return RValue::get(EmitSEHExceptionCode()); in EmitBuiltinExpr()
1995 return RValue::get(EmitSEHExceptionInfo()); in EmitBuiltinExpr()
1998 return RValue::get(EmitSEHAbnormalTermination()); in EmitBuiltinExpr()
2003 AttributeSet::get(getLLVMContext(), llvm::AttributeSet::FunctionIndex, in EmitBuiltinExpr()
2006 llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/false), in EmitBuiltinExpr()
2009 EmitScalarExpr(E->getArg(0)), Int8PtrTy); in EmitBuiltinExpr()
2011 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress), in EmitBuiltinExpr()
2012 ConstantInt::get(Int32Ty, 0)); in EmitBuiltinExpr()
2016 return RValue::get(CS.getInstruction()); in EmitBuiltinExpr()
2023 AttributeSet::get(getLLVMContext(), llvm::AttributeSet::FunctionIndex, in EmitBuiltinExpr()
2026 EmitScalarExpr(E->getArg(0)), Int8PtrTy); in EmitBuiltinExpr()
2031 llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/true), in EmitBuiltinExpr()
2033 llvm::Value *Count = ConstantInt::get(IntTy, 0); in EmitBuiltinExpr()
2039 llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/false), in EmitBuiltinExpr()
2042 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress), in EmitBuiltinExpr()
2043 ConstantInt::get(Int32Ty, 0)); in EmitBuiltinExpr()
2048 return RValue::get(CS.getInstruction()); in EmitBuiltinExpr()
2055 CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType())) in EmitBuiltinExpr()
2056 return RValue::get(llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy)); in EmitBuiltinExpr()
2060 // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions in EmitBuiltinExpr()
2063 Value *Arg0 = EmitScalarExpr(E->getArg(0)), in EmitBuiltinExpr()
2064 *Arg1 = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
2069 llvm::Type *I8PTy = llvm::PointerType::get( in EmitBuiltinExpr()
2073 if (2U == E->getNumArgs()) { in EmitBuiltinExpr()
2078 llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy}; in EmitBuiltinExpr()
2079 llvm::FunctionType *FTy = llvm::FunctionType::get( in EmitBuiltinExpr()
2082 return RValue::get(Builder.CreateCall( in EmitBuiltinExpr()
2085 assert(4 == E->getNumArgs() && in EmitBuiltinExpr()
2090 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy}; in EmitBuiltinExpr()
2091 Value *Arg2 = EmitScalarExpr(E->getArg(2)), in EmitBuiltinExpr()
2092 *Arg3 = EmitScalarExpr(E->getArg(3)); in EmitBuiltinExpr()
2093 llvm::FunctionType *FTy = llvm::FunctionType::get( in EmitBuiltinExpr()
2098 if (Arg2->getType() != Int32Ty) in EmitBuiltinExpr()
2100 return RValue::get(Builder.CreateCall( in EmitBuiltinExpr()
2104 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write in EmitBuiltinExpr()
2127 Value *Arg0 = EmitScalarExpr(E->getArg(0)), in EmitBuiltinExpr()
2128 *Arg1 = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
2132 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty}; in EmitBuiltinExpr()
2133 llvm::FunctionType *FTy = llvm::FunctionType::get( in EmitBuiltinExpr()
2137 if (Arg1->getType() != Int32Ty) in EmitBuiltinExpr()
2139 return RValue::get( in EmitBuiltinExpr()
2142 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write in EmitBuiltinExpr()
2164 Value *Arg0 = EmitScalarExpr(E->getArg(0)), in EmitBuiltinExpr()
2165 *Arg1 = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
2168 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType()}; in EmitBuiltinExpr()
2170 llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()), in EmitBuiltinExpr()
2173 return RValue::get( in EmitBuiltinExpr()
2176 // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions in EmitBuiltinExpr()
2186 Value *Arg0 = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
2187 llvm::Type *ArgTys[] = {Arg0->getType()}; in EmitBuiltinExpr()
2188 llvm::FunctionType *FTy = llvm::FunctionType::get( in EmitBuiltinExpr()
2191 return RValue::get( in EmitBuiltinExpr()
2195 // OpenCL v2.0 s6.13.9 - Address space qualifier functions. in EmitBuiltinExpr()
2199 auto Arg0 = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
2200 auto NewArgT = llvm::PointerType::get(Int8Ty, in EmitBuiltinExpr()
2202 auto NewRetT = llvm::PointerType::get(Int8Ty, in EmitBuiltinExpr()
2204 E->getType()->getPointeeType().getAddressSpace())); in EmitBuiltinExpr()
2205 auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false); in EmitBuiltinExpr()
2207 if (Arg0->getType()->getPointerAddressSpace() != in EmitBuiltinExpr()
2208 NewArgT->getPointerAddressSpace()) in EmitBuiltinExpr()
2213 E->getDirectCallee()->getName()), {NewArg}); in EmitBuiltinExpr()
2214 return RValue::get(Builder.CreateBitOrPointerCast(NewCall, in EmitBuiltinExpr()
2215 ConvertType(E->getType()))); in EmitBuiltinExpr()
2218 // OpenCL v2.0, s6.13.17 - Enqueue kernel function. in EmitBuiltinExpr()
2222 unsigned NumArgs = E->getNumArgs(); in EmitBuiltinExpr()
2227 llvm::Value *Queue = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
2228 llvm::Value *Flags = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
2229 llvm::Value *Range = EmitScalarExpr(E->getArg(2)); in EmitBuiltinExpr()
2236 llvm::FunctionType *FTy = llvm::FunctionType::get( in EmitBuiltinExpr()
2240 Builder.CreateBitCast(EmitScalarExpr(E->getArg(3)), Int8PtrTy); in EmitBuiltinExpr()
2242 return RValue::get(Builder.CreateCall( in EmitBuiltinExpr()
2248 if (E->getArg(3)->getType()->isBlockPointerType()) { in EmitBuiltinExpr()
2252 Builder.CreateBitCast(EmitScalarExpr(E->getArg(3)), Int8PtrTy); in EmitBuiltinExpr()
2256 ConstantInt::get(IntTy, NumArgs - 4)}; in EmitBuiltinExpr()
2262 llvm::Value *ArgSize = EmitScalarExpr(E->getArg(I)); in EmitBuiltinExpr()
2265 .getTypeSizeInChars(E->getArg(I)->getType()) in EmitBuiltinExpr()
2272 llvm::FunctionType *FTy = llvm::FunctionType::get( in EmitBuiltinExpr()
2274 return RValue::get( in EmitBuiltinExpr()
2282 E->getArg(4)->getType()->isArrayType() in EmitBuiltinExpr()
2283 ? E->getArg(4)->getType().getAddressSpace() in EmitBuiltinExpr()
2284 : E->getArg(4)->getType()->getPointeeType().getAddressSpace(); in EmitBuiltinExpr()
2286 EventTy->getPointerTo(CGM.getContext().getTargetAddressSpace(AS4)); in EmitBuiltinExpr()
2288 E->getArg(5)->getType()->getPointeeType().getAddressSpace(); in EmitBuiltinExpr()
2290 EventTy->getPointerTo(CGM.getContext().getTargetAddressSpace(AS5)); in EmitBuiltinExpr()
2292 llvm::Value *NumEvents = EmitScalarExpr(E->getArg(3)); in EmitBuiltinExpr()
2294 E->getArg(4)->getType()->isArrayType() in EmitBuiltinExpr()
2295 ? EmitArrayToPointerDecay(E->getArg(4)).getPointer() in EmitBuiltinExpr()
2296 : EmitScalarExpr(E->getArg(4)); in EmitBuiltinExpr()
2297 llvm::Value *ClkEvent = EmitScalarExpr(E->getArg(5)); in EmitBuiltinExpr()
2299 Builder.CreateBitCast(EmitScalarExpr(E->getArg(6)), Int8PtrTy); in EmitBuiltinExpr()
2310 llvm::FunctionType *FTy = llvm::FunctionType::get( in EmitBuiltinExpr()
2312 return RValue::get( in EmitBuiltinExpr()
2318 Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7)); in EmitBuiltinExpr()
2324 llvm::Value *ArgSize = EmitScalarExpr(E->getArg(I)); in EmitBuiltinExpr()
2327 .getTypeSizeInChars(E->getArg(I)->getType()) in EmitBuiltinExpr()
2333 llvm::FunctionType *FTy = llvm::FunctionType::get( in EmitBuiltinExpr()
2335 return RValue::get( in EmitBuiltinExpr()
2340 // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block in EmitBuiltinExpr()
2343 Value *Arg = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
2345 return RValue::get( in EmitBuiltinExpr()
2347 llvm::FunctionType::get(IntTy, Int8PtrTy, false), in EmitBuiltinExpr()
2352 Value *Arg = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
2354 return RValue::get(Builder.CreateCall( in EmitBuiltinExpr()
2356 llvm::FunctionType::get(IntTy, Int8PtrTy, false), in EmitBuiltinExpr()
2367 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::canonicalize)); in EmitBuiltinExpr()
2372 // Fall through - it's already mapped to the intrinsic by GCCBuiltin. in EmitBuiltinExpr()
2387 return emitLibraryCall(*this, FD, E, EmitScalarExpr(E->getCallee())); in EmitBuiltinExpr()
2391 // This is down here to avoid non-target specific builtins, however, if in EmitBuiltinExpr()
2396 // See if we have a target specific intrinsic. in EmitBuiltinExpr()
2398 Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic; in EmitBuiltinExpr()
2401 IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name); in EmitBuiltinExpr()
2405 if (IntrinsicID == Intrinsic::not_intrinsic) in EmitBuiltinExpr()
2406 IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix, Name); in EmitBuiltinExpr()
2409 if (IntrinsicID != Intrinsic::not_intrinsic) { in EmitBuiltinExpr()
2420 llvm::FunctionType *FTy = F->getFunctionType(); in EmitBuiltinExpr()
2422 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) { in EmitBuiltinExpr()
2426 ArgValue = EmitScalarExpr(E->getArg(i)); in EmitBuiltinExpr()
2429 // know that the generated intrinsic gets a ConstantInt. in EmitBuiltinExpr()
2431 bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result,getContext()); in EmitBuiltinExpr()
2434 ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result); in EmitBuiltinExpr()
2437 // If the intrinsic arg type is different from the builtin arg type in EmitBuiltinExpr()
2439 llvm::Type *PTy = FTy->getParamType(i); in EmitBuiltinExpr()
2440 if (PTy != ArgValue->getType()) { in EmitBuiltinExpr()
2441 assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) && in EmitBuiltinExpr()
2450 QualType BuiltinRetType = E->getType(); in EmitBuiltinExpr()
2453 if (!BuiltinRetType->isVoidType()) in EmitBuiltinExpr()
2456 if (RetTy != V->getType()) { in EmitBuiltinExpr()
2457 assert(V->getType()->canLosslesslyBitCastTo(RetTy) && in EmitBuiltinExpr()
2462 return RValue::get(V); in EmitBuiltinExpr()
2467 return RValue::get(V); in EmitBuiltinExpr()
2472 return GetUndefRValue(E->getType()); in EmitBuiltinExpr()
2483 return CGF->EmitARMBuiltinExpr(BuiltinID, E); in EmitTargetArchBuiltinExpr()
2486 return CGF->EmitAArch64BuiltinExpr(BuiltinID, E); in EmitTargetArchBuiltinExpr()
2489 return CGF->EmitX86BuiltinExpr(BuiltinID, E); in EmitTargetArchBuiltinExpr()
2493 return CGF->EmitPPCBuiltinExpr(BuiltinID, E); in EmitTargetArchBuiltinExpr()
2496 return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E); in EmitTargetArchBuiltinExpr()
2498 return CGF->EmitSystemZBuiltinExpr(BuiltinID, E); in EmitTargetArchBuiltinExpr()
2501 return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E); in EmitTargetArchBuiltinExpr()
2504 return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E); in EmitTargetArchBuiltinExpr()
2516 getContext().getAuxTargetInfo()->getTriple().getArch()); in EmitTargetBuiltinExpr()
2530 return llvm::VectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad)); in GetNeonType()
2534 return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad)); in GetNeonType()
2536 return llvm::VectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad)); in GetNeonType()
2539 return llvm::VectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad)); in GetNeonType()
2541 // FIXME: i128 and f128 don't get full support in Clang and LLVM. in GetNeonType()
2543 // so we use v16i8 to represent poly128 and get pattern matched. in GetNeonType()
2544 return llvm::VectorType::get(CGF->Int8Ty, 16); in GetNeonType()
2546 return llvm::VectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad)); in GetNeonType()
2548 return llvm::VectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad)); in GetNeonType()
2558 return llvm::VectorType::get(CGF->FloatTy, (2 << IsQuad)); in GetFloatNeonType()
2560 return llvm::VectorType::get(CGF->DoubleTy, (1 << IsQuad)); in GetFloatNeonType()
2562 llvm_unreachable("Type can't be converted to floating-point!"); in GetFloatNeonType()
2567 unsigned nElts = V->getType()->getVectorNumElements(); in EmitNeonSplat()
2576 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end(); in EmitNeonCall()
2579 Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift); in EmitNeonCall()
2581 Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name); in EmitNeonCall()
2588 int SV = cast<ConstantInt>(V)->getSExtValue(); in EmitNeonShiftVector()
2589 return ConstantInt::get(Ty, neg ? -SV : SV); in EmitNeonShiftVector()
2592 // \brief Right-shift a vector by a constant.
2598 int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue(); in EmitNeonRShiftImm()
2599 int EltSize = VTy->getScalarSizeInBits(); in EmitNeonRShiftImm()
2607 // Right-shifting an unsigned value by its size yields 0. in EmitNeonRShiftImm()
2608 return llvm::ConstantAggregateZero::get(VTy); in EmitNeonRShiftImm()
2610 // Right-shifting a signed value by its size is equivalent in EmitNeonRShiftImm()
2611 // to a shift of size-1. in EmitNeonRShiftImm()
2612 --ShiftAmt; in EmitNeonRShiftImm()
2613 Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt); in EmitNeonRShiftImm()
2668 Intrinsic::LLVMIntrinsic, 0, TypeModifier }
2672 Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
3226 if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID) in findNeonIntrinsicInMap()
3245 llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext())); in LookupNeonLLVMIntrinsic()
3247 Ty = llvm::VectorType::get( in LookupNeonLLVMIntrinsic()
3248 Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1); in LookupNeonLLVMIntrinsic()
3255 int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1; in LookupNeonLLVMIntrinsic()
3256 ArgType = llvm::VectorType::get(ArgType, Elts); in LookupNeonLLVMIntrinsic()
3294 // with swapped operands. The table gives us the right intrinsic but we in EmitCommonNeonSISDBuiltinExpr()
3300 assert(Int && "Generic code assumes a valid intrinsic"); in EmitCommonNeonSISDBuiltinExpr()
3302 // Determine the type(s) of this overloaded AArch64 intrinsic. in EmitCommonNeonSISDBuiltinExpr()
3303 const Expr *Arg = E->getArg(0); in EmitCommonNeonSISDBuiltinExpr()
3304 llvm::Type *ArgTy = CGF.ConvertType(Arg->getType()); in EmitCommonNeonSISDBuiltinExpr()
3308 ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0); in EmitCommonNeonSISDBuiltinExpr()
3309 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end(); in EmitCommonNeonSISDBuiltinExpr()
3311 llvm::Type *ArgTy = ai->getType(); in EmitCommonNeonSISDBuiltinExpr()
3312 if (Ops[j]->getType()->getPrimitiveSizeInBits() == in EmitCommonNeonSISDBuiltinExpr()
3313 ArgTy->getPrimitiveSizeInBits()) in EmitCommonNeonSISDBuiltinExpr()
3316 assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy()); in EmitCommonNeonSISDBuiltinExpr()
3317 // The constant argument to an _n_ intrinsic always has Int32Ty, so truncate in EmitCommonNeonSISDBuiltinExpr()
3320 CGF.Builder.CreateTruncOrBitCast(Ops[j], ArgTy->getVectorElementType()); in EmitCommonNeonSISDBuiltinExpr()
3322 CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0); in EmitCommonNeonSISDBuiltinExpr()
3326 llvm::Type *ResultType = CGF.ConvertType(E->getType()); in EmitCommonNeonSISDBuiltinExpr()
3327 if (ResultType->getPrimitiveSizeInBits() < in EmitCommonNeonSISDBuiltinExpr()
3328 Result->getType()->getPrimitiveSizeInBits()) in EmitCommonNeonSISDBuiltinExpr()
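// A minimal sketch of the SISD path above (assuming <arm_neon.h> on AArch64; the
// particular ACLE intrinsic is only an example of a scalar builtin that maps to a
// vector-typed LLVM intrinsic): scalar operands are inserted into lane 0 of undef
// vectors before the call, and the scalar result is read back from lane 0,
// truncating when the C result type is narrower than the intrinsic's.
#include <arm_neon.h>

int8_t scalar_saturating_add(int8_t a, int8_t b) {
  return vqaddb_s8(a, b);
}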
3338 // Get the last argument, which specifies the vector type. in EmitCommonNeonBuiltinExpr()
3340 const Expr *Arg = E->getArg(E->getNumArgs() - 1); in EmitCommonNeonBuiltinExpr()
3341 if (!Arg->isIntegerConstantExpr(NeonTypeConst, getContext())) in EmitCommonNeonBuiltinExpr()
3344 // Determine the type of this overloaded NEON intrinsic. in EmitCommonNeonBuiltinExpr()
3354 auto getAlignmentValue32 = [&](Address addr) -> Value* { in EmitCommonNeonBuiltinExpr()
3366 if (VTy->getElementType()->isFloatingPointTy()) in EmitCommonNeonBuiltinExpr()
3367 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs"); in EmitCommonNeonBuiltinExpr()
3380 ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2); in EmitCommonNeonBuiltinExpr()
3395 llvm::Type *VecFlt = llvm::VectorType::get( in EmitCommonNeonBuiltinExpr()
3396 VTy->getScalarSizeInBits() == 32 ? FloatTy : DoubleTy, in EmitCommonNeonBuiltinExpr()
3397 VTy->getNumElements()); in EmitCommonNeonBuiltinExpr()
3404 // We generate a target-independent intrinsic, which needs a second argument in EmitCommonNeonBuiltinExpr()
3484 int CV = cast<ConstantInt>(Ops[2])->getSExtValue(); in EmitCommonNeonBuiltinExpr()
3486 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) in EmitCommonNeonBuiltinExpr()
3495 Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty); in EmitCommonNeonBuiltinExpr()
3500 // NEON intrinsic puts accumulator first, unlike the LLVM fma. in EmitCommonNeonBuiltinExpr()
3519 Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); in EmitCommonNeonBuiltinExpr()
3525 Value *V = UndefValue::get(Ty); in EmitCommonNeonBuiltinExpr()
3526 Ty = llvm::PointerType::getUnqual(VTy->getElementType()); in EmitCommonNeonBuiltinExpr()
3529 llvm::Constant *CI = ConstantInt::get(SizeTy, 0); in EmitCommonNeonBuiltinExpr()
3541 for (unsigned I = 2; I < Ops.size() - 1; ++I) in EmitCommonNeonBuiltinExpr()
3545 Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); in EmitCommonNeonBuiltinExpr()
3566 // intrinsic for now. in EmitCommonNeonBuiltinExpr()
3567 Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls; in EmitCommonNeonBuiltinExpr()
3568 Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int; in EmitCommonNeonBuiltinExpr()
3573 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits(); in EmitCommonNeonBuiltinExpr()
3575 llvm::IntegerType::get(getLLVMContext(), EltBits / 2); in EmitCommonNeonBuiltinExpr()
3577 llvm::VectorType::get(EltTy, VTy->getNumElements() * 2); in EmitCommonNeonBuiltinExpr()
3584 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits(); in EmitCommonNeonBuiltinExpr()
3585 llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2); in EmitCommonNeonBuiltinExpr()
3587 llvm::VectorType::get(EltTy, VTy->getNumElements() * 2); in EmitCommonNeonBuiltinExpr()
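// The half-width, double-count vector type built above is the argument shape of
// the widening pairwise adds; a minimal source-level counterpart (assuming
// <arm_neon.h>):
#include <arm_neon.h>

int16x4_t pairwise_widen(int8x8_t v) {
  return vpaddl_s8(v);        // eight i8 pairs summed into four i16 lanes
}

int16x4_t pairwise_accumulate(int16x4_t acc, int8x8_t v) {
  return vpadal_s8(acc, v);   // the same widening add, plus an accumulator
}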
3611 Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic; in EmitCommonNeonBuiltinExpr()
3675 ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2); in EmitCommonNeonBuiltinExpr()
3690 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { in EmitCommonNeonBuiltinExpr()
3706 ConstantAggregateZero::get(Ty)); in EmitCommonNeonBuiltinExpr()
3718 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) in EmitCommonNeonBuiltinExpr()
3736 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { in EmitCommonNeonBuiltinExpr()
3748 assert(Int && "Expected valid intrinsic number"); in EmitCommonNeonBuiltinExpr()
3750 // Determine the type(s) of this overloaded AArch64 intrinsic. in EmitCommonNeonBuiltinExpr()
3754 llvm::Type *ResultType = ConvertType(E->getType()); in EmitCommonNeonBuiltinExpr()
3755 // AArch64 intrinsic one-element vector type cast to in EmitCommonNeonBuiltinExpr()
3763 llvm::Type *OTy = Op->getType(); in EmitAArch64CompareBuiltinExpr()
3770 OTy = BI->getOperand(0)->getType(); in EmitAArch64CompareBuiltinExpr()
3773 if (OTy->getScalarType()->isFloatingPointTy()) { in EmitAArch64CompareBuiltinExpr()
3791 llvm::VectorType *TblTy = cast<llvm::VectorType>(Ops[0]->getType()); in packTBLDVectorList()
3792 for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) { in packTBLDVectorList()
3797 int PairPos = 0, End = Ops.size() - 1; in packTBLDVectorList()
3805 // If there's an odd number of 64-bit lookup tables, fill the high 64-bit in packTBLDVectorList()
3806 // of the 128-bit lookup table with zero. in packTBLDVectorList()
3808 Value *ZeroTbl = ConstantAggregateZero::get(TblTy); in packTBLDVectorList()
3850 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint), in GetValueForARMHint()
3851 llvm::ConstantInt::get(Int32Ty, Value)); in GetValueForARMHint()
3864 assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64)) in EmitSpecialRegisterBuiltin()
3872 const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts(); in EmitSpecialRegisterBuiltin()
3873 SysReg = cast<StringLiteral>(SysRegStrExpr)->getString(); in EmitSpecialRegisterBuiltin()
3876 llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) }; in EmitSpecialRegisterBuiltin()
3877 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops); in EmitSpecialRegisterBuiltin()
3878 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName); in EmitSpecialRegisterBuiltin()
3882 bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32); in EmitSpecialRegisterBuiltin()
3883 assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64)) in EmitSpecialRegisterBuiltin()
3884 && "Can't fit 64-bit value in 32-bit register"); in EmitSpecialRegisterBuiltin()
3887 llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types); in EmitSpecialRegisterBuiltin()
3894 if (ValueType->isPointerTy()) in EmitSpecialRegisterBuiltin()
3901 llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types); in EmitSpecialRegisterBuiltin()
3902 llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1)); in EmitSpecialRegisterBuiltin()
3909 if (ValueType->isPointerTy()) { in EmitSpecialRegisterBuiltin()
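// A minimal usage sketch of the read_register/write_register lowering above
// (assuming the ACLE named-register builtins; the register names here are
// examples only):
#include <stdint.h>

static inline uint64_t read_virtual_counter(void) {
  return __builtin_arm_rsr64("cntvct_el0");   // llvm.read_register.i64 + metadata
}

static inline void set_thread_pointer(uint64_t v) {
  __builtin_arm_wsr64("tpidr_el0", v);        // llvm.write_register.i64 + metadata
}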
3918 /// Return true if BuiltinID is an overloaded Neon intrinsic with an extra
3962 llvm::FunctionType::get(VoidTy, /*Variadic=*/false); in EmitARMBuiltinExpr()
3965 if (!E->getArg(0)->EvaluateAsInt(Value, CGM.getContext())) in EmitARMBuiltinExpr()
3971 IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "", in EmitARMBuiltinExpr()
3973 : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "", in EmitARMBuiltinExpr()
3980 Value *Option = EmitScalarExpr(E->getArg(0)); in EmitARMBuiltinExpr()
3981 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option); in EmitARMBuiltinExpr()
3985 Value *Address = EmitScalarExpr(E->getArg(0)); in EmitARMBuiltinExpr()
3986 Value *RW = EmitScalarExpr(E->getArg(1)); in EmitARMBuiltinExpr()
3987 Value *IsData = EmitScalarExpr(E->getArg(2)); in EmitARMBuiltinExpr()
3990 Value *Locality = llvm::ConstantInt::get(Int32Ty, 3); in EmitARMBuiltinExpr()
3992 Value *F = CGM.getIntrinsic(Intrinsic::prefetch); in EmitARMBuiltinExpr()
3997 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_rbit), in EmitARMBuiltinExpr()
3998 EmitScalarExpr(E->getArg(0)), in EmitARMBuiltinExpr()
4003 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments"); in EmitARMBuiltinExpr()
4004 const FunctionDecl *FD = E->getDirectCallee(); in EmitARMBuiltinExpr()
4007 Ops[i] = EmitScalarExpr(E->getArg(i)); in EmitARMBuiltinExpr()
4008 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType()); in EmitARMBuiltinExpr()
4010 StringRef Name = FD->getName(); in EmitARMBuiltinExpr()
4021 F = CGM.getIntrinsic(Intrinsic::arm_mcrr); in EmitARMBuiltinExpr()
4024 F = CGM.getIntrinsic(Intrinsic::arm_mcrr2); in EmitARMBuiltinExpr()
4029 // the intrinsic has 4 because Rt and Rt2 in EmitARMBuiltinExpr()
4031 // bit integer in the intrinsic definition in EmitARMBuiltinExpr()
4035 Value *Coproc = EmitScalarExpr(E->getArg(0)); in EmitARMBuiltinExpr()
4036 Value *Opc1 = EmitScalarExpr(E->getArg(1)); in EmitARMBuiltinExpr()
4037 Value *RtAndRt2 = EmitScalarExpr(E->getArg(2)); in EmitARMBuiltinExpr()
4038 Value *CRm = EmitScalarExpr(E->getArg(3)); in EmitARMBuiltinExpr()
4040 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32); in EmitARMBuiltinExpr()
4055 F = CGM.getIntrinsic(Intrinsic::arm_mrrc); in EmitARMBuiltinExpr()
4058 F = CGM.getIntrinsic(Intrinsic::arm_mrrc2); in EmitARMBuiltinExpr()
4062 Value *Coproc = EmitScalarExpr(E->getArg(0)); in EmitARMBuiltinExpr()
4063 Value *Opc1 = EmitScalarExpr(E->getArg(1)); in EmitARMBuiltinExpr()
4064 Value *CRm = EmitScalarExpr(E->getArg(2)); in EmitARMBuiltinExpr()
4075 Value *ShiftCast = llvm::ConstantInt::get(Int64Ty, 32); in EmitARMBuiltinExpr()
4079 return Builder.CreateBitCast(RtAndRt2, ConvertType(E->getType())); in EmitARMBuiltinExpr()
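// A minimal sketch of the Rt:Rt2 packing handled above (the coprocessor, opcode
// and CRm numbers are placeholders chosen only for illustration):
#include <stdint.h>

static inline uint64_t read_cp_pair(void) {
  // MRRC produces two 32-bit registers; CodeGen recombines them into one
  // 64-bit result with the shift-by-32/or sequence emitted above.
  return __builtin_arm_mrrc(15, 0, 14);
}

static inline void write_cp_pair(uint64_t v) {
  // MCRR takes a single 64-bit value; CodeGen splits it back into two halves.
  __builtin_arm_mcrr(15, 0, v, 14);
}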
4085 getContext().getTypeSize(E->getType()) == 64) || in EmitARMBuiltinExpr()
4092 F = CGM.getIntrinsic(Intrinsic::arm_ldaexd); in EmitARMBuiltinExpr()
4097 F = CGM.getIntrinsic(Intrinsic::arm_ldrexd); in EmitARMBuiltinExpr()
4101 Value *LdPtr = EmitScalarExpr(E->getArg(0)); in EmitARMBuiltinExpr()
4110 Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32); in EmitARMBuiltinExpr()
4113 return Builder.CreateBitCast(Val, ConvertType(E->getType())); in EmitARMBuiltinExpr()
4118 Value *LoadAddr = EmitScalarExpr(E->getArg(0)); in EmitARMBuiltinExpr()
4120 QualType Ty = E->getType(); in EmitARMBuiltinExpr()
4122 llvm::Type *IntResTy = llvm::IntegerType::get(getLLVMContext(), in EmitARMBuiltinExpr()
4124 LoadAddr = Builder.CreateBitCast(LoadAddr, IntResTy->getPointerTo()); in EmitARMBuiltinExpr()
4127 ? Intrinsic::arm_ldaex in EmitARMBuiltinExpr()
4128 : Intrinsic::arm_ldrex, in EmitARMBuiltinExpr()
4129 LoadAddr->getType()); in EmitARMBuiltinExpr()
4132 if (RealResTy->isPointerTy()) in EmitARMBuiltinExpr()
4143 getContext().getTypeSize(E->getArg(0)->getType()) == 64)) { in EmitARMBuiltinExpr()
4145 ? Intrinsic::arm_stlexd in EmitARMBuiltinExpr()
4146 : Intrinsic::arm_strexd); in EmitARMBuiltinExpr()
4147 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, nullptr); in EmitARMBuiltinExpr()
4149 Address Tmp = CreateMemTemp(E->getArg(0)->getType()); in EmitARMBuiltinExpr()
4150 Value *Val = EmitScalarExpr(E->getArg(0)); in EmitARMBuiltinExpr()
4158 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy); in EmitARMBuiltinExpr()
4164 Value *StoreVal = EmitScalarExpr(E->getArg(0)); in EmitARMBuiltinExpr()
4165 Value *StoreAddr = EmitScalarExpr(E->getArg(1)); in EmitARMBuiltinExpr()
4167 QualType Ty = E->getArg(0)->getType(); in EmitARMBuiltinExpr()
4168 llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(), in EmitARMBuiltinExpr()
4170 StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo()); in EmitARMBuiltinExpr()
4172 if (StoreVal->getType()->isPointerTy()) in EmitARMBuiltinExpr()
4180 ? Intrinsic::arm_stlex in EmitARMBuiltinExpr()
4181 : Intrinsic::arm_strex, in EmitARMBuiltinExpr()
4182 StoreAddr->getType()); in EmitARMBuiltinExpr()
4187 Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex); in EmitARMBuiltinExpr()
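// A minimal usage sketch for the exclusive-access builtins lowered above
// (assuming an ARM target): the classic load-exclusive/store-exclusive retry
// loop; __builtin_arm_strex returns 0 on success, and __builtin_arm_clrex
// simply drops an exclusive monitor without storing.
#include <stdint.h>

static inline void atomic_add_u32(volatile uint32_t *p, uint32_t n) {
  uint32_t old;
  do {
    old = __builtin_arm_ldrex(p);             // llvm.arm.ldrex / ldaex
  } while (__builtin_arm_strex(old + n, p));  // llvm.arm.strex / stlex
}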
4192 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic; in EmitARMBuiltinExpr()
4195 CRCIntrinsicID = Intrinsic::arm_crc32b; break; in EmitARMBuiltinExpr()
4197 CRCIntrinsicID = Intrinsic::arm_crc32cb; break; in EmitARMBuiltinExpr()
4199 CRCIntrinsicID = Intrinsic::arm_crc32h; break; in EmitARMBuiltinExpr()
4201 CRCIntrinsicID = Intrinsic::arm_crc32ch; break; in EmitARMBuiltinExpr()
4204 CRCIntrinsicID = Intrinsic::arm_crc32w; break; in EmitARMBuiltinExpr()
4207 CRCIntrinsicID = Intrinsic::arm_crc32cw; break; in EmitARMBuiltinExpr()
4210 if (CRCIntrinsicID != Intrinsic::not_intrinsic) { in EmitARMBuiltinExpr()
4211 Value *Arg0 = EmitScalarExpr(E->getArg(0)); in EmitARMBuiltinExpr()
4212 Value *Arg1 = EmitScalarExpr(E->getArg(1)); in EmitARMBuiltinExpr()
4218 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32); in EmitARMBuiltinExpr()
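// A minimal usage sketch for the CRC builtins dispatched above (assuming a
// target with the CRC extension, e.g. -march=armv8-a+crc):
#include <stddef.h>
#include <stdint.h>

static uint32_t crc32_bytes(uint32_t crc, const uint8_t *p, size_t n) {
  for (size_t i = 0; i < n; ++i)
    crc = __builtin_arm_crc32b(crc, p[i]);   // one CRC32B instruction per byte
  return crc;
}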
4272 auto getAlignmentValue32 = [&](Address addr) -> Value* { in EmitARMBuiltinExpr()
4280 unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0); in EmitARMBuiltinExpr()
4306 // Get the alignment for the argument in addition to the value; in EmitARMBuiltinExpr()
4308 PtrOp0 = EmitPointerWithAlignment(E->getArg(0)); in EmitARMBuiltinExpr()
4330 // Get the alignment for the argument in addition to the value; in EmitARMBuiltinExpr()
4332 PtrOp1 = EmitPointerWithAlignment(E->getArg(1)); in EmitARMBuiltinExpr()
4339 Ops.push_back(EmitScalarExpr(E->getArg(i))); in EmitARMBuiltinExpr()
4342 // that the generated intrinsic gets a ConstantInt. in EmitARMBuiltinExpr()
4344 bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext()); in EmitARMBuiltinExpr()
4346 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result)); in EmitARMBuiltinExpr()
4378 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops, in EmitARMBuiltinExpr()
4381 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops, in EmitARMBuiltinExpr()
4384 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops, in EmitARMBuiltinExpr()
4387 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops, in EmitARMBuiltinExpr()
4391 // the first argument, but the LLVM intrinsic expects it as the third one. in EmitARMBuiltinExpr()
4395 Intrinsic::arm_mcr : Intrinsic::arm_mcr2); in EmitARMBuiltinExpr()
4401 // Get the last argument, which specifies the vector type. in EmitARMBuiltinExpr()
4404 const Expr *Arg = E->getArg(E->getNumArgs()-1); in EmitARMBuiltinExpr()
4405 if (!Arg->isIntegerConstantExpr(Result, getContext())) in EmitARMBuiltinExpr()
4419 unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr; in EmitARMBuiltinExpr()
4421 // Call the appropriate intrinsic. in EmitARMBuiltinExpr()
4426 // Determine the type of this overloaded NEON intrinsic. in EmitARMBuiltinExpr()
4443 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic, in EmitARMBuiltinExpr()
4444 Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1); in EmitARMBuiltinExpr()
4450 // Handle 64-bit integer elements as a special case. Use shuffles of in EmitARMBuiltinExpr()
4451 // one-element vectors to avoid poor code for i64 in the backend. in EmitARMBuiltinExpr()
4452 if (VTy->getElementType()->isIntegerTy(64)) { in EmitARMBuiltinExpr()
4455 uint32_t Lane = cast<ConstantInt>(Ops[2])->getZExtValue(); in EmitARMBuiltinExpr()
4456 Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane)); in EmitARMBuiltinExpr()
4458 // Load the value as a one-element vector. in EmitARMBuiltinExpr()
4459 Ty = llvm::VectorType::get(VTy->getElementType(), 1); in EmitARMBuiltinExpr()
4461 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys); in EmitARMBuiltinExpr()
4465 uint32_t Indices[] = {1 - Lane, Lane}; in EmitARMBuiltinExpr()
4466 SV = llvm::ConstantDataVector::get(getLLVMContext(), Indices); in EmitARMBuiltinExpr()
4472 PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType()); in EmitARMBuiltinExpr()
4479 // Handle 64-bit elements as a special-case. There is no "dup" needed. in EmitARMBuiltinExpr()
4480 if (VTy->getElementType()->getPrimitiveSizeInBits() == 64) { in EmitARMBuiltinExpr()
4483 Int = Intrinsic::arm_neon_vld2; in EmitARMBuiltinExpr()
4486 Int = Intrinsic::arm_neon_vld3; in EmitARMBuiltinExpr()
4489 Int = Intrinsic::arm_neon_vld4; in EmitARMBuiltinExpr()
4491 default: llvm_unreachable("unknown vld_dup intrinsic?"); in EmitARMBuiltinExpr()
4497 Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); in EmitARMBuiltinExpr()
4503 Int = Intrinsic::arm_neon_vld2lane; in EmitARMBuiltinExpr()
4506 Int = Intrinsic::arm_neon_vld3lane; in EmitARMBuiltinExpr()
4509 Int = Intrinsic::arm_neon_vld4lane; in EmitARMBuiltinExpr()
4511 default: llvm_unreachable("unknown vld_dup intrinsic?"); in EmitARMBuiltinExpr()
4515 llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType()); in EmitARMBuiltinExpr()
4519 Args.append(STy->getNumElements(), UndefValue::get(Ty)); in EmitARMBuiltinExpr()
4521 llvm::Constant *CI = ConstantInt::get(Int32Ty, 0); in EmitARMBuiltinExpr()
4527 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { in EmitARMBuiltinExpr()
4531 Elt = Builder.CreateBitCast(Elt, Val->getType()); in EmitARMBuiltinExpr()
4534 Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); in EmitARMBuiltinExpr()
4540 usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns; in EmitARMBuiltinExpr()
4544 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty), in EmitARMBuiltinExpr()
4547 Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns; in EmitARMBuiltinExpr()
4551 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty), in EmitARMBuiltinExpr()
4555 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty), in EmitARMBuiltinExpr()
4558 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty), in EmitARMBuiltinExpr()
4565 Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts; in EmitARMBuiltinExpr()
4574 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty), in EmitARMBuiltinExpr()
4582 // Handle 64-bit integer elements as a special case. Use a shuffle to get in EmitARMBuiltinExpr()
4583 // a one-element vector and avoid poor code for i64 in the backend. in EmitARMBuiltinExpr()
4584 if (VTy->getElementType()->isIntegerTy(64)) { in EmitARMBuiltinExpr()
4586 Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2])); in EmitARMBuiltinExpr()
4589 llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()}; in EmitARMBuiltinExpr()
4590 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, in EmitARMBuiltinExpr()
4597 Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); in EmitARMBuiltinExpr()
4602 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1), in EmitARMBuiltinExpr()
4605 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2), in EmitARMBuiltinExpr()
4608 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3), in EmitARMBuiltinExpr()
4611 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4), in EmitARMBuiltinExpr()
4614 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1), in EmitARMBuiltinExpr()
4617 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2), in EmitARMBuiltinExpr()
4620 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3), in EmitARMBuiltinExpr()
4623 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4), in EmitARMBuiltinExpr()
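// A minimal usage sketch for the table builtins dispatched above (assuming
// <arm_neon.h>): vtbl yields 0 for out-of-range indices, while vtbx leaves the
// corresponding lane of its first operand unchanged.
#include <arm_neon.h>

uint8x8_t shuffle_bytes(uint8x8_t table, uint8x8_t idx) {
  return vtbl1_u8(table, idx);
}

uint8x8_t shuffle_bytes_keep(uint8x8_t fallback, uint8x8_t table, uint8x8_t idx) {
  return vtbx1_u8(fallback, table, idx);
}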
4665 assert(E->getNumArgs() >= 3); in EmitAArch64TblBuiltinExpr()
4667 // Get the last argument, which specifies the vector type. in EmitAArch64TblBuiltinExpr()
4669 const Expr *Arg = E->getArg(E->getNumArgs() - 1); in EmitAArch64TblBuiltinExpr()
4670 if (!Arg->isIntegerConstantExpr(Result, CGF.getContext())) in EmitAArch64TblBuiltinExpr()
4673 // Determine the type of this overloaded NEON intrinsic. in EmitAArch64TblBuiltinExpr()
4686 Ops[1], Ty, Intrinsic::aarch64_neon_tbl1, in EmitAArch64TblBuiltinExpr()
4691 Ops[2], Ty, Intrinsic::aarch64_neon_tbl1, in EmitAArch64TblBuiltinExpr()
4696 Ops[3], Ty, Intrinsic::aarch64_neon_tbl2, in EmitAArch64TblBuiltinExpr()
4701 Ops[4], Ty, Intrinsic::aarch64_neon_tbl2, in EmitAArch64TblBuiltinExpr()
4707 Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1"); in EmitAArch64TblBuiltinExpr()
4709 llvm::Constant *EightV = ConstantInt::get(Ty, 8); in EmitAArch64TblBuiltinExpr()
4719 Ops[3], Ty, Intrinsic::aarch64_neon_tbx1, in EmitAArch64TblBuiltinExpr()
4725 Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2"); in EmitAArch64TblBuiltinExpr()
4727 llvm::Constant *TwentyFourV = ConstantInt::get(Ty, 24); in EmitAArch64TblBuiltinExpr()
4738 Ops[5], Ty, Intrinsic::aarch64_neon_tbx2, in EmitAArch64TblBuiltinExpr()
4743 Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break; in EmitAArch64TblBuiltinExpr()
4746 Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break; in EmitAArch64TblBuiltinExpr()
4749 Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break; in EmitAArch64TblBuiltinExpr()
4752 Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break; in EmitAArch64TblBuiltinExpr()
4755 Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break; in EmitAArch64TblBuiltinExpr()
4758 Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break; in EmitAArch64TblBuiltinExpr()
4761 Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break; in EmitAArch64TblBuiltinExpr()
4764 Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break; in EmitAArch64TblBuiltinExpr()
4776 llvm::Type *VTy = llvm::VectorType::get(Int16Ty, 4); in vectorWrapScalar16()
4778 Value *V = UndefValue::get(VTy); in vectorWrapScalar16()
4779 llvm::Constant *CI = ConstantInt::get(SizeTy, 0); in vectorWrapScalar16()
4786 unsigned HintID = static_cast<unsigned>(-1); in EmitAArch64BuiltinExpr()
4809 if (HintID != static_cast<unsigned>(-1)) { in EmitAArch64BuiltinExpr()
4810 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint); in EmitAArch64BuiltinExpr()
4811 return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID)); in EmitAArch64BuiltinExpr()
4815 Value *Address = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
4816 Value *RW = EmitScalarExpr(E->getArg(1)); in EmitAArch64BuiltinExpr()
4817 Value *CacheLevel = EmitScalarExpr(E->getArg(2)); in EmitAArch64BuiltinExpr()
4818 Value *RetentionPolicy = EmitScalarExpr(E->getArg(3)); in EmitAArch64BuiltinExpr()
4819 Value *IsData = EmitScalarExpr(E->getArg(4)); in EmitAArch64BuiltinExpr()
4822 if (cast<llvm::ConstantInt>(RetentionPolicy)->isZero()) { in EmitAArch64BuiltinExpr()
4824 Locality = llvm::ConstantInt::get(Int32Ty, in EmitAArch64BuiltinExpr()
4825 -cast<llvm::ConstantInt>(CacheLevel)->getValue() + 3); in EmitAArch64BuiltinExpr()
4828 Locality = llvm::ConstantInt::get(Int32Ty, 0); in EmitAArch64BuiltinExpr()
4831 // FIXME: We need AArch64 specific LLVM intrinsic if we want to specify in EmitAArch64BuiltinExpr()
4833 Value *F = CGM.getIntrinsic(Intrinsic::prefetch); in EmitAArch64BuiltinExpr()
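// A minimal sketch of the locality mapping computed above: a "keep" request
// becomes llvm.prefetch locality 3/2/1 for cache level 0/1/2, and any streaming
// request becomes locality 0. The argument order follows the EmitScalarExpr
// calls above; the concrete values here are examples only.
static inline void warm_line_for_read(const void *p) {
  // address, read(0)/write(1), cache level (0 = closest), retention (0 = keep),
  // data (1) vs. instruction (0)
  __builtin_arm_prefetch(p, 0, 0, 0, 1);   // lowered to llvm.prefetch(p, 0, 3, 1)
}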
4838 assert((getContext().getTypeSize(E->getType()) == 32) && in EmitAArch64BuiltinExpr()
4840 llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
4842 CGM.getIntrinsic(Intrinsic::aarch64_rbit, Arg->getType()), Arg, "rbit"); in EmitAArch64BuiltinExpr()
4845 assert((getContext().getTypeSize(E->getType()) == 64) && in EmitAArch64BuiltinExpr()
4847 llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
4849 CGM.getIntrinsic(Intrinsic::aarch64_rbit, Arg->getType()), Arg, "rbit"); in EmitAArch64BuiltinExpr()
4853 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments"); in EmitAArch64BuiltinExpr()
4854 const FunctionDecl *FD = E->getDirectCallee(); in EmitAArch64BuiltinExpr()
4857 Ops[i] = EmitScalarExpr(E->getArg(i)); in EmitAArch64BuiltinExpr()
4858 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType()); in EmitAArch64BuiltinExpr()
4860 StringRef Name = FD->getName(); in EmitAArch64BuiltinExpr()
4866 getContext().getTypeSize(E->getType()) == 128) { in EmitAArch64BuiltinExpr()
4868 ? Intrinsic::aarch64_ldaxp in EmitAArch64BuiltinExpr()
4869 : Intrinsic::aarch64_ldxp); in EmitAArch64BuiltinExpr()
4871 Value *LdPtr = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
4877 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128); in EmitAArch64BuiltinExpr()
4881 Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64); in EmitAArch64BuiltinExpr()
4884 return Builder.CreateBitCast(Val, ConvertType(E->getType())); in EmitAArch64BuiltinExpr()
4887 Value *LoadAddr = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
4889 QualType Ty = E->getType(); in EmitAArch64BuiltinExpr()
4891 llvm::Type *IntResTy = llvm::IntegerType::get(getLLVMContext(), in EmitAArch64BuiltinExpr()
4893 LoadAddr = Builder.CreateBitCast(LoadAddr, IntResTy->getPointerTo()); in EmitAArch64BuiltinExpr()
4896 ? Intrinsic::aarch64_ldaxr in EmitAArch64BuiltinExpr()
4897 : Intrinsic::aarch64_ldxr, in EmitAArch64BuiltinExpr()
4898 LoadAddr->getType()); in EmitAArch64BuiltinExpr()
4901 if (RealResTy->isPointerTy()) in EmitAArch64BuiltinExpr()
4910 getContext().getTypeSize(E->getArg(0)->getType()) == 128) { in EmitAArch64BuiltinExpr()
4912 ? Intrinsic::aarch64_stlxp in EmitAArch64BuiltinExpr()
4913 : Intrinsic::aarch64_stxp); in EmitAArch64BuiltinExpr()
4914 llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty, nullptr); in EmitAArch64BuiltinExpr()
4916 Address Tmp = CreateMemTemp(E->getArg(0)->getType()); in EmitAArch64BuiltinExpr()
4917 EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true); in EmitAArch64BuiltinExpr()
4924 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
4931 Value *StoreVal = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
4932 Value *StoreAddr = EmitScalarExpr(E->getArg(1)); in EmitAArch64BuiltinExpr()
4934 QualType Ty = E->getArg(0)->getType(); in EmitAArch64BuiltinExpr()
4935 llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(), in EmitAArch64BuiltinExpr()
4937 StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo()); in EmitAArch64BuiltinExpr()
4939 if (StoreVal->getType()->isPointerTy()) in EmitAArch64BuiltinExpr()
4947 ? Intrinsic::aarch64_stlxr in EmitAArch64BuiltinExpr()
4948 : Intrinsic::aarch64_stxr, in EmitAArch64BuiltinExpr()
4949 StoreAddr->getType()); in EmitAArch64BuiltinExpr()
4954 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex); in EmitAArch64BuiltinExpr()
4959 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic; in EmitAArch64BuiltinExpr()
4962 CRCIntrinsicID = Intrinsic::aarch64_crc32b; break; in EmitAArch64BuiltinExpr()
4964 CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break; in EmitAArch64BuiltinExpr()
4966 CRCIntrinsicID = Intrinsic::aarch64_crc32h; break; in EmitAArch64BuiltinExpr()
4968 CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break; in EmitAArch64BuiltinExpr()
4970 CRCIntrinsicID = Intrinsic::aarch64_crc32w; break; in EmitAArch64BuiltinExpr()
4972 CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break; in EmitAArch64BuiltinExpr()
4974 CRCIntrinsicID = Intrinsic::aarch64_crc32x; break; in EmitAArch64BuiltinExpr()
4976 CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break; in EmitAArch64BuiltinExpr()
4979 if (CRCIntrinsicID != Intrinsic::not_intrinsic) { in EmitAArch64BuiltinExpr()
4980 Value *Arg0 = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
4981 Value *Arg1 = EmitScalarExpr(E->getArg(1)); in EmitAArch64BuiltinExpr()
4984 llvm::Type *DataTy = F->getFunctionType()->getParamType(1); in EmitAArch64BuiltinExpr()
5028 for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) { in EmitAArch64BuiltinExpr()
5030 Ops.push_back(EmitScalarExpr(E->getArg(i))); in EmitAArch64BuiltinExpr()
5033 // that the generated intrinsic gets a ConstantInt. in EmitAArch64BuiltinExpr()
5035 bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext()); in EmitAArch64BuiltinExpr()
5038 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result)); in EmitAArch64BuiltinExpr()
5047 Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1))); in EmitAArch64BuiltinExpr()
5049 assert(Result && "SISD intrinsic should have been handled"); in EmitAArch64BuiltinExpr()
5054 const Expr *Arg = E->getArg(E->getNumArgs()-1); in EmitAArch64BuiltinExpr()
5056 if (Arg->isIntegerConstantExpr(Result, getContext())) in EmitAArch64BuiltinExpr()
5057 // Determine the type of this overloaded NEON intrinsic. in EmitAArch64BuiltinExpr()
5063 // Handle non-overloaded intrinsics first. in EmitAArch64BuiltinExpr()
5068 Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy); in EmitAArch64BuiltinExpr()
5074 return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr); in EmitAArch64BuiltinExpr()
5082 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
5083 bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64; in EmitAArch64BuiltinExpr()
5097 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
5098 bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64; in EmitAArch64BuiltinExpr()
5107 llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2); in EmitAArch64BuiltinExpr()
5108 Value *Vec = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
5111 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0); in EmitAArch64BuiltinExpr()
5112 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1); in EmitAArch64BuiltinExpr()
5120 llvm::VectorType::get(DoubleTy, 2); in EmitAArch64BuiltinExpr()
5121 Value *Vec = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
5124 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0); in EmitAArch64BuiltinExpr()
5125 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1); in EmitAArch64BuiltinExpr()
5133 llvm::VectorType::get(FloatTy, 2); in EmitAArch64BuiltinExpr()
5134 Value *Vec = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
5137 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0); in EmitAArch64BuiltinExpr()
5138 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1); in EmitAArch64BuiltinExpr()
5147 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
5149 Ops[0], ConvertType(E->getCallReturnType(getContext())), in EmitAArch64BuiltinExpr()
5154 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
5156 Ops[0], ConvertType(E->getCallReturnType(getContext())), in EmitAArch64BuiltinExpr()
5161 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
5163 Ops[0], ConvertType(E->getCallReturnType(getContext())), in EmitAArch64BuiltinExpr()
5168 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
5170 Ops[0], ConvertType(E->getCallReturnType(getContext())), in EmitAArch64BuiltinExpr()
5175 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
5177 Ops[0], ConvertType(E->getCallReturnType(getContext())), in EmitAArch64BuiltinExpr()
5181 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
5201 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
5221 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
5251 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
5259 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
5277 Ops.push_back(EmitScalarExpr(E->getArg(2))); in EmitAArch64BuiltinExpr()
5282 llvm::VectorType::get(DoubleTy, 1)); in EmitAArch64BuiltinExpr()
5283 Ops.push_back(EmitScalarExpr(E->getArg(2))); in EmitAArch64BuiltinExpr()
5288 llvm::VectorType::get(DoubleTy, 2)); in EmitAArch64BuiltinExpr()
5289 Ops.push_back(EmitScalarExpr(E->getArg(2))); in EmitAArch64BuiltinExpr()
5294 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int8Ty, 8)); in EmitAArch64BuiltinExpr()
5295 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
5299 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int8Ty, 16)); in EmitAArch64BuiltinExpr()
5300 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
5304 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int16Ty, 4)); in EmitAArch64BuiltinExpr()
5305 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
5309 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int16Ty, 8)); in EmitAArch64BuiltinExpr()
5310 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
5314 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 2)); in EmitAArch64BuiltinExpr()
5315 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
5319 llvm::VectorType::get(FloatTy, 2)); in EmitAArch64BuiltinExpr()
5320 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
5324 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4)); in EmitAArch64BuiltinExpr()
5325 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
5329 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 1)); in EmitAArch64BuiltinExpr()
5330 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
5334 llvm::VectorType::get(DoubleTy, 1)); in EmitAArch64BuiltinExpr()
5335 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
5339 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2)); in EmitAArch64BuiltinExpr()
5340 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
5344 llvm::VectorType::get(FloatTy, 2)); in EmitAArch64BuiltinExpr()
5345 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
5349 llvm::VectorType::get(DoubleTy, 1)); in EmitAArch64BuiltinExpr()
5350 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
5355 llvm::VectorType::get(FloatTy, 4)); in EmitAArch64BuiltinExpr()
5356 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
5361 llvm::VectorType::get(DoubleTy, 2)); in EmitAArch64BuiltinExpr()
5362 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
5366 return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd"); in EmitAArch64BuiltinExpr()
5369 return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd"); in EmitAArch64BuiltinExpr()
5374 ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2)))); in EmitAArch64BuiltinExpr()
5375 llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4); in EmitAArch64BuiltinExpr()
5376 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy), in EmitAArch64BuiltinExpr()
5378 Constant *CI = ConstantInt::get(SizeTy, 0); in EmitAArch64BuiltinExpr()
5382 ? Intrinsic::aarch64_neon_sqadd in EmitAArch64BuiltinExpr()
5383 : Intrinsic::aarch64_neon_sqsub; in EmitAArch64BuiltinExpr()
5387 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
5389 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty), in EmitAArch64BuiltinExpr()
5395 ? Intrinsic::aarch64_neon_uqshl in EmitAArch64BuiltinExpr()
5396 : Intrinsic::aarch64_neon_sqshl; in EmitAArch64BuiltinExpr()
5397 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
5404 ? Intrinsic::aarch64_neon_urshl in EmitAArch64BuiltinExpr()
5405 : Intrinsic::aarch64_neon_srshl; in EmitAArch64BuiltinExpr()
5406 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
5407 int SV = cast<ConstantInt>(Ops[1])->getSExtValue(); in EmitAArch64BuiltinExpr()
5408 Ops[1] = ConstantInt::get(Int64Ty, -SV); in EmitAArch64BuiltinExpr()
5414 ? Intrinsic::aarch64_neon_urshl in EmitAArch64BuiltinExpr()
5415 : Intrinsic::aarch64_neon_srshl; in EmitAArch64BuiltinExpr()
5417 Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2)))); in EmitAArch64BuiltinExpr()
5424 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
5426 Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n"); in EmitAArch64BuiltinExpr()
5429 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
5431 Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63), in EmitAArch64BuiltinExpr()
5432 Amt->getZExtValue())), in EmitAArch64BuiltinExpr()
5436 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
5437 uint64_t ShiftAmt = Amt->getZExtValue(); in EmitAArch64BuiltinExpr()
5438 // Right-shifting an unsigned value by its size yields 0. in EmitAArch64BuiltinExpr()
5440 return ConstantInt::get(Int64Ty, 0); in EmitAArch64BuiltinExpr()
5441 return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt), in EmitAArch64BuiltinExpr()
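// A minimal usage sketch for the scalar shifts handled above (assuming
// <arm_neon.h> on AArch64): a count of 64 is legal at the source level and is
// constant-folded here to zero (unsigned) or clamped to a shift by 63 (signed).
#include <arm_neon.h>

uint64_t shr_all_u(uint64_t x) { return vshrd_n_u64(x, 64); }   // always 0
int64_t  shr_all_s(int64_t  x) { return vshrd_n_s64(x, 64); }   // all sign bits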
5445 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2))); in EmitAArch64BuiltinExpr()
5447 Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63), in EmitAArch64BuiltinExpr()
5448 Amt->getZExtValue())), in EmitAArch64BuiltinExpr()
5453 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2))); in EmitAArch64BuiltinExpr()
5454 uint64_t ShiftAmt = Amt->getZExtValue(); in EmitAArch64BuiltinExpr()
5455 // Right-shifting an unsigned value by its size yields 0. in EmitAArch64BuiltinExpr()
5459 Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt), in EmitAArch64BuiltinExpr()
5467 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)), in EmitAArch64BuiltinExpr()
5472 llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4); in EmitAArch64BuiltinExpr()
5473 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy), in EmitAArch64BuiltinExpr()
5475 Constant *CI = ConstantInt::get(SizeTy, 0); in EmitAArch64BuiltinExpr()
5481 ? Intrinsic::aarch64_neon_sqadd in EmitAArch64BuiltinExpr()
5482 : Intrinsic::aarch64_neon_sqsub; in EmitAArch64BuiltinExpr()
5489 ProductOps.push_back(EmitScalarExpr(E->getArg(2))); in EmitAArch64BuiltinExpr()
5491 EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar), in EmitAArch64BuiltinExpr()
5495 ? Intrinsic::aarch64_neon_sqadd in EmitAArch64BuiltinExpr()
5496 : Intrinsic::aarch64_neon_sqsub; in EmitAArch64BuiltinExpr()
5503 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)), in EmitAArch64BuiltinExpr()
5509 EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar), in EmitAArch64BuiltinExpr()
5515 ? Intrinsic::aarch64_neon_sqadd in EmitAArch64BuiltinExpr()
5516 : Intrinsic::aarch64_neon_sqsub; in EmitAArch64BuiltinExpr()
5533 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic, in EmitAArch64BuiltinExpr()
5534 Builtin->NameHint, Builtin->TypeModifier, E, Ops, in EmitAArch64BuiltinExpr()
5568 llvm::VectorType::get(VTy->getElementType(), VTy->getNumElements() / 2) : in EmitAArch64BuiltinExpr()
5571 Value *SV = llvm::ConstantVector::getSplat(VTy->getNumElements(), cst); in EmitAArch64BuiltinExpr()
5576 Int = Intrinsic::fma; in EmitAArch64BuiltinExpr()
5582 if (VTy && VTy->getElementType() == DoubleTy) { in EmitAArch64BuiltinExpr()
5589 Value *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy); in EmitAArch64BuiltinExpr()
5593 Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty); in EmitAArch64BuiltinExpr()
5597 llvm::Type *STy = llvm::VectorType::get(VTy->getElementType(), in EmitAArch64BuiltinExpr()
5598 VTy->getNumElements() * 2); in EmitAArch64BuiltinExpr()
5600 Value* SV = llvm::ConstantVector::getSplat(VTy->getNumElements(), in EmitAArch64BuiltinExpr()
5607 Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty); in EmitAArch64BuiltinExpr()
5619 Ops.push_back(EmitScalarExpr(E->getArg(3))); in EmitAArch64BuiltinExpr()
5620 llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext())); in EmitAArch64BuiltinExpr()
5621 Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty); in EmitAArch64BuiltinExpr()
5627 Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull; in EmitAArch64BuiltinExpr()
5628 if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull; in EmitAArch64BuiltinExpr()
5633 Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax; in EmitAArch64BuiltinExpr()
5634 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax; in EmitAArch64BuiltinExpr()
5639 Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin; in EmitAArch64BuiltinExpr()
5640 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin; in EmitAArch64BuiltinExpr()
5645 Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd; in EmitAArch64BuiltinExpr()
5646 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd; in EmitAArch64BuiltinExpr()
5650 unsigned ArgElts = VTy->getNumElements(); in EmitAArch64BuiltinExpr()
5651 llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType()); in EmitAArch64BuiltinExpr()
5652 unsigned BitWidth = EltTy->getBitWidth(); in EmitAArch64BuiltinExpr()
5653 llvm::Type *ArgTy = llvm::VectorType::get( in EmitAArch64BuiltinExpr()
5654 llvm::IntegerType::get(getLLVMContext(), BitWidth/2), 2*ArgElts); in EmitAArch64BuiltinExpr()
5656 Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp; in EmitAArch64BuiltinExpr()
5661 llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType()); in EmitAArch64BuiltinExpr()
5667 Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp; in EmitAArch64BuiltinExpr()
5668 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp; in EmitAArch64BuiltinExpr()
5673 Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp; in EmitAArch64BuiltinExpr()
5674 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp; in EmitAArch64BuiltinExpr()
5678 Int = Intrinsic::aarch64_neon_fminnm; in EmitAArch64BuiltinExpr()
5682 Int = Intrinsic::aarch64_neon_fmaxnm; in EmitAArch64BuiltinExpr()
5685 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
5686 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy), in EmitAArch64BuiltinExpr()
5690 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
5691 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy), in EmitAArch64BuiltinExpr()
5695 Int = Intrinsic::aarch64_neon_sqshrun; in EmitAArch64BuiltinExpr()
5698 Int = Intrinsic::aarch64_neon_sqrshrun; in EmitAArch64BuiltinExpr()
5701 Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn; in EmitAArch64BuiltinExpr()
5704 Int = Intrinsic::aarch64_neon_rshrn; in EmitAArch64BuiltinExpr()
5707 Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn; in EmitAArch64BuiltinExpr()
5711 Int = Intrinsic::round; in EmitAArch64BuiltinExpr()
5716 Int = Intrinsic::nearbyint; in EmitAArch64BuiltinExpr()
5721 Int = Intrinsic::floor; in EmitAArch64BuiltinExpr()
5726 Int = Intrinsic::aarch64_neon_frintn; in EmitAArch64BuiltinExpr()
5731 Int = Intrinsic::ceil; in EmitAArch64BuiltinExpr()
5736 Int = Intrinsic::rint; in EmitAArch64BuiltinExpr()
5741 Int = Intrinsic::trunc; in EmitAArch64BuiltinExpr()
5807 Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas; in EmitAArch64BuiltinExpr()
5819 Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms; in EmitAArch64BuiltinExpr()
5831 Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns; in EmitAArch64BuiltinExpr()
5843 Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps; in EmitAArch64BuiltinExpr()
5849 Int = Intrinsic::aarch64_neon_fmulx; in EmitAArch64BuiltinExpr()
5867 return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd"); in EmitAArch64BuiltinExpr()
5870 Int = Intrinsic::aarch64_neon_fmaxnmp; in EmitAArch64BuiltinExpr()
5875 Int = Intrinsic::aarch64_neon_fminnmp; in EmitAArch64BuiltinExpr()
5880 Int = Intrinsic::sqrt; in EmitAArch64BuiltinExpr()
5886 Int = Intrinsic::aarch64_neon_rbit; in EmitAArch64BuiltinExpr()
5894 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; in EmitAArch64BuiltinExpr()
5896 VTy = llvm::VectorType::get(Int8Ty, 8); in EmitAArch64BuiltinExpr()
5898 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
5906 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; in EmitAArch64BuiltinExpr()
5908 VTy = llvm::VectorType::get(Int16Ty, 4); in EmitAArch64BuiltinExpr()
5910 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
5918 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; in EmitAArch64BuiltinExpr()
5920 VTy = llvm::VectorType::get(Int8Ty, 16); in EmitAArch64BuiltinExpr()
5922 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
5930 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; in EmitAArch64BuiltinExpr()
5932 VTy = llvm::VectorType::get(Int16Ty, 8); in EmitAArch64BuiltinExpr()
5934 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
5939 Int = Intrinsic::aarch64_neon_umaxv; in EmitAArch64BuiltinExpr()
5941 VTy = llvm::VectorType::get(Int8Ty, 8); in EmitAArch64BuiltinExpr()
5943 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
5948 Int = Intrinsic::aarch64_neon_umaxv; in EmitAArch64BuiltinExpr()
5950 VTy = llvm::VectorType::get(Int16Ty, 4); in EmitAArch64BuiltinExpr()
5952 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
5957 Int = Intrinsic::aarch64_neon_umaxv; in EmitAArch64BuiltinExpr()
5959 VTy = llvm::VectorType::get(Int8Ty, 16); in EmitAArch64BuiltinExpr()
5961 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
5966 Int = Intrinsic::aarch64_neon_umaxv; in EmitAArch64BuiltinExpr()
5968 VTy = llvm::VectorType::get(Int16Ty, 8); in EmitAArch64BuiltinExpr()
5970 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
5975 Int = Intrinsic::aarch64_neon_smaxv; in EmitAArch64BuiltinExpr()
5977 VTy = llvm::VectorType::get(Int8Ty, 8); in EmitAArch64BuiltinExpr()
5979 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
5984 Int = Intrinsic::aarch64_neon_smaxv; in EmitAArch64BuiltinExpr()
5986 VTy = llvm::VectorType::get(Int16Ty, 4); in EmitAArch64BuiltinExpr()
5988 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
5993 Int = Intrinsic::aarch64_neon_smaxv; in EmitAArch64BuiltinExpr()
5995 VTy = llvm::VectorType::get(Int8Ty, 16); in EmitAArch64BuiltinExpr()
5997 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
6002 Int = Intrinsic::aarch64_neon_smaxv; in EmitAArch64BuiltinExpr()
6004 VTy = llvm::VectorType::get(Int16Ty, 8); in EmitAArch64BuiltinExpr()
6006 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
6011 Int = Intrinsic::aarch64_neon_uminv; in EmitAArch64BuiltinExpr()
6013 VTy = llvm::VectorType::get(Int8Ty, 8); in EmitAArch64BuiltinExpr()
6015 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
6020 Int = Intrinsic::aarch64_neon_uminv; in EmitAArch64BuiltinExpr()
6022 VTy = llvm::VectorType::get(Int16Ty, 4); in EmitAArch64BuiltinExpr()
6024 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
6029 Int = Intrinsic::aarch64_neon_uminv; in EmitAArch64BuiltinExpr()
6031 VTy = llvm::VectorType::get(Int8Ty, 16); in EmitAArch64BuiltinExpr()
6033 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
6038 Int = Intrinsic::aarch64_neon_uminv; in EmitAArch64BuiltinExpr()
6040 VTy = llvm::VectorType::get(Int16Ty, 8); in EmitAArch64BuiltinExpr()
6042 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
6047 Int = Intrinsic::aarch64_neon_sminv; in EmitAArch64BuiltinExpr()
6049 VTy = llvm::VectorType::get(Int8Ty, 8); in EmitAArch64BuiltinExpr()
6051 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
6056 Int = Intrinsic::aarch64_neon_sminv; in EmitAArch64BuiltinExpr()
6058 VTy = llvm::VectorType::get(Int16Ty, 4); in EmitAArch64BuiltinExpr()
6060 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
6065 Int = Intrinsic::aarch64_neon_sminv; in EmitAArch64BuiltinExpr()
6067 VTy = llvm::VectorType::get(Int8Ty, 16); in EmitAArch64BuiltinExpr()
6069 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
6074 Int = Intrinsic::aarch64_neon_sminv; in EmitAArch64BuiltinExpr()
6076 VTy = llvm::VectorType::get(Int16Ty, 8); in EmitAArch64BuiltinExpr()
6078 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
6084 Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy); in EmitAArch64BuiltinExpr()
6088 Int = Intrinsic::aarch64_neon_uaddlv; in EmitAArch64BuiltinExpr()
6090 VTy = llvm::VectorType::get(Int8Ty, 8); in EmitAArch64BuiltinExpr()
6092 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
6097 Int = Intrinsic::aarch64_neon_uaddlv; in EmitAArch64BuiltinExpr()
6099 VTy = llvm::VectorType::get(Int16Ty, 4); in EmitAArch64BuiltinExpr()
6101 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
6105 Int = Intrinsic::aarch64_neon_uaddlv; in EmitAArch64BuiltinExpr()
6107 VTy = llvm::VectorType::get(Int8Ty, 16); in EmitAArch64BuiltinExpr()
6109 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
6114 Int = Intrinsic::aarch64_neon_uaddlv; in EmitAArch64BuiltinExpr()
6116 VTy = llvm::VectorType::get(Int16Ty, 8); in EmitAArch64BuiltinExpr()
6118 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
6122 Int = Intrinsic::aarch64_neon_saddlv; in EmitAArch64BuiltinExpr()
6124 VTy = llvm::VectorType::get(Int8Ty, 8); in EmitAArch64BuiltinExpr()
6126 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
6131 Int = Intrinsic::aarch64_neon_saddlv; in EmitAArch64BuiltinExpr()
6133 VTy = llvm::VectorType::get(Int16Ty, 4); in EmitAArch64BuiltinExpr()
6135 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
6139 Int = Intrinsic::aarch64_neon_saddlv; in EmitAArch64BuiltinExpr()
6141 VTy = llvm::VectorType::get(Int8Ty, 16); in EmitAArch64BuiltinExpr()
6143 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
6148 Int = Intrinsic::aarch64_neon_saddlv; in EmitAArch64BuiltinExpr()
6150 VTy = llvm::VectorType::get(Int16Ty, 8); in EmitAArch64BuiltinExpr()
6152 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
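// A minimal usage sketch for the across-lanes reductions dispatched above
// (assuming <arm_neon.h> on AArch64): each case calls the reduction intrinsic
// on the whole vector and then truncates the i32 result to the element width
// the ACLE intrinsic promises.
#include <arm_neon.h>

uint8_t  sum_lanes(uint8x8_t v)    { return vaddv_u8(v);   }  // aarch64.neon.uaddv
uint16_t sum_widened(uint8x16_t v) { return vaddlvq_u8(v); }  // aarch64.neon.uaddlv
int16_t  max_lane(int16x8_t v)     { return vmaxvq_s16(v); }  // aarch64.neon.smaxv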
6157 Int = Intrinsic::aarch64_neon_vsri; in EmitAArch64BuiltinExpr()
6163 Int = Intrinsic::aarch64_neon_vsli; in EmitAArch64BuiltinExpr()
6174 Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl; in EmitAArch64BuiltinExpr()
6183 // FIXME: Sharing loads & stores with 32-bit is complicated by the absence in EmitAArch64BuiltinExpr()
6191 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType()); in EmitAArch64BuiltinExpr()
6198 Int = Intrinsic::aarch64_neon_ld1x2; in EmitAArch64BuiltinExpr()
6202 Int = Intrinsic::aarch64_neon_ld1x3; in EmitAArch64BuiltinExpr()
6206 Int = Intrinsic::aarch64_neon_ld1x4; in EmitAArch64BuiltinExpr()
6211 Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); in EmitAArch64BuiltinExpr()
6221 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType()); in EmitAArch64BuiltinExpr()
6227 Int = Intrinsic::aarch64_neon_st1x2; in EmitAArch64BuiltinExpr()
6231 Int = Intrinsic::aarch64_neon_st1x3; in EmitAArch64BuiltinExpr()
6235 Int = Intrinsic::aarch64_neon_st1x4; in EmitAArch64BuiltinExpr()
6253 Ty = llvm::PointerType::getUnqual(VTy->getElementType()); in EmitAArch64BuiltinExpr()
6259 Value *V = UndefValue::get(Ty); in EmitAArch64BuiltinExpr()
6260 Ty = llvm::PointerType::getUnqual(VTy->getElementType()); in EmitAArch64BuiltinExpr()
6263 llvm::Constant *CI = ConstantInt::get(Int32Ty, 0); in EmitAArch64BuiltinExpr()
6271 Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); in EmitAArch64BuiltinExpr()
6279 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys); in EmitAArch64BuiltinExpr()
6282 llvm::PointerType::getUnqual(Ops[1]->getType())); in EmitAArch64BuiltinExpr()
6290 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys); in EmitAArch64BuiltinExpr()
6293 llvm::PointerType::getUnqual(Ops[1]->getType())); in EmitAArch64BuiltinExpr()
6301 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys); in EmitAArch64BuiltinExpr()
6304 llvm::PointerType::getUnqual(Ops[1]->getType())); in EmitAArch64BuiltinExpr()
6310 llvm::PointerType::getUnqual(VTy->getElementType()); in EmitAArch64BuiltinExpr()
6313 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys); in EmitAArch64BuiltinExpr()
6316 llvm::PointerType::getUnqual(Ops[1]->getType())); in EmitAArch64BuiltinExpr()
6322 llvm::PointerType::getUnqual(VTy->getElementType()); in EmitAArch64BuiltinExpr()
6325 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys); in EmitAArch64BuiltinExpr()
6328 llvm::PointerType::getUnqual(Ops[1]->getType())); in EmitAArch64BuiltinExpr()
6334 llvm::PointerType::getUnqual(VTy->getElementType()); in EmitAArch64BuiltinExpr()
6337 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys); in EmitAArch64BuiltinExpr()
6340 llvm::PointerType::getUnqual(Ops[1]->getType())); in EmitAArch64BuiltinExpr()
6345 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; in EmitAArch64BuiltinExpr()
6346 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys); in EmitAArch64BuiltinExpr()
6353 Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); in EmitAArch64BuiltinExpr()
6359 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; in EmitAArch64BuiltinExpr()
6360 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys); in EmitAArch64BuiltinExpr()
6368 Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); in EmitAArch64BuiltinExpr()
6374 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; in EmitAArch64BuiltinExpr()
6375 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys); in EmitAArch64BuiltinExpr()
6384 Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); in EmitAArch64BuiltinExpr()
6392 llvm::Type *Tys[2] = { VTy, Ops[2]->getType() }; in EmitAArch64BuiltinExpr()
6393 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys), in EmitAArch64BuiltinExpr()
6401 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() }; in EmitAArch64BuiltinExpr()
6402 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys), in EmitAArch64BuiltinExpr()
6409 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() }; in EmitAArch64BuiltinExpr()
6410 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys), in EmitAArch64BuiltinExpr()
6418 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() }; in EmitAArch64BuiltinExpr()
6419 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys), in EmitAArch64BuiltinExpr()
6426 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() }; in EmitAArch64BuiltinExpr()
6427 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys), in EmitAArch64BuiltinExpr()
6435 llvm::Type *Tys[2] = { VTy, Ops[5]->getType() }; in EmitAArch64BuiltinExpr()
6436 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys), in EmitAArch64BuiltinExpr()
6448 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { in EmitAArch64BuiltinExpr()
6467 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) in EmitAArch64BuiltinExpr()
6485 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { in EmitAArch64BuiltinExpr()
6496 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty), in EmitAArch64BuiltinExpr()
6500 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty), in EmitAArch64BuiltinExpr()
6504 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty), in EmitAArch64BuiltinExpr()
6508 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty), in EmitAArch64BuiltinExpr()
6512 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty), in EmitAArch64BuiltinExpr()
6516 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty), in EmitAArch64BuiltinExpr()
6520 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty), in EmitAArch64BuiltinExpr()
6524 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty), in EmitAArch64BuiltinExpr()
6529 Int = Intrinsic::aarch64_neon_usqadd; in EmitAArch64BuiltinExpr()
6534 Int = Intrinsic::aarch64_neon_suqadd; in EmitAArch64BuiltinExpr()
6542 assert((Ops.size() & (Ops.size() - 1)) == 0 && in BuildVector()
6543 "Not a power-of-two sized vector!"); in BuildVector()
6553 return llvm::ConstantVector::get(CstOps); in BuildVector()
6558 llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size())); in BuildVector()
6570 llvm::VectorType *MaskTy = llvm::VectorType::get(CGF.Builder.getInt1Ty(), in getMaskVecValue()
6571 cast<IntegerType>(Mask->getType())->getBitWidth()); in getMaskVecValue()
6592 llvm::PointerType::getUnqual(Ops[1]->getType())); in EmitX86MaskedStore()
6596 if (C->isAllOnesValue()) in EmitX86MaskedStore()
6600 Ops[1]->getType()->getVectorNumElements()); in EmitX86MaskedStore()
6609 llvm::PointerType::getUnqual(Ops[1]->getType())); in EmitX86MaskedLoad()
6613 if (C->isAllOnesValue()) in EmitX86MaskedLoad()
6617 Ops[1]->getType()->getVectorNumElements()); in EmitX86MaskedLoad()
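EmitX86MaskedLoad/EmitX86MaskedStore above are reached from the AVX-512 mask load/store builtins: the integer mask is expanded to an i1 vector by getMaskVecValue and fed to llvm.masked.load/llvm.masked.store, while an all-ones constant mask falls back to a plain load/store. A hedged sketch of the source level, assuming an x86 target with AVX-512F; the function name is illustrative:

    #include <immintrin.h>

    // Lanes whose bit is set in k are loaded from src (other lanes keep the
    // passthru value) and then stored back to dst.
    __m512 masked_copy(const float *src, float *dst, __m512 passthru, __mmask16 k) {
      __m512 v = _mm512_mask_loadu_ps(passthru, k, src);
      _mm512_mask_storeu_ps(dst, k, v);
      return v;
    }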
6627 if (C->isAllOnesValue()) in EmitX86Select()
6630 Mask = getMaskVecValue(CGF, Mask, Op0->getType()->getVectorNumElements()); in EmitX86Select()
6637 unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); in EmitX86MaskedCompare()
6642 llvm::VectorType::get(CGF.Builder.getInt1Ty(), NumElts)); in EmitX86MaskedCompare()
6645 llvm::VectorType::get(CGF.Builder.getInt1Ty(), NumElts)); in EmitX86MaskedCompare()
6661 if (!C || !C->isAllOnesValue()) in EmitX86MaskedCompare()
6671 Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices); in EmitX86MaskedCompare()
6674 IntegerType::get(CGF.getLLVMContext(), in EmitX86MaskedCompare()
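EmitX86MaskedCompare and EmitX86Select implement the AVX-512 compare-into-mask and mask-blend builtins: the compare yields an <N x i1> vector that is bitcast to an integer mask, and the select blends per lane under such a mask. A sketch, assuming AVX-512F; the function name is illustrative:

    #include <immintrin.h>

    // Equal 32-bit lanes of a and b set the corresponding bit of the mask; the
    // blend then picks lanes of b where the bit is set and lanes of a otherwise.
    __m512i keep_equal(__m512i a, __m512i b) {
      __mmask16 eq = _mm512_cmpeq_epi32_mask(a, b);
      return _mm512_mask_blend_epi32(eq, a, b);
    }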
6682 return EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).getPointer(), in EmitX86BuiltinExpr()
6691 Address DestAddr = EmitMSVAListRef(E->getArg(0)); in EmitX86BuiltinExpr()
6692 Address SrcAddr = EmitMSVAListRef(E->getArg(1)); in EmitX86BuiltinExpr()
6713 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) { in EmitX86BuiltinExpr()
6716 Ops.push_back(EmitScalarExpr(E->getArg(i))); in EmitX86BuiltinExpr()
6721 // that the generated intrinsic gets a ConstantInt. in EmitX86BuiltinExpr()
6723 bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext()); in EmitX86BuiltinExpr()
6725 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result)); in EmitX86BuiltinExpr()
6734 auto getCmpIntrinsicCall = [this, &Ops](Intrinsic::ID ID, unsigned Imm) { in EmitX86BuiltinExpr()
6735 Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm)); in EmitX86BuiltinExpr()
6747 llvm::VectorType *FPVecTy = cast<llvm::VectorType>(Ops[0]->getType()); in EmitX86BuiltinExpr()
6756 const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts(); in EmitX86BuiltinExpr()
6757 StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString(); in EmitX86BuiltinExpr()
6826 // Matching the struct layout from the compiler-rt/libgcc structure that is in EmitX86BuiltinExpr()
6832 llvm::Type *STy = llvm::StructType::get( in EmitX86BuiltinExpr()
6833 Int32Ty, Int32Ty, Int32Ty, llvm::ArrayType::get(Int32Ty, 1), nullptr); in EmitX86BuiltinExpr()
6841 ConstantInt::get(Int32Ty, 0), in EmitX86BuiltinExpr()
6842 ConstantInt::get(Int32Ty, 3), in EmitX86BuiltinExpr()
6843 ConstantInt::get(Int32Ty, 0) in EmitX86BuiltinExpr()
6851 Features, llvm::ConstantInt::get(Int32Ty, 1ULL << Feature)); in EmitX86BuiltinExpr()
6852 return Builder.CreateICmpNE(Bitset, llvm::ConstantInt::get(Int32Ty, 0)); in EmitX86BuiltinExpr()
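The struct type built above mirrors the compiler-rt/libgcc __cpu_model layout, and the feature query is just a bit test against its features word. The source-level builtin is straightforward:

    // __builtin_cpu_supports("feature") compiles to the GEP + bit test shown
    // above; a minimal, self-contained use:
    #include <cstdio>

    int main() {
      if (__builtin_cpu_supports("sse4.2"))
        std::puts("sse4.2 is available");
      return 0;
    }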
6856 Value *RW = ConstantInt::get(Int32Ty, 0); in EmitX86BuiltinExpr()
6858 Value *Data = ConstantInt::get(Int32Ty, 1); in EmitX86BuiltinExpr()
6859 Value *F = CGM.getIntrinsic(Intrinsic::prefetch); in EmitX86BuiltinExpr()
6865 return UndefValue::get(ConvertType(E->getType())); in EmitX86BuiltinExpr()
6873 llvm::ConstantInt::get(Ops[1]->getType(), 0)); in EmitX86BuiltinExpr()
6875 Address Tmp = CreateMemTemp(E->getArg(0)->getType()); in EmitX86BuiltinExpr()
6877 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr), in EmitX86BuiltinExpr()
6881 Address Tmp = CreateMemTemp(E->getType()); in EmitX86BuiltinExpr()
6882 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr), in EmitX86BuiltinExpr()
6898 Intrinsic::ID ID; in EmitX86BuiltinExpr()
6901 ID = Intrinsic::x86_##NAME; \ in EmitX86BuiltinExpr()
6904 default: llvm_unreachable("Unsupported intrinsic!"); in EmitX86BuiltinExpr()
6920 Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty); in EmitX86BuiltinExpr()
6959 getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity(); in EmitX86BuiltinExpr()
6995 getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity(); in EmitX86BuiltinExpr()
7001 llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2); in EmitX86BuiltinExpr()
7008 llvm::Value *Idx = llvm::ConstantInt::get(SizeTy, Index); in EmitX86BuiltinExpr()
7020 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); in EmitX86BuiltinExpr()
7022 unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); in EmitX86BuiltinExpr()
7028 return llvm::Constant::getNullValue(ConvertType(E->getType())); in EmitX86BuiltinExpr()
7033 ShiftVal -= 16; in EmitX86BuiltinExpr()
7035 Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType()); in EmitX86BuiltinExpr()
7039 // 256-bit palignr operates on 128-bit lanes so we need to handle that in EmitX86BuiltinExpr()
7044 Idx += NumElts - 16; // End of lane, switch operand. in EmitX86BuiltinExpr()
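The palignr handling above folds small byte shifts into a shufflevector (switching operands at each 128-bit lane boundary) and returns all zeros once everything has been shifted out. The corresponding SSSE3 intrinsic, as a sketch with an illustrative function name:

    #include <tmmintrin.h>

    // _mm_alignr_epi8 concatenates a:b and shifts right by the byte count; a
    // shift below 16 takes the shufflevector path built above.
    __m128i align_right_4(__m128i a, __m128i b) {
      return _mm_alignr_epi8(a, b, 4);
    }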
7062 llvm::MDNode *Node = llvm::MDNode::get( in EmitX86BuiltinExpr()
7063 getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1))); in EmitX86BuiltinExpr()
7067 llvm::PointerType::getUnqual(Ops[1]->getType()), in EmitX86BuiltinExpr()
7070 SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node); in EmitX86BuiltinExpr()
7072 // No alignment for scalar intrinsic store. in EmitX86BuiltinExpr()
7073 SI->setAlignment(1); in EmitX86BuiltinExpr()
7078 llvm::MDNode *Node = llvm::MDNode::get( in EmitX86BuiltinExpr()
7079 getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1))); in EmitX86BuiltinExpr()
7086 llvm::PointerType::getUnqual(Scl->getType()), in EmitX86BuiltinExpr()
7091 SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node); in EmitX86BuiltinExpr()
7092 SI->setAlignment(1); in EmitX86BuiltinExpr()
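Both blocks above attach !nontemporal metadata to an ordinary store (the second extracts the scalar lane first) and force alignment 1. One source-level form that takes the first path is the movnti intrinsic; a sketch, assuming SSE2, with an illustrative function name:

    #include <emmintrin.h>

    // A streaming (non-temporal) scalar store: the generated IR is a plain
    // store tagged with !nontemporal, exactly as set up above.
    void stream_store(int *dst, int value) {
      _mm_stream_si32(dst, value);
    }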
7153 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7; in EmitX86BuiltinExpr()
7168 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7; in EmitX86BuiltinExpr()
7178 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType()); in EmitX86BuiltinExpr()
7184 // TODO: Handle 64/512-bit vector widths of min/max. in EmitX86BuiltinExpr()
7227 llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_3dnowa_pswapd); in EmitX86BuiltinExpr()
7236 Intrinsic::ID ID; in EmitX86BuiltinExpr()
7238 default: llvm_unreachable("Unsupported intrinsic!"); in EmitX86BuiltinExpr()
7240 ID = Intrinsic::x86_rdrand_16; in EmitX86BuiltinExpr()
7243 ID = Intrinsic::x86_rdrand_32; in EmitX86BuiltinExpr()
7246 ID = Intrinsic::x86_rdrand_64; in EmitX86BuiltinExpr()
7249 ID = Intrinsic::x86_rdseed_16; in EmitX86BuiltinExpr()
7252 ID = Intrinsic::x86_rdseed_32; in EmitX86BuiltinExpr()
7255 ID = Intrinsic::x86_rdseed_64; in EmitX86BuiltinExpr()
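The rdrand/rdseed cases select one of the llvm.x86.rdrand.*/rdseed.* intrinsics, store the random value through the pointer argument, and return the carry flag. A sketch of the user-facing wrapper, assuming -mrdrnd; the function name is illustrative:

    #include <immintrin.h>

    // Returns true when the hardware produced a value (carry flag set); the
    // value itself is written through out.
    bool next_random(unsigned int *out) {
      return _rdrand32_step(out) != 0;
    }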
7294 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); in EmitX86BuiltinExpr()
7311 // We can't handle 8-31 immediates with native IR; use the intrinsic. in EmitX86BuiltinExpr()
7312 Intrinsic::ID ID; in EmitX86BuiltinExpr()
7314 default: llvm_unreachable("Unsupported intrinsic!"); in EmitX86BuiltinExpr()
7316 ID = Intrinsic::x86_sse_cmp_ps; in EmitX86BuiltinExpr()
7319 ID = Intrinsic::x86_avx_cmp_ps_256; in EmitX86BuiltinExpr()
7322 ID = Intrinsic::x86_sse2_cmp_pd; in EmitX86BuiltinExpr()
7325 ID = Intrinsic::x86_avx_cmp_pd_256; in EmitX86BuiltinExpr()
7334 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 0); in EmitX86BuiltinExpr()
7336 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 1); in EmitX86BuiltinExpr()
7338 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 2); in EmitX86BuiltinExpr()
7340 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 3); in EmitX86BuiltinExpr()
7342 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 4); in EmitX86BuiltinExpr()
7344 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 5); in EmitX86BuiltinExpr()
7346 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 6); in EmitX86BuiltinExpr()
7348 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 7); in EmitX86BuiltinExpr()
7350 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 0); in EmitX86BuiltinExpr()
7352 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 1); in EmitX86BuiltinExpr()
7354 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 2); in EmitX86BuiltinExpr()
7356 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 3); in EmitX86BuiltinExpr()
7358 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 4); in EmitX86BuiltinExpr()
7360 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 5); in EmitX86BuiltinExpr()
7362 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 6); in EmitX86BuiltinExpr()
7364 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7); in EmitX86BuiltinExpr()
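Each of the cmp_ss/cmp_sd builtins above is the same intrinsic call with a different immediate predicate (0 = eq, 1 = lt, 2 = le, 3 = unord, 4 = neq, 5 = nlt, 6 = nle, 7 = ord). Two of the corresponding xmmintrin/emmintrin wrappers, as a sketch:

    #include <emmintrin.h>

    // Compare only the low element, leaving the upper elements of the first
    // operand untouched; the result lane is an all-ones/all-zeros bitmask.
    __m128  low_eq(__m128 a, __m128 b)   { return _mm_cmpeq_ss(a, b); }
    __m128d low_lt(__m128d a, __m128d b) { return _mm_cmplt_sd(a, b); }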
7373 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) in EmitPPCBuiltinExpr()
7374 Ops.push_back(EmitScalarExpr(E->getArg(i))); in EmitPPCBuiltinExpr()
7376 Intrinsic::ID ID = Intrinsic::not_intrinsic; in EmitPPCBuiltinExpr()
7381 // __builtin_ppc_get_timebase is GCC 4.8+'s PowerPC-specific name for what we in EmitPPCBuiltinExpr()
7384 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::readcyclecounter)); in EmitPPCBuiltinExpr()
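As the comment above notes, the PowerPC time-base builtin is simply emitted as llvm.readcyclecounter. A minimal use; the wrapper name is illustrative:

    // Both spellings return the 64-bit time base / cycle counter value.
    unsigned long long read_timebase() {
      return __builtin_ppc_get_timebase();
      // target-independent alternative: __builtin_readcyclecounter()
    }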
7403 default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!"); in EmitPPCBuiltinExpr()
7405 ID = Intrinsic::ppc_altivec_lvx; in EmitPPCBuiltinExpr()
7408 ID = Intrinsic::ppc_altivec_lvxl; in EmitPPCBuiltinExpr()
7411 ID = Intrinsic::ppc_altivec_lvebx; in EmitPPCBuiltinExpr()
7414 ID = Intrinsic::ppc_altivec_lvehx; in EmitPPCBuiltinExpr()
7417 ID = Intrinsic::ppc_altivec_lvewx; in EmitPPCBuiltinExpr()
7420 ID = Intrinsic::ppc_altivec_lvsl; in EmitPPCBuiltinExpr()
7423 ID = Intrinsic::ppc_altivec_lvsr; in EmitPPCBuiltinExpr()
7426 ID = Intrinsic::ppc_vsx_lxvd2x; in EmitPPCBuiltinExpr()
7429 ID = Intrinsic::ppc_vsx_lxvw4x; in EmitPPCBuiltinExpr()
7450 default: llvm_unreachable("Unsupported st intrinsic!"); in EmitPPCBuiltinExpr()
7452 ID = Intrinsic::ppc_altivec_stvx; in EmitPPCBuiltinExpr()
7455 ID = Intrinsic::ppc_altivec_stvxl; in EmitPPCBuiltinExpr()
7458 ID = Intrinsic::ppc_altivec_stvebx; in EmitPPCBuiltinExpr()
7461 ID = Intrinsic::ppc_altivec_stvehx; in EmitPPCBuiltinExpr()
7464 ID = Intrinsic::ppc_altivec_stvewx; in EmitPPCBuiltinExpr()
7467 ID = Intrinsic::ppc_vsx_stxvd2x; in EmitPPCBuiltinExpr()
7470 ID = Intrinsic::ppc_vsx_stxvw4x; in EmitPPCBuiltinExpr()
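The lvx/stvx (and related) cases are what altivec.h's vec_ld/vec_st expand to; the integer offset argument is added to the pointer before the target intrinsic is called. A sketch, assuming -maltivec; the function name is illustrative:

    #include <altivec.h>

    // Aligned vector load and store through the AltiVec lvx/stvx path above.
    __vector signed int copy_vec(const __vector signed int *src,
                                 __vector signed int *dst) {
      __vector signed int v = vec_ld(0, src);
      vec_st(v, 0, dst);
      return v;
    }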
7479 llvm::Type *ResultType = ConvertType(E->getType()); in EmitPPCBuiltinExpr()
7480 Value *X = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
7481 ID = Intrinsic::sqrt; in EmitPPCBuiltinExpr()
7490 llvm::Type *ResultType = ConvertType(E->getType()); in EmitPPCBuiltinExpr()
7491 Value *X = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
7492 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false); in EmitPPCBuiltinExpr()
7493 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType); in EmitPPCBuiltinExpr()
7499 llvm::Type *ResultType = ConvertType(E->getType()); in EmitPPCBuiltinExpr()
7500 Value *X = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
7501 Value *Y = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
7502 ID = Intrinsic::copysign; in EmitPPCBuiltinExpr()
7517 llvm::Type *ResultType = ConvertType(E->getType()); in EmitPPCBuiltinExpr()
7518 Value *X = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
7521 ID = Intrinsic::floor; in EmitPPCBuiltinExpr()
7524 ID = Intrinsic::round; in EmitPPCBuiltinExpr()
7527 ID = Intrinsic::nearbyint; in EmitPPCBuiltinExpr()
7530 ID = Intrinsic::ceil; in EmitPPCBuiltinExpr()
7533 ID = Intrinsic::trunc; in EmitPPCBuiltinExpr()
7541 llvm::Type *ResultType = ConvertType(E->getType()); in EmitPPCBuiltinExpr()
7542 Value *X = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
7543 llvm::Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType); in EmitPPCBuiltinExpr()
7556 llvm::Type *ResultType = ConvertType(E->getType()); in EmitPPCBuiltinExpr()
7557 Value *X = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
7558 Value *Y = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
7559 Value *Z = EmitScalarExpr(E->getArg(2)); in EmitPPCBuiltinExpr()
7561 llvm::Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); in EmitPPCBuiltinExpr()
7581 return nullptr; // Suppress no-return warning in EmitPPCBuiltinExpr()
7594 Address FlagOutPtr = EmitPointerWithAlignment(E->getArg(3)); in EmitAMDGPUBuiltinExpr()
7596 llvm::Value *X = EmitScalarExpr(E->getArg(0)); in EmitAMDGPUBuiltinExpr()
7597 llvm::Value *Y = EmitScalarExpr(E->getArg(1)); in EmitAMDGPUBuiltinExpr()
7598 llvm::Value *Z = EmitScalarExpr(E->getArg(2)); in EmitAMDGPUBuiltinExpr()
7600 llvm::Value *Callee = CGM.getIntrinsic(Intrinsic::amdgcn_div_scale, in EmitAMDGPUBuiltinExpr()
7601 X->getType()); in EmitAMDGPUBuiltinExpr()
7609 = FlagOutPtr.getPointer()->getType()->getPointerElementType(); in EmitAMDGPUBuiltinExpr()
7617 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0)); in EmitAMDGPUBuiltinExpr()
7618 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); in EmitAMDGPUBuiltinExpr()
7619 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2)); in EmitAMDGPUBuiltinExpr()
7620 llvm::Value *Src3 = EmitScalarExpr(E->getArg(3)); in EmitAMDGPUBuiltinExpr()
7622 llvm::Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas, in EmitAMDGPUBuiltinExpr()
7623 Src0->getType()); in EmitAMDGPUBuiltinExpr()
7629 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_div_fixup); in EmitAMDGPUBuiltinExpr()
7632 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_trig_preop); in EmitAMDGPUBuiltinExpr()
7635 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp); in EmitAMDGPUBuiltinExpr()
7638 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq); in EmitAMDGPUBuiltinExpr()
7641 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq_clamp); in EmitAMDGPUBuiltinExpr()
7643 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sin); in EmitAMDGPUBuiltinExpr()
7645 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos); in EmitAMDGPUBuiltinExpr()
7647 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp); in EmitAMDGPUBuiltinExpr()
7650 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_ldexp); in EmitAMDGPUBuiltinExpr()
7653 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_mant); in EmitAMDGPUBuiltinExpr()
7657 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_exp); in EmitAMDGPUBuiltinExpr()
7661 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_fract); in EmitAMDGPUBuiltinExpr()
7664 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_class); in EmitAMDGPUBuiltinExpr()
7669 CI->setConvergent(); in EmitAMDGPUBuiltinExpr()
7676 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq); in EmitAMDGPUBuiltinExpr()
7677 return emitUnaryBuiltin(*this, E, Intrinsic::r600_rsq); in EmitAMDGPUBuiltinExpr()
7682 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_ldexp); in EmitAMDGPUBuiltinExpr()
7683 return emitFPIntBuiltin(*this, E, Intrinsic::AMDGPU_ldexp); in EmitAMDGPUBuiltinExpr()
7688 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_x, 0, 1024); in EmitAMDGPUBuiltinExpr()
7690 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_y, 0, 1024); in EmitAMDGPUBuiltinExpr()
7692 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_z, 0, 1024); in EmitAMDGPUBuiltinExpr()
7696 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_x, 0, 1024); in EmitAMDGPUBuiltinExpr()
7698 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_y, 0, 1024); in EmitAMDGPUBuiltinExpr()
7700 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_z, 0, 1024); in EmitAMDGPUBuiltinExpr()
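The workitem-id builtins above are emitted with a [0, 1024) value range attached via emitRangedBuiltin. A minimal device-side use, assuming an amdgcn target; the function name is illustrative:

    // Returns this workitem's x index within the workgroup; the range metadata
    // added by emitRangedBuiltin lets LLVM assume the result is below 1024.
    unsigned workitem_x() {
      return __builtin_amdgcn_workitem_id_x();
    }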
7707 /// to an int that receives the post-instruction CC value. At the LLVM level
7712 unsigned NumArgs = E->getNumArgs() - 1; in EmitSystemZIntrinsicWithCC()
7715 Args[I] = CGF.EmitScalarExpr(E->getArg(I)); in EmitSystemZIntrinsicWithCC()
7716 Address CCPtr = CGF.EmitPointerWithAlignment(E->getArg(NumArgs)); in EmitSystemZIntrinsicWithCC()
7728 Value *TDB = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
7729 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c); in EmitSystemZBuiltinExpr()
7730 Value *F = CGM.getIntrinsic(Intrinsic::s390_tbegin); in EmitSystemZBuiltinExpr()
7734 Value *TDB = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
7735 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c); in EmitSystemZBuiltinExpr()
7736 Value *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat); in EmitSystemZBuiltinExpr()
7740 Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy); in EmitSystemZBuiltinExpr()
7741 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08); in EmitSystemZBuiltinExpr()
7742 Value *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc); in EmitSystemZBuiltinExpr()
7746 Value *Data = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
7747 Value *F = CGM.getIntrinsic(Intrinsic::s390_tabort); in EmitSystemZBuiltinExpr()
7751 Value *Address = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
7752 Value *Data = EmitScalarExpr(E->getArg(1)); in EmitSystemZBuiltinExpr()
7753 Value *F = CGM.getIntrinsic(Intrinsic::s390_ntstg); in EmitSystemZBuiltinExpr()
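The tbegin/tbeginc/tabort/ntstg cases above back the z/Architecture transactional-execution builtins. A hedged sketch, assuming -mhtm on SystemZ; the retry policy and function name are illustrative, and condition code 0 from __builtin_tbegin means the transaction started:

    // Try to run the critical section transactionally; a real caller would
    // retry or fall back to a lock instead of just reporting failure.
    bool add_transactionally(long *counter) {
      if (__builtin_tbegin(nullptr) == 0) {   // CC 0: transaction started
        ++*counter;
        __builtin_tend();
        return true;
      }
      return false;
    }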
7758 // to target-specific LLVM intrinsics. The ones handled specially here can in EmitSystemZBuiltinExpr()
7766 llvm::Type *ResultType = ConvertType(E->getType()); in EmitSystemZBuiltinExpr()
7767 Value *X = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
7768 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType); in EmitSystemZBuiltinExpr()
7776 llvm::Type *ResultType = ConvertType(E->getType()); in EmitSystemZBuiltinExpr()
7777 Value *X = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
7778 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false); in EmitSystemZBuiltinExpr()
7779 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType); in EmitSystemZBuiltinExpr()
7787 llvm::Type *ResultType = ConvertType(E->getType()); in EmitSystemZBuiltinExpr()
7788 Value *X = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
7789 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false); in EmitSystemZBuiltinExpr()
7790 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType); in EmitSystemZBuiltinExpr()
7795 llvm::Type *ResultType = ConvertType(E->getType()); in EmitSystemZBuiltinExpr()
7796 Value *X = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
7797 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType); in EmitSystemZBuiltinExpr()
7801 llvm::Type *ResultType = ConvertType(E->getType()); in EmitSystemZBuiltinExpr()
7802 Value *X = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
7803 Value *Y = EmitScalarExpr(E->getArg(1)); in EmitSystemZBuiltinExpr()
7804 Value *Z = EmitScalarExpr(E->getArg(2)); in EmitSystemZBuiltinExpr()
7805 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); in EmitSystemZBuiltinExpr()
7809 llvm::Type *ResultType = ConvertType(E->getType()); in EmitSystemZBuiltinExpr()
7810 Value *X = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
7811 Value *Y = EmitScalarExpr(E->getArg(1)); in EmitSystemZBuiltinExpr()
7812 Value *Z = EmitScalarExpr(E->getArg(2)); in EmitSystemZBuiltinExpr()
7814 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); in EmitSystemZBuiltinExpr()
7818 llvm::Type *ResultType = ConvertType(E->getType()); in EmitSystemZBuiltinExpr()
7819 Value *X = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
7820 Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType); in EmitSystemZBuiltinExpr()
7824 llvm::Type *ResultType = ConvertType(E->getType()); in EmitSystemZBuiltinExpr()
7825 Value *X = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
7827 Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType); in EmitSystemZBuiltinExpr()
7831 llvm::Type *ResultType = ConvertType(E->getType()); in EmitSystemZBuiltinExpr()
7832 Value *X = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
7833 // Constant-fold the M4 and M5 mask arguments. in EmitSystemZBuiltinExpr()
7835 bool IsConstM4 = E->getArg(1)->isIntegerConstantExpr(M4, getContext()); in EmitSystemZBuiltinExpr()
7836 bool IsConstM5 = E->getArg(2)->isIntegerConstantExpr(M5, getContext()); in EmitSystemZBuiltinExpr()
7840 // standard intrinsic. We only support some combinations of M4 and M5. in EmitSystemZBuiltinExpr()
7841 Intrinsic::ID ID = Intrinsic::not_intrinsic; in EmitSystemZBuiltinExpr()
7844 case 0: // IEEE-inexact exception allowed in EmitSystemZBuiltinExpr()
7847 case 0: ID = Intrinsic::rint; break; in EmitSystemZBuiltinExpr()
7850 case 4: // IEEE-inexact exception suppressed in EmitSystemZBuiltinExpr()
7853 case 0: ID = Intrinsic::nearbyint; break; in EmitSystemZBuiltinExpr()
7854 case 1: ID = Intrinsic::round; break; in EmitSystemZBuiltinExpr()
7855 case 5: ID = Intrinsic::trunc; break; in EmitSystemZBuiltinExpr()
7856 case 6: ID = Intrinsic::ceil; break; in EmitSystemZBuiltinExpr()
7857 case 7: ID = Intrinsic::floor; break; in EmitSystemZBuiltinExpr()
7861 if (ID != Intrinsic::not_intrinsic) { in EmitSystemZBuiltinExpr()
7865 Function *F = CGM.getIntrinsic(Intrinsic::s390_vfidb); in EmitSystemZBuiltinExpr()
7866 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4); in EmitSystemZBuiltinExpr()
7867 Value *M5Value = llvm::ConstantInt::get(getLLVMContext(), M5); in EmitSystemZBuiltinExpr()
7871 // Vector intrinsics that output the post-instruction CC value. in EmitSystemZBuiltinExpr()
7875 return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E) in EmitSystemZBuiltinExpr()
7952 Value *Ptr = EmitScalarExpr(E->getArg(0)); in EmitNVPTXBuiltinExpr()
7955 getNaturalPointeeTypeAlignment(E->getArg(0)->getType(), &AlignSource); in EmitNVPTXBuiltinExpr()
7957 CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(), in EmitNVPTXBuiltinExpr()
7958 Ptr->getType()}), in EmitNVPTXBuiltinExpr()
7959 {Ptr, ConstantInt::get(Builder.getInt32Ty(), Align.getQuantity())}); in EmitNVPTXBuiltinExpr()
8021 Value *Ptr = EmitScalarExpr(E->getArg(0)); in EmitNVPTXBuiltinExpr()
8022 Value *Val = EmitScalarExpr(E->getArg(1)); in EmitNVPTXBuiltinExpr()
8024 // LLVM's nvvm_atomic_load_add_f32 intrinsic for that. in EmitNVPTXBuiltinExpr()
8026 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_add_f32, Ptr->getType()); in EmitNVPTXBuiltinExpr()
8031 Value *Ptr = EmitScalarExpr(E->getArg(0)); in EmitNVPTXBuiltinExpr()
8032 Value *Val = EmitScalarExpr(E->getArg(1)); in EmitNVPTXBuiltinExpr()
8034 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_inc_32, Ptr->getType()); in EmitNVPTXBuiltinExpr()
8039 Value *Ptr = EmitScalarExpr(E->getArg(0)); in EmitNVPTXBuiltinExpr()
8040 Value *Val = EmitScalarExpr(E->getArg(1)); in EmitNVPTXBuiltinExpr()
8042 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_dec_32, Ptr->getType()); in EmitNVPTXBuiltinExpr()
8073 return MakeLdg(Intrinsic::nvvm_ldg_global_i); in EmitNVPTXBuiltinExpr()
8079 return MakeLdg(Intrinsic::nvvm_ldg_global_f); in EmitNVPTXBuiltinExpr()
8089 llvm::Type *ResultType = ConvertType(E->getType()); in EmitWebAssemblyBuiltinExpr()
8090 Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_current_memory, ResultType); in EmitWebAssemblyBuiltinExpr()
8094 Value *X = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
8095 Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_grow_memory, X->getType()); in EmitWebAssemblyBuiltinExpr()
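These two cases map the WebAssembly memory-introspection builtins onto the wasm_current_memory / wasm_grow_memory intrinsics. A sketch using the builtin spellings of this Clang vintage (later versions rename them to __builtin_wasm_memory_size / __builtin_wasm_memory_grow and change the signatures, so treat this as illustrative), assuming a wasm32 target:

    #include <cstddef>

    // Linear memory is measured in 64 KiB pages.
    std::size_t current_pages() {
      return __builtin_wasm_current_memory();
    }
    void grow_by(std::size_t delta_pages) {
      __builtin_wasm_grow_memory(delta_pages);
    }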