1 /****************************************************************************
2 * Copyright (C) 2014-2015 Intel Corporation. All Rights Reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * @file builder_misc.h
24 *
25 * @brief miscellaneous builder functions
26 *
27 * Notes:
28 *
29 ******************************************************************************/
30 #pragma once
31
// C(): wrap a native C++ scalar value in an LLVM Constant of the
// corresponding IR type; one overload per supported scalar type.
Constant *C(bool i);
Constant *C(char i);
Constant *C(uint8_t i);
Constant *C(int i);
Constant *C(int64_t i);
Constant *C(uint16_t i);
Constant *C(uint32_t i);
Constant *C(float i);
40
41 template<typename Ty>
C(const std::initializer_list<Ty> & constList)42 Constant *C(const std::initializer_list<Ty> &constList)
43 {
44 std::vector<Constant*> vConsts;
45 for(auto i : constList) {
46
47 vConsts.push_back(C((Ty)i));
48 }
49 return ConstantVector::get(vConsts);
50 }
51
// Predicate constant from a C++ bool.
Constant *PRED(bool pred);
// VIMMED1(): vector immediate — presumably broadcasts the scalar across
// all SIMD lanes (cf. VBROADCAST below); confirm in the implementation.
Value *VIMMED1(int i);
Value *VIMMED1(uint32_t i);
Value *VIMMED1(float i);
Value *VIMMED1(bool i);
// Undefined values of various types (scalar, vector, pointer forms).
Value *VUNDEF(Type* t);
Value *VUNDEF_F();
Value *VUNDEF_I();
Value *VUNDEF(Type* ty, uint32_t size);
Value *VUNDEF_IPTR();
#if HAVE_LLVM == 0x306
// Insert val into vec at a constant lane index (LLVM 3.6 only; newer
// LLVM presumably provides this elsewhere).
Value *VINSERT(Value *vec, Value *val, uint64_t index);
#endif
// Broadcast a scalar value to every lane of a SIMD vector.
Value *VBROADCAST(Value *src);
// Reciprocal approximation of a packed-float vector.
Value *VRCP(Value *va);
// Plane equation evaluation; writes through the vX/vY reference params.
Value *VPLANEPS(Value* vA, Value* vB, Value* vC, Value* &vX, Value* &vY);
68
// Extract the compile-time constant held by an immediate IR value
// (unsigned / signed variants).
uint32_t IMMED(Value* i);
int32_t S_IMMED(Value* i);

// GEP/CALL convenience wrappers taking brace-enclosed index/argument lists.
Value *GEP(Value* ptr, const std::initializer_list<Value*> &indexList);
Value *GEP(Value* ptr, const std::initializer_list<uint32_t> &indexList);
CallInst *CALL(Value *Callee, const std::initializer_list<Value*> &args);
#if HAVE_LLVM > 0x306
// Zero-argument call: forward to CALLA with an empty operand list.
CallInst *CALL(Value *Callee)
{
    return CALLA(Callee);
}
// Fixed-arity call helpers for one, two, and three arguments.
CallInst *CALL(Value *Callee, Value* arg);
CallInst *CALL2(Value *Callee, Value* arg1, Value* arg2);
CallInst *CALL3(Value *Callee, Value* arg1, Value* arg2, Value* arg3);
#endif
81
// Load/store through BasePtr at a GEP-style offset list; LOAD/STORE take
// constant uint32_t offsets, the *V variants take Value* offsets.
LoadInst *LOAD(Value *BasePtr, const std::initializer_list<uint32_t> &offset, const llvm::Twine& name = "");
LoadInst *LOADV(Value *BasePtr, const std::initializer_list<Value*> &offset, const llvm::Twine& name = "");
StoreInst *STORE(Value *Val, Value *BasePtr, const std::initializer_list<uint32_t> &offset);
StoreInst *STOREV(Value *Val, Value *BasePtr, const std::initializer_list<Value*> &offset);
86
VCMPPS_EQ(Value * a,Value * b)87 Value *VCMPPS_EQ(Value* a, Value* b) { return VCMPPS(a, b, C((uint8_t)_CMP_EQ_OQ)); }
VCMPPS_LT(Value * a,Value * b)88 Value *VCMPPS_LT(Value* a, Value* b) { return VCMPPS(a, b, C((uint8_t)_CMP_LT_OQ)); }
VCMPPS_LE(Value * a,Value * b)89 Value *VCMPPS_LE(Value* a, Value* b) { return VCMPPS(a, b, C((uint8_t)_CMP_LE_OQ)); }
VCMPPS_ISNAN(Value * a,Value * b)90 Value *VCMPPS_ISNAN(Value* a, Value* b) { return VCMPPS(a, b, C((uint8_t)_CMP_UNORD_Q)); }
VCMPPS_NEQ(Value * a,Value * b)91 Value *VCMPPS_NEQ(Value* a, Value* b) { return VCMPPS(a, b, C((uint8_t)_CMP_NEQ_OQ)); }
VCMPPS_GE(Value * a,Value * b)92 Value *VCMPPS_GE(Value* a, Value* b) { return VCMPPS(a, b, C((uint8_t)_CMP_GE_OQ)); }
VCMPPS_GT(Value * a,Value * b)93 Value *VCMPPS_GT(Value* a, Value* b) { return VCMPPS(a, b, C((uint8_t)_CMP_GT_OQ)); }
VCMPPS_NOTNAN(Value * a,Value * b)94 Value *VCMPPS_NOTNAN(Value* a, Value* b){ return VCMPPS(a, b, C((uint8_t)_CMP_ORD_Q)); }
95
// Convert between a per-lane vector mask and a scalar mask representation
// (directions per the parameter names; confirm in the implementation).
Value *MASK(Value* vmask);
Value *VMASK(Value* mask);

//////////////////////////////////////////////////////////////////////////
/// @brief functions that build IR to call x86 intrinsics directly, or
/// emulate them with other instructions if not available on the host
//////////////////////////////////////////////////////////////////////////
// Masked dword load — presumably AVX maskload semantics; confirm in impl.
Value *MASKLOADD(Value* src, Value* mask);

// Gather all 4 components of 'format' from pSrcBase + byteOffsets under
// 'mask'; results are written to vGatherComponents, optionally packed.
void Gather4(const SWR_FORMAT format, Value* pSrcBase, Value* byteOffsets,
             Value* mask, Value* vGatherComponents[], bool bPackedOutput);

// Single-component gathers (float / dword / double) with per-lane mask
// and scale, mirroring the x86 gather intrinsics' operand set.
Value *GATHERPS(Value* src, Value* pBase, Value* indices, Value* mask, Value* scale);
// 4-component float gather driven by SWR_FORMAT_INFO.
void GATHER4PS(const SWR_FORMAT_INFO &info, Value* pSrcBase, Value* byteOffsets,
               Value* mask, Value* vGatherComponents[], bool bPackedOutput);

Value *GATHERDD(Value* src, Value* pBase, Value* indices, Value* mask, Value* scale);
// 4-component dword gather driven by SWR_FORMAT_INFO.
void GATHER4DD(const SWR_FORMAT_INFO &info, Value* pSrcBase, Value* byteOffsets,
               Value* mask, Value* vGatherComponents[], bool bPackedOutput);

Value *GATHERPD(Value* src, Value* pBase, Value* indices, Value* mask, Value* scale);

// Masked scatter of vSrc to pDst at vOffsets.
void SCATTERPS(Value* pDst, Value* vSrc, Value* vOffsets, Value* vMask);

// Unpack 8bpc / 16bpc gather results into per-component outputs.
void Shuffle8bpcGather4(const SWR_FORMAT_INFO &info, Value* vGatherInput, Value* vGatherOutput[], bool bPackedOutput);
void Shuffle16bpcGather4(const SWR_FORMAT_INFO &info, Value* vGatherInput[], Value* vGatherOutput[], bool bPackedOutput);

// Thin wrappers over the identically-named x86 SIMD operations.
Value *PSHUFB(Value* a, Value* b);
Value *PMOVSXBD(Value* a);
Value *PMOVSXWD(Value* a);
Value *PERMD(Value* a, Value* idx);
Value *PERMPS(Value* a, Value* idx);
Value *CVTPH2PS(Value* a);
Value *CVTPS2PH(Value* a, Value* rounding);
Value *PMAXSD(Value* a, Value* b);
Value *PMINSD(Value* a, Value* b);
Value *VABSPS(Value* a);
// Fused multiply-add: a * b + c (packed single).
Value *FMADDPS(Value* a, Value* b, Value* c);
134
135 // LLVM removed VPCMPGTD x86 intrinsic. This emulates that behavior
VPCMPGTD(Value * a,Value * b)136 Value *VPCMPGTD(Value* a, Value* b)
137 {
138 Value* vIndexMask = ICMP_UGT(a,b);
139
140 // need to set the high bit for x86 intrinsic masks
141 return S_EXT(vIndexMask,VectorType::get(mInt32Ty,JM()->mVWidth));
142 }
143
// Clamp src into [low, high] — integer and float variants; the last
// overload takes immediate float bounds.
Value *ICLAMP(Value* src, Value* low, Value* high);
Value *FCLAMP(Value* src, Value* low, Value* high);
Value *FCLAMP(Value* src, float low, float high);

// Emit a runtime print of printStr, optionally formatting printArgs.
CallInst *PRINT(const std::string &printStr);
CallInst *PRINT(const std::string &printStr,const std::initializer_list<Value*> &printArgs);
// Save/restore the stack pointer (pairs around dynamic stack use).
Value* STACKSAVE();
void STACKRESTORE(Value* pSaved);

// Population count of a.
Value* POPCNT(Value* a);
154
INT3()155 Value* INT3() { return INTERRUPT(C((uint8_t)3)); }
156
157
// 128-bit lane extract/insert (named after AVX2 vextracti128/vinserti128);
// imm8 selects the lane.
Value *VEXTRACTI128(Value* a, Constant* imm8);
Value *VINSERTI128(Value* a, Value* b, Constant* imm8);

// rdtsc buckets macros: start/stop a timing bucket identified by pId.
void RDTSC_START(Value* pBucketMgr, Value* pId);
void RDTSC_STOP(Value* pBucketMgr, Value* pId);

// Create an alloca of pType placed in pFunc's entry block.
Value* CreateEntryAlloca(Function* pFunc, Type* pType);

// Static stack allocations for scatter operations
// (cached alloca Values; nullptr until first use — see SCATTERPS impl).
Value* pScatterStackSrc{ nullptr };
Value* pScatterStackOffsets{ nullptr };
170
171