/third_party/mesa3d/src/mesa/x86/ |
D | sse_xform3.S |
    78   MOVAPS ( REGOFF(0, EDX), XMM0 )   /* m0 | m1 | m2 | m3 */
    93   MULPS ( XMM0, XMM4 )              /* m3*ox | m2*ox | m1*ox | m0*ox */
    151  MOVLPS ( S(0), XMM0 )
    152  MOVLPS ( XMM0, D(0) )
    153  MOVSS ( S(2), XMM0 )
    154  MOVSS ( XMM0, D(2) )
    202  XORPS( XMM0, XMM0 )               /* clean the working register */
    215  MOVLPS ( S(0), XMM0 )             /* - | - | s1 | s0 */
    216  MULPS ( XMM1, XMM0 )              /* - | - | s1*m5 | s0*m0 */
    217  ADDPS ( XMM2, XMM0 )              /* - | - | +m13 | +m12 */
    [all …]
|
D | sse_normal.S |
    80   MOVSS ( ARG_SCALE, XMM0 )         /* scale */
    81   SHUFPS ( CONST(0x0), XMM0, XMM0 ) /* scale | scale */
    82   MULPS ( XMM0, XMM1 )              /* m5*scale | m0*scale */
    83   MULSS ( M(10), XMM0 )             /* m10*scale */
    92   MULSS ( XMM0, XMM2 )              /* uz*m10*scale */
    139  MOVSS ( M(0), XMM0 )              /* m0 */
    141  UNPCKLPS( XMM1, XMM0 )            /* m4 | m0 */
    146  MULPS ( XMM4, XMM0 )              /* m4*scale | m0*scale */
    165  MULPS ( XMM0, XMM3 )              /* ux*m4 | ux*m0 */
    232  MOVSS( M(0), XMM0 )               /* m0 */
    [all …]
|
D | sse_xform4.S |
    79   MOVSS( SRC(0), XMM0 )             /* ox */
    80   SHUFPS( CONST(0x0), XMM0, XMM0 )  /* ox | ox | ox | ox */
    81   MULPS( XMM4, XMM0 )               /* ox*m3 | ox*m2 | ox*m1 | ox*m0 */
    95   ADDPS( XMM1, XMM0 )               /* ox*m3+oy*m7 | ... */
    96   ADDPS( XMM2, XMM0 )               /* ox*m3+oy*m7+oz*m11 | ... */
    97   ADDPS( XMM3, XMM0 )               /* ox*m3+oy*m7+oz*m11+ow*m15 | ... */
    98   MOVAPS( XMM0, DST(0) )            /* ->D(3) | ->D(2) | ->D(1) | ->D(0) */
    144  MOVAPS( MAT(0), XMM0 )            /* m3 | m2 | m1 | m0 */
    153  MULPS( XMM0, XMM4 )               /* ox*m3 | ox*m2 | ox*m1 | ox*m0 */
    218  MOVAPS( REGIND(ESI), XMM0 )
    [all …]
|
D | sse_xform2.S |
    77   MOVAPS( M(0), XMM0 )              /* m3 | m2 | m1 | m0 */
    85   MULPS( XMM0, XMM3 )               /* ox*m3 | ox*m2 | ox*m1 | ox*m0 */
    190  XORPS( XMM0, XMM0 )               /* clean the working register */
    201  MOVLPS ( S(0), XMM0 )             /* - | - | oy | ox */
    202  MULPS ( XMM1, XMM0 )              /* - | - | oy*m5 | ox*m0 */
    203  ADDPS ( XMM2, XMM0 )              /* - | - | +m13 | +m12 */
    204  MOVLPS ( XMM0, D(0) )             /* -> D(1) | -> D(0) */
    256  XORPS ( XMM0, XMM0 )              /* 0 | 0 | 0 | 0 */
    264  MOVSS( XMM0, D(3) )               /* ->D(3) */
    311  MOVLPS( M(0), XMM0 )              /* m1 | m0 */
    [all …]
|
D | sse_xform1.S |
    78   MOVAPS( M(0), XMM0 )              /* m3 | m2 | m1 | m0 */
    85   MULPS( XMM0, XMM2 )               /* ox*m3 | ox*m2 | ox*m1 | ox*m0 */
    187  MOVSS( M(0), XMM0 )               /* m0 */
    195  MULSS( XMM0, XMM4 )               /* ox*m0 */
    248  XORPS( XMM0, XMM0 )               /* 0 | 0 | 0 | 0 */
    259  MOVSS( XMM0, D(1) )
    260  MOVSS( XMM0, D(3) )
    306  MOVLPS( M(0), XMM0 )              /* m1 | m0 */
    313  MULPS( XMM0, XMM2 )               /* - | - | ox*m1 | ox*m0 */
    361  MOVSS( M(0), XMM0 )               /* m0 */
    [all …]
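
The sse_xform*.S and sse_normal.S hits above all follow one splat/multiply/accumulate pattern, spelled out by the lane comments: broadcast a source component across a register, multiply it by one column of the column-major 4x4 matrix, and sum the partial products (sse_normal.S additionally folds a uniform scale into the columns first). A minimal C++ intrinsics sketch of that pattern, assuming a plain float[16]/float[4] layout; the helper name transform_point4 is illustrative, not Mesa's actual interface.

    #include <xmmintrin.h>  /* SSE1: the MULPS/ADDPS/SHUFPS family used above */

    /* Hypothetical helper: transform one 4-component point by a column-major
     * 4x4 matrix, mirroring the XMM0..XMM4 usage in sse_xform4.S. */
    static inline void transform_point4(float dst[4], const float m[16],
                                        const float src[4])
    {
        __m128 col0 = _mm_loadu_ps(m + 0);   /* m3  | m2  | m1  | m0  */
        __m128 col1 = _mm_loadu_ps(m + 4);   /* m7  | m6  | m5  | m4  */
        __m128 col2 = _mm_loadu_ps(m + 8);   /* m11 | m10 | m9  | m8  */
        __m128 col3 = _mm_loadu_ps(m + 12);  /* m15 | m14 | m13 | m12 */

        __m128 x = _mm_set1_ps(src[0]);      /* ox | ox | ox | ox  (MOVSS + SHUFPS) */
        __m128 y = _mm_set1_ps(src[1]);
        __m128 z = _mm_set1_ps(src[2]);
        __m128 w = _mm_set1_ps(src[3]);

        __m128 r = _mm_mul_ps(x, col0);             /* ox*m3 | ox*m2 | ox*m1 | ox*m0 */
        r = _mm_add_ps(r, _mm_mul_ps(y, col1));     /* + oy*column1 */
        r = _mm_add_ps(r, _mm_mul_ps(z, col2));     /* + oz*column2 */
        r = _mm_add_ps(r, _mm_mul_ps(w, col3));     /* + ow*column3 */
        _mm_storeu_ps(dst, r);                      /* -> D(3) | D(2) | D(1) | D(0) */
    }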
|
D | common_x86_asm.S |
    167  XORPS ( XMM0, XMM0 )
    196  XORPS ( XMM0, XMM0 )
    205  DIVPS ( XMM0, XMM1 )
|
D | assyntax.h | 224 #define XMM0 %xmm0 macro
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/ |
D | X86CallingConv.td |
    46   let XMM = [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7];
    52   let XMM = [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
    233  // Vector types are returned in XMM0 and XMM1, when they fit. XMM2 and XMM3
    237  CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
    265  // case they use XMM0, otherwise it is the same as the common X86 calling
    268  CCIfType<[f32, f64], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
    275  // The X86-32 fastcc returns 1, 2, or 3 FP values in XMM0-2 if the target has
    279  CCIfType<[f32], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
    280  CCIfType<[f64], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
    293  // Vector types are returned in XMM0,XMM1,XMM2 and XMM3.
    [all …]
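
As the comments above state, a 128-bit vector value that fits is returned in XMM0 (then XMM1, and so on). A trivial, hedged C++ illustration: on an SSE-capable x86 target, a function returning __m128 leaves its result in XMM0 for the caller.

    #include <xmmintrin.h>

    /* Returned __m128 values travel in XMM0 under the rules quoted above
     * (illustrative; exact placement depends on the target's calling convention). */
    __m128 add4(__m128 a, __m128 b)
    {
        return _mm_add_ps(a, b);   /* result is left in XMM0 for the caller */
    }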
|
D | X86CallingConv.cpp | 80 static const MCPhysReg RegListXMM[] = {X86::XMM0, X86::XMM1, X86::XMM2, in CC_X86_VectorCallGetSSEs()
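
CC_X86_VectorCallGetSSEs walks a fixed table of XMM registers. A sketch of the general shape of such a custom assignment hook, written in C++ against the LLVM 10 CCState API; the function name CC_Sketch_AssignToXMM and the include path for the X86 register enum are assumptions for illustration only.

    #include "llvm/CodeGen/CallingConvLower.h"
    #include "MCTargetDesc/X86MCTargetDesc.h"   // target-internal header providing X86::XMM0..

    using namespace llvm;

    // Hand each incoming value the next free register from a fixed XMM list,
    // falling through (return false) once the list is exhausted.
    static bool CC_Sketch_AssignToXMM(unsigned ValNo, MVT ValVT, MVT LocVT,
                                      CCValAssign::LocInfo LocInfo,
                                      ISD::ArgFlagsTy ArgFlags, CCState &State) {
      static const MCPhysReg RegListXMM[] = {X86::XMM0, X86::XMM1, X86::XMM2,
                                             X86::XMM3, X86::XMM4, X86::XMM5};
      if (unsigned Reg = State.AllocateReg(RegListXMM)) {
        State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return true;    // value fully handled
      }
      return false;     // let the next rule (e.g. stack assignment) take over
    }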
|
D | X86CallLowering.cpp | 165 static const MCPhysReg XMMArgRegs[] = {X86::XMM0, X86::XMM1, X86::XMM2, in assignArg()
|
D | X86RegisterInfo.cpp | 600 for (MCRegAliasIterator AI(X86::XMM0 + n, this, true); AI.isValid(); ++AI) in getReservedRegs()
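
The getReservedRegs() hit iterates over every register that aliases XMM0+n, so reserving an XMM register also reserves the overlapping YMM/ZMM registers. A small C++ sketch of that pattern against the MC layer; the helper name reserveHighXMM and the XMM8..XMM15 range are illustrative assumptions.

    #include "llvm/ADT/BitVector.h"
    #include "llvm/MC/MCRegisterInfo.h"
    #include "MCTargetDesc/X86MCTargetDesc.h"   // target-internal header providing X86::XMM0..

    using namespace llvm;

    // Mark XMM8..XMM15 and everything overlapping them (YMM8.., ZMM8..) as reserved.
    static void reserveHighXMM(BitVector &Reserved, const MCRegisterInfo &MRI) {
      for (unsigned n = 8; n != 16; ++n)
        for (MCRegAliasIterator AI(X86::XMM0 + n, &MRI, /*IncludeSelf=*/true);
             AI.isValid(); ++AI)
          Reserved.set(*AI);
    }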
|
D | README-SSE.txt | 807 This would be better kept in the SSE unit by treating XMM0 as a 4xfloat and
|
D | X86RegisterInfo.td | 212 def XMM0: X86Reg<"xmm0", 0>, DwarfRegNum<[17, 21, 21]>;
|
D | X86InstrSSE.td |
    6331  let Uses = [XMM0], Constraints = "$src1 = $dst" in {
    6340  (VT (OpNode XMM0, VR128:$src2, VR128:$src1)))]>,
    6348  (OpNode XMM0, (mem_frag addr:$src2), VR128:$src1))]>,
    6377  def : Pat<(v4i32 (X86Blendv (v4i32 XMM0), (v4i32 VR128:$src1),
    6380  def : Pat<(v2i64 (X86Blendv (v2i64 XMM0), (v2i64 VR128:$src1),
    6502  let Defs = [XMM0, EFLAGS], hasSideEffects = 0 in {
    6520  let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX], hasSideEffects = 0 in {
    6624  (set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0)),
    6635  (memop addr:$src2), XMM0)),
    6666  let Uses=[XMM0] in
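
The "Uses = [XMM0]" and "Defs = [XMM0, EFLAGS]" clauses above encode implicit-XMM0 operands: the legacy (non-VEX) BLENDVPS/BLENDVPD/PBLENDVB forms read their mask from XMM0, and string-compare instructions such as PCMPISTRM write their result there. A hedged C++ usage example of the former via the SSE4.1 intrinsic:

    #include <smmintrin.h>   /* SSE4.1 */

    /* Per-lane select: where the mask's sign bit is set, take b, else a.
     * With plain (non-VEX) SSE4.1 codegen the compiler places the mask in XMM0,
     * because the legacy BLENDVPS encoding reads it from there implicitly. */
    static inline __m128 select_ps(__m128 a, __m128 b, __m128 mask)
    {
        return _mm_blendv_ps(a, b, mask);
    }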
|
D | X86FastISel.cpp | 3125 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, in fastLowerArguments() 3462 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, in fastLowerCall()
|
D | X86InstrCompiler.td |
    477  XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
    497  XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/AsmParser/ |
D | X86Operand.h |
    321  return isMem64() && isMemIndexReg(X86::XMM0, X86::XMM15);   in isMem64_RC128()
    324  return isMem128() && isMemIndexReg(X86::XMM0, X86::XMM15);  in isMem128_RC128()
    330  return isMem256() && isMemIndexReg(X86::XMM0, X86::XMM15);  in isMem256_RC128()
    337  return isMem64() && isMemIndexReg(X86::XMM0, X86::XMM31);   in isMem64_RC128X()
    340  return isMem128() && isMemIndexReg(X86::XMM0, X86::XMM31);  in isMem128_RC128X()
    346  return isMem256() && isMemIndexReg(X86::XMM0, X86::XMM31);  in isMem256_RC128X()
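
These isMem*_RC128 / _RC128X predicates classify VSIB memory operands, i.e. memory operands whose index register is an XMM register (XMM0-XMM15, or XMM0-XMM31 with EVEX), as used by the gather instructions. A small hedged C++ example that produces such an operand through the AVX2 gather intrinsic:

    #include <immintrin.h>   /* AVX2 */

    /* VGATHERDPS takes its four 32-bit indices in an XMM register (VSIB
     * addressing), which is the operand class checked above. */
    static inline __m128 gather4(const float *base, __m128i idx)
    {
        return _mm_i32gather_ps(base, idx, /*scale=*/4);
    }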
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/configs/common/lib/Target/X86/ |
D | X86GenCallingConv.inc |
    232   X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
    438   X86::XMM0, X86::XMM1, X86::XMM2
    603   X86::XMM0, X86::XMM1, X86::XMM2
    880   X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
    904   X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
    1173  X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
    1237  X86::XMM0, X86::XMM1, X86::XMM2
    1550  X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
    1952  …X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7, X86::XMM8,…
    1976  …X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7, X86::XMM8,…
    [all …]
|
D | X86GenRegisterInfo.inc |
    163   XMM0 = 143,
    1279  { X86::XMM0 },
    1586  …X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7, X86::XMM8,…
    1636  …X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7, X86::XMM8,…
    1956  …X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7, X86::XMM8,…
    1986  …X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7, X86::XMM8,…
    2426  …X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7, X86::XMM8,…
    2436  …X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7, X86::XMM8,…
    2732  { 17U, X86::XMM0 },
    2809  { 21U, X86::XMM0 },
    [all …]
|
/third_party/libffi/src/x86/ |
D | win64_intel.S | 67 movsd XMM0, qword ptr [RSP] ; movsd (%rsp), %xmm0
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/Disassembler/ |
D | X86DisassemblerDecoder.h | 219 ENTRY(XMM0) \
|
D | X86Disassembler.cpp | 1941 mcInst.addOperand(MCOperand::createReg(X86::XMM0 + (immediate >> 4))); in translateImmediate()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/MCTargetDesc/ |
D | X86MCTargetDesc.cpp | 134 {codeview::RegisterId::XMM0, X86::XMM0}, in initLLVMToSEHAndCVRegMapping()
|
D | X86InstComments.cpp | 207 if (X86::XMM0 <= RegNo && RegNo <= X86::XMM31) in getVectorRegSize()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/include/llvm/DebugInfo/CodeView/ |
D | CodeViewRegisters.def | 151 CV_REGISTER(XMM0, 154)
|