/external/mesa3d/src/mesa/x86/ |
D | sse_xform3.S |
    77  MOVAPS ( REGOFF(0, EDX), XMM0 )  /* m0 | m1 | m2 | m3 */
    92  MULPS ( XMM0, XMM4 )  /* m3*ox | m2*ox | m1*ox | m0*ox */
   150  MOVLPS ( S(0), XMM0 )
   151  MOVLPS ( XMM0, D(0) )
   152  MOVSS ( S(2), XMM0 )
   153  MOVSS ( XMM0, D(2) )
   201  XORPS( XMM0, XMM0 )  /* clean the working register */
   214  MOVLPS ( S(0), XMM0 )  /* - | - | s1 | s0 */
   215  MULPS ( XMM1, XMM0 )  /* - | - | s1*m5 | s0*m0 */
   216  ADDPS ( XMM2, XMM0 )  /* - | - | +m13 | +m12 */
   [all …]
|
D | sse_normal.S |
    79  MOVSS ( ARG_SCALE, XMM0 )  /* scale */
    80  SHUFPS ( CONST(0x0), XMM0, XMM0 )  /* scale | scale */
    81  MULPS ( XMM0, XMM1 )  /* m5*scale | m0*scale */
    82  MULSS ( M(10), XMM0 )  /* m10*scale */
    91  MULSS ( XMM0, XMM2 )  /* uz*m10*scale */
   138  MOVSS ( M(0), XMM0 )  /* m0 */
   140  UNPCKLPS( XMM1, XMM0 )  /* m4 | m0 */
   145  MULPS ( XMM4, XMM0 )  /* m4*scale | m0*scale */
   164  MULPS ( XMM0, XMM3 )  /* ux*m4 | ux*m0 */
   231  MOVSS( M(0), XMM0 )  /* m0 */
   [all …]
|
D | sse_xform4.S |
    78  MOVSS( SRC(0), XMM0 )  /* ox */
    79  SHUFPS( CONST(0x0), XMM0, XMM0 )  /* ox | ox | ox | ox */
    80  MULPS( XMM4, XMM0 )  /* ox*m3 | ox*m2 | ox*m1 | ox*m0 */
    94  ADDPS( XMM1, XMM0 )  /* ox*m3+oy*m7 | ... */
    95  ADDPS( XMM2, XMM0 )  /* ox*m3+oy*m7+oz*m11 | ... */
    96  ADDPS( XMM3, XMM0 )  /* ox*m3+oy*m7+oz*m11+ow*m15 | ... */
    97  MOVAPS( XMM0, DST(0) )  /* ->D(3) | ->D(2) | ->D(1) | ->D(0) */
   143  MOVAPS( MAT(0), XMM0 )  /* m3 | m2 | m1 | m0 */
   152  MULPS( XMM0, XMM4 )  /* ox*m3 | ox*m2 | ox*m1 | ox*m0 */
   217  MOVAPS( REGIND(ESI), XMM0 )
   [all …]
|
D | sse_xform2.S |
    76  MOVAPS( M(0), XMM0 )  /* m3 | m2 | m1 | m0 */
    84  MULPS( XMM0, XMM3 )  /* ox*m3 | ox*m2 | ox*m1 | ox*m0 */
   189  XORPS( XMM0, XMM0 )  /* clean the working register */
   200  MOVLPS ( S(0), XMM0 )  /* - | - | oy | ox */
   201  MULPS ( XMM1, XMM0 )  /* - | - | oy*m5 | ox*m0 */
   202  ADDPS ( XMM2, XMM0 )  /* - | - | +m13 | +m12 */
   203  MOVLPS ( XMM0, D(0) )  /* -> D(1) | -> D(0) */
   255  XORPS ( XMM0, XMM0 )  /* 0 | 0 | 0 | 0 */
   263  MOVSS( XMM0, D(3) )  /* ->D(3) */
   310  MOVLPS( M(0), XMM0 )  /* m1 | m0 */
   [all …]
|
D | sse_xform1.S |
    77  MOVAPS( M(0), XMM0 )  /* m3 | m2 | m1 | m0 */
    84  MULPS( XMM0, XMM2 )  /* ox*m3 | ox*m2 | ox*m1 | ox*m0 */
   186  MOVSS( M(0), XMM0 )  /* m0 */
   194  MULSS( XMM0, XMM4 )  /* ox*m0 */
   247  XORPS( XMM0, XMM0 )  /* 0 | 0 | 0 | 0 */
   258  MOVSS( XMM0, D(1) )
   259  MOVSS( XMM0, D(3) )
   305  MOVLPS( M(0), XMM0 )  /* m1 | m0 */
   312  MULPS( XMM0, XMM2 )  /* - | - | ox*m1 | ox*m0 */
   360  MOVSS( M(0), XMM0 )  /* m0 */
   [all …]
|
D | common_x86_asm.S |
   168  XORPS ( XMM0, XMM0 )
   197  XORPS ( XMM0, XMM0 )
   206  DIVPS ( XMM0, XMM1 )
|
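The Mesa hits above all come from hand-written SSE transform and normal-transform routines, and they repeat one pattern: load a matrix column with MOVAPS, broadcast one input coordinate across a register (MOVSS followed by SHUFPS with selector 0x0), multiply it into the column with MULPS, and sum the partial products with ADDPS. The common_x86_asm.S hits are from the runtime SSE-support probe, which deliberately runs DIVPS on zeroed registers. Below is a minimal sketch of the same broadcast/multiply/accumulate pattern written with SSE intrinsics rather than Mesa's assembly macros; the function and parameter names are mine, and it assumes a column-major 4x4 matrix as Mesa uses.

    #include <xmmintrin.h>

    // Sketch of the pattern in sse_xform4.S: out = M * v for a column-major
    // 4x4 matrix. mat points at 16 floats (columns contiguous), 16-byte aligned.
    static void transform_point4(float out[4], const float mat[16], const float v[4])
    {
        __m128 c0 = _mm_load_ps(mat + 0);   // m3 | m2 | m1 | m0  (column 0)
        __m128 c1 = _mm_load_ps(mat + 4);   // column 1
        __m128 c2 = _mm_load_ps(mat + 8);   // column 2
        __m128 c3 = _mm_load_ps(mat + 12);  // column 3

        // _mm_set1_ps plays the role of MOVSS + SHUFPS(0x0): broadcast one
        // coordinate across all four lanes.
        __m128 r = _mm_mul_ps(c0, _mm_set1_ps(v[0]));          // ox * column0
        r = _mm_add_ps(r, _mm_mul_ps(c1, _mm_set1_ps(v[1])));  // + oy * column1
        r = _mm_add_ps(r, _mm_mul_ps(c2, _mm_set1_ps(v[2])));  // + oz * column2
        r = _mm_add_ps(r, _mm_mul_ps(c3, _mm_set1_ps(v[3])));  // + ow * column3
        _mm_storeu_ps(out, r);  // store the transformed point (the .S uses MOVAPS)
    }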
/external/llvm/test/CodeGen/X86/ |
D | avx-cast.ll |
    12  ; AVX-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
    23  ; AVX-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
    36  ; AVX1-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
    43  ; AVX2-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
    57  ; AVX-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
    67  ; AVX-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
    77  ; AVX-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
|
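The "## kill:" lines that dominate the remaining test hits are not instructions; they are liveness annotations the MachineInstr printer emits when a 128-bit value lives in the low half of a wider register. Writing XMM0 implicitly defines YMM0 (with undefined upper lanes), and the def/kill markers record that sub-register/super-register hand-off. A sketch of intrinsics whose codegen produces exactly the two annotation shapes quoted from avx-cast.ll above (function names are mine):

    #include <immintrin.h>

    // Widening cast: the 128-bit argument arrives in XMM0 and becomes the low
    // half of a YMM result, so the test expects
    //   ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
    __m256d widen(__m128d lo) {
        return _mm256_castpd128_pd256(lo);  // upper 128 bits are undefined
    }

    // Narrowing cast: the result is the low half of YMM0, returned in XMM0:
    //   ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
    __m128d narrow(__m256d v) {
        return _mm256_castpd256_pd128(v);
    }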
D | break-false-dep.ll |
   188  ;SSE: xorps [[XMM0:%xmm[0-9]+]], [[XMM0]]
   189  ;SSE-NEXT: cvtsi2sdl {{.*}}, [[XMM0]]
   190  ;SSE-NEXT: mulsd {{.*}}, [[XMM0]]
   191  ;SSE-NEXT: mulsd {{.*}}, [[XMM0]]
   192  ;SSE-NEXT: mulsd {{.*}}, [[XMM0]]
   193  ;SSE-NEXT: movsd [[XMM0]],
   195  ;AVX: vxorps [[XMM0:%xmm[0-9]+]], [[XMM0]]
   196  ;AVX-NEXT: vcvtsi2sdl {{.*}}, [[XMM0]], [[XMM0]]
   197  ;AVX-NEXT: vmulsd {{.*}}, [[XMM0]], [[XMM0]]
   198  ;AVX-NEXT: vmulsd {{.*}}, [[XMM0]], [[XMM0]]
   [all …]
|
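break-false-dep.ll pins down a different XMM0 idiom: cvtsi2sd writes only the low 64 bits of its destination register, so the instruction would otherwise carry a false output dependency on whatever last wrote that register. The CHECK lines verify that the backend first clears the register with (v)xorps. Roughly the C++ shape of the loop those SSE lines match; this is a sketch, not the test's exact source:

    // Each iteration converts an int to double (cvtsi2sdl), multiplies by
    // three doubles loaded from memory (mulsd x3), and stores the result
    // (movsd). The xorps the test checks for exists only to break the false
    // dependency on the previous contents of the xmm register.
    void loopdep(double *out, const int *in, const double *a, const double *b,
                 const double *c, int n) {
        for (int i = 0; i < n; ++i)
            out[i] = in[i] * a[i] * b[i] * c[i];
    }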
D | vector-trunc-math.ll |
    39  ; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
    47  ; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
   110  ; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
   154  ; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
   162  ; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
   387  ; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
   430  ; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
   438  ; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
   506  ; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
   549  ; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
   [all …]
|
D | vector-shuffle-combining-avx2.ll |
    89  ; CHECK-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
   113  ; CHECK-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
   139  ; CHECK-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
   161  ; CHECK-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
   185  ; CHECK-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
   196  ; CHECK-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
|
D | x86-upgrade-avx2-vbroadcast.ll |
    11  ; CHECK-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
|
D | avx512-trunc.ll |
    58  ; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
   136  ; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
   214  ; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
   290  ; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
   367  ; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
|
D | avx2-conversions.ll |
     9  ; CHECK-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
    21  ; CHECK-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
|
D | avx512-cvt.ll |
    93  ; SKX-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
   307  ; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
   309  ; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
   334  ; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
   593  ; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
   634  ; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
   636  ; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
|
D | vector-half-conversions.ll |
   2405  ; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
   2452  ; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
   2498  ; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
   2548  ; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
   2596  ; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
   2643  ; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
   2695  ; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
   2743  ; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
   2790  ; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
   2848  ; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
   [all …]
|
D | avx512-calling-conv.ll |
   138  ; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
   167  ; KNL_X32-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
   287  ; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
   324  ; KNL_X32-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/ |
D | break-false-dep.ll |
   191  ;SSE: xorps [[XMM0:%xmm[0-9]+]], [[XMM0]]
   192  ;SSE-NEXT: cvtsi2sdl {{.*}}, [[XMM0]]
   193  ;SSE-NEXT: mulsd {{.*}}, [[XMM0]]
   194  ;SSE-NEXT: mulsd {{.*}}, [[XMM0]]
   195  ;SSE-NEXT: mulsd {{.*}}, [[XMM0]]
   196  ;SSE-NEXT: movsd [[XMM0]],
   198  ;AVX: vxorps [[XMM0:%xmm[0-9]+]], [[XMM0]]
   199  ;AVX-NEXT: vcvtsi2sdl {{.*}}, [[XMM0]], {{%xmm[0-9]+}}
   200  ;AVX-NEXT: vmulsd {{.*}}, [[XMM0]], [[XMM0]]
   201  ;AVX-NEXT: vmulsd {{.*}}, [[XMM0]], [[XMM0]]
   [all …]
|
D | pr31143.ll |
     4  ; CHECK: movss {{.*}}, %[[XMM0:xmm[0-9]+]]
     6  ; CHECK: roundss $9, %[[XMM0]], %[[XMM1]]
    31  ; CHECK: movsd {{.*}}, %[[XMM0:xmm[0-9]+]]
    33  ; CHECK: roundsd $9, %[[XMM0]], %[[XMM1]]
|
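The pr31143.ll hits appear to pin the SSE4.1 floor lowering: immediate 9 in roundss/roundsd is _MM_FROUND_TO_NEG_INF (1) | _MM_FROUND_NO_EXC (8), i.e. round toward negative infinity. A minimal sketch of source that should produce those CHECK lines, assuming compilation with SSE4.1 enabled:

    #include <cmath>

    // With SSE4.1, std::floor lowers to a single rounding instruction whose
    // immediate 9 = _MM_FROUND_TO_NEG_INF (1) | _MM_FROUND_NO_EXC (8).
    float  floor_f32(float x)  { return std::floor(x); }  // roundss $9
    double floor_f64(double x) { return std::floor(x); }  // roundsd $9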
/external/swiftshader/third_party/LLVM/lib/Target/X86/ |
D | X86CallingConv.td |
    37  // Vector types are returned in XMM0 and XMM1, when they fit. XMM2 and XMM3
    41  CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
    62  // case they use XMM0, otherwise it is the same as the common X86 calling
    65  CCIfType<[f32, f64], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
    72  // The X86-32 fastcc returns 1, 2, or 3 FP values in XMM0-2 if the target has
    76  CCIfType<[f32], CCIfSubtarget<"hasXMMInt()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
    77  CCIfType<[f64], CCIfSubtarget<"hasXMMInt()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
    90  // The X86-64 calling convention always returns FP values in XMM0.
    91  CCIfType<[f32], CCAssignToReg<[XMM0, XMM1]>>,
    92  CCIfType<[f64], CCAssignToReg<[XMM0, XMM1]>>,
   [all …]
|
D | X86GenCallingConv.inc |
   176  X86::XMM0, X86::XMM1, X86::XMM2
   227  X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
   329  X86::XMM0, X86::XMM1, X86::XMM2
   548  X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
   708  X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
   738  X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
   755  X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
   854  X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
   923  X86::XMM0, X86::XMM1, X86::XMM2
   958  X86::XMM0, X86::XMM1, X86::XMM2
   [all …]
|
/external/llvm/lib/Target/X86/ |
D | X86CallingConv.td |
    53  // Vector types are returned in XMM0 and XMM1, when they fit. XMM2 and XMM3
    57  CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
    84  // case they use XMM0, otherwise it is the same as the common X86 calling
    87  CCIfType<[f32, f64], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
    94  // The X86-32 fastcc returns 1, 2, or 3 FP values in XMM0-2 if the target has
    98  CCIfType<[f32], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
    99  CCIfType<[f64], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
   112  // Vector types are returned in XMM0,XMM1,XMMM2 and XMM3.
   114  CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
   140  // Vector types are returned in XMM0,XMM1,XMMM2 and XMM3.
   [all …]
|
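All generations of X86CallingConv.td (and the X86GenCallingConv.inc tables generated from them) encode the same register assignment in TableGen: small vector values are returned in XMM0-XMM3, and the x86-64 conventions return f32/f64 in XMM0, with XMM1 for a second value. Seen from the C++ side that means, for example (a sketch; x86-64 with SSE available, function names mine):

    #include <xmmintrin.h>

    // CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>: a 128-bit vector return value
    // travels back to the caller in XMM0.
    __m128 splat4(float x) { return _mm_set1_ps(x); }

    // CCIfType<[f64], CCAssignToReg<[XMM0, XMM1]>>: on x86-64, a double
    // return value is always in XMM0.
    double halve(double x) { return x * 0.5; }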
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/X86/ |
D | X86CallingConv.td |
    47  let XMM = [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7];
    53  let XMM = [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
   233  // Vector types are returned in XMM0 and XMM1, when they fit. XMM2 and XMM3
   237  CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
   265  // case they use XMM0, otherwise it is the same as the common X86 calling
   268  CCIfType<[f32, f64], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
   275  // The X86-32 fastcc returns 1, 2, or 3 FP values in XMM0-2 if the target has
   279  CCIfType<[f32], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
   280  CCIfType<[f64], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
   293  // Vector types are returned in XMM0,XMM1,XMMM2 and XMM3.
   [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/X86/AsmParser/ |
D | X86Operand.h |
   315  return isMem64() && isMemIndexReg(X86::XMM0, X86::XMM15);   in isMem64_RC128()
   318  return isMem128() && isMemIndexReg(X86::XMM0, X86::XMM15);  in isMem128_RC128()
   324  return isMem256() && isMemIndexReg(X86::XMM0, X86::XMM15);  in isMem256_RC128()
   331  return isMem64() && isMemIndexReg(X86::XMM0, X86::XMM31);   in isMem64_RC128X()
   334  return isMem128() && isMemIndexReg(X86::XMM0, X86::XMM31);  in isMem128_RC128X()
   340  return isMem256() && isMemIndexReg(X86::XMM0, X86::XMM31);  in isMem256_RC128X()
|
/external/llvm/lib/Target/X86/AsmParser/ |
D | X86Operand.h |
   242  return isMem64() && isMemIndexReg(X86::XMM0, X86::XMM15);   in isMem64_RC128()
   245  return isMem128() && isMemIndexReg(X86::XMM0, X86::XMM15);  in isMem128_RC128()
   251  return isMem256() && isMemIndexReg(X86::XMM0, X86::XMM15);  in isMem256_RC128()
   258  return isMem64() && isMemIndexReg(X86::XMM0, X86::XMM31);   in isMem64_RC128X()
   261  return isMem128() && isMemIndexReg(X86::XMM0, X86::XMM31);  in isMem128_RC128X()
   267  return isMem256() && isMemIndexReg(X86::XMM0, X86::XMM31);  in isMem256_RC128X()
|
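The isMem*_RC128/_RC128X predicates in both X86Operand.h copies classify VSIB memory operands for AVX2/AVX-512 gathers and scatters: the operand is accepted only when its index register is an XMM register, XMM0-XMM15 for the VEX forms and XMM0-XMM31 for the EVEX (_RC128X) forms. A sketch of the kind of instruction whose operand these checks match (AVX2 required; function name mine):

    #include <immintrin.h>

    // vgatherdps takes a VSIB operand such as (%rdi,%xmm1,4): a base register
    // plus an XMM index register whose lanes hold independent 32-bit offsets.
    // When the assembler parses that operand, the _RC128 checks above verify
    // the index register really is in XMM0..XMM15.
    __m128 gather4(const float *base, __m128i offsets) {
        return _mm_i32gather_ps(base, offsets, 4);  // scale: 4-byte elements
    }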
/external/swiftshader/third_party/LLVM/test/TableGen/ |
D | Slice.td |
    41  def XMM0: Register<"xmm0">;
    59  [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
|