Searched refs:psrlw (Results 1 – 25 of 77) sorted by relevance
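
psrlw ("packed shift right logical words", MMX/SSE2) shifts every 16-bit lane of a vector register right by the same count, filling with zeros. With an immediate count, the SSE2 intrinsic _mm_srli_epi16 compiles to this instruction. A minimal, self-contained sketch:

    #include <emmintrin.h>  /* SSE2 */
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        __m128i v = _mm_set1_epi16(0x1234);
        __m128i r = _mm_srli_epi16(v, 8);    /* psrlw $8: keep each word's high byte */
        uint16_t out[8];
        _mm_storeu_si128((__m128i *)out, r);
        printf("0x%04x\n", out[0]);          /* prints 0x0012 */
        return 0;
    }

The hits below use the shift for a few recurring purposes: building per-word byte masks from an all-ones register, isolating the odd-indexed byte of each word, and unsigned division of words by powers of two.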


/external/libjpeg-turbo/simd/
jcsample-sse2-64.asm
101 psrlw xmm6,BYTE_BIT ; xmm6={0xFF 0x00 0xFF 0x00 ..}
131 psrlw xmm2,BYTE_BIT
133 psrlw xmm3,BYTE_BIT
139 psrlw xmm0,1
140 psrlw xmm1,1
245 psrlw xmm6,BYTE_BIT ; xmm6={0xFF 0x00 0xFF 0x00 ..}
279 psrlw xmm4,BYTE_BIT
281 psrlw xmm5,BYTE_BIT
288 psrlw xmm4,BYTE_BIT
290 psrlw xmm5,BYTE_BIT
[all …]
jcsample-mmx.asm
104 psrlw mm6,BYTE_BIT ; mm6={0xFF 0x00 0xFF 0x00 ..}
125 psrlw mm2,BYTE_BIT
127 psrlw mm3,BYTE_BIT
133 psrlw mm0,1
134 psrlw mm1,1
246 psrlw mm6,BYTE_BIT ; mm6={0xFF 0x00 0xFF 0x00 ..}
270 psrlw mm4,BYTE_BIT
272 psrlw mm5,BYTE_BIT
279 psrlw mm4,BYTE_BIT
281 psrlw mm5,BYTE_BIT
[all …]
jcsample-sse2.asm
104 psrlw xmm6,BYTE_BIT ; xmm6={0xFF 0x00 0xFF 0x00 ..}
137 psrlw xmm2,BYTE_BIT
139 psrlw xmm3,BYTE_BIT
145 psrlw xmm0,1
146 psrlw xmm1,1
259 psrlw xmm6,BYTE_BIT ; xmm6={0xFF 0x00 0xFF 0x00 ..}
296 psrlw xmm4,BYTE_BIT
298 psrlw xmm5,BYTE_BIT
305 psrlw xmm4,BYTE_BIT
307 psrlw xmm5,BYTE_BIT
[all …]
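
The jcsample hits above are libjpeg-turbo's 2:1 chroma downsampling, where psrlw plays two roles: shifting an all-ones register right by BYTE_BIT (8) builds the {0xFF 0x00 ..} mask noted in the comments (0x00FF in each word, selecting the even-indexed bytes), while the same shift on pixel data isolates the odd-indexed bytes; the final shift by 1 halves the bias-adjusted sum. A sketch of that idiom, assuming the mask is built with the usual pcmpeqw trick:

    #include <emmintrin.h>

    /* Average each even/odd byte pair of v into one word (2:1 downsample). */
    static __m128i downsample2(__m128i v, __m128i bias)
    {
        __m128i mask = _mm_srli_epi16(_mm_cmpeq_epi16(v, v), 8); /* 0x00FF per word */
        __m128i even = _mm_and_si128(v, mask);    /* even-indexed bytes */
        __m128i odd  = _mm_srli_epi16(v, 8);      /* odd-indexed bytes  */
        return _mm_srli_epi16(_mm_add_epi16(_mm_add_epi16(even, odd), bias), 1);
    }
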
/external/libvpx/libvpx/vpx_dsp/x86/
highbd_subpel_variance_impl_sse2.asm
339 psrlw m1, 4
340 psrlw m0, 4
367 psrlw m1, 4
368 psrlw m0, 4
569 psrlw m1, 4
572 psrlw m0, 4
608 psrlw m4, 4
611 psrlw m0, 4
683 psrlw m1, 4
684 psrlw m0, 4
[all …]
quantize_ssse3_x86_64.asm
38 psrlw m5, 15
41 psrlw m0, 1 ; m0 = (m0 + 1) / 2
42 psrlw m1, 1 ; m1 = (m1 + 1) / 2
129 psrlw m8, 1
130 psrlw m13, 1
231 psrlw m14, 1
232 psrlw m13, 1
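
In the quantizer above, psrlw implements two constant tricks: shifting an all-ones register right by 15 leaves the word constant 1, and shifting right by 1 after adding that constant is the rounding halve the comments describe ("(m + 1) / 2"). A sketch of both shifts together, assuming the source register starts as all ones (e.g. via pcmpeqw):

    #include <emmintrin.h>

    static __m128i round_halve_epi16(__m128i m)
    {
        __m128i ones = _mm_cmpeq_epi16(m, m);            /* 0xFFFF in every word  */
        __m128i one  = _mm_srli_epi16(ones, 15);         /* psrlw reg, 15 -> 1    */
        return _mm_srli_epi16(_mm_add_epi16(m, one), 1); /* psrlw reg, 1: (m+1)/2 */
    }
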
/external/llvm/test/CodeGen/X86/
vector-popcnt-128.ll
60 ; SSSE3-NEXT: psrlw $4, %xmm0
76 ; SSE41-NEXT: psrlw $4, %xmm0
162 ; SSSE3-NEXT: psrlw $4, %xmm0
184 ; SSE41-NEXT: psrlw $4, %xmm0
223 ; SSE2-NEXT: psrlw $1, %xmm1
229 ; SSE2-NEXT: psrlw $2, %xmm0
233 ; SSE2-NEXT: psrlw $4, %xmm1
239 ; SSE2-NEXT: psrlw $8, %xmm0
245 ; SSE3-NEXT: psrlw $1, %xmm1
251 ; SSE3-NEXT: psrlw $2, %xmm0
[all …]
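
These popcount tests exercise the SSSE3 pshufb nibble lookup: psrlw $4 moves each byte's high nibble down, and an 0x0F byte mask keeps the 16-bit shift from leaking bits across byte lanes (x86 has no byte-granular shift). A sketch of that pattern:

    #include <tmmintrin.h>  /* SSSE3 */

    /* Per-byte popcount via a 16-entry nibble lookup table. */
    static __m128i popcnt_epi8(__m128i v)
    {
        const __m128i lut  = _mm_setr_epi8(0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4);
        const __m128i mask = _mm_set1_epi8(0x0F);
        __m128i lo = _mm_and_si128(v, mask);
        __m128i hi = _mm_and_si128(_mm_srli_epi16(v, 4), mask); /* psrlw $4 */
        return _mm_add_epi8(_mm_shuffle_epi8(lut, lo),
                            _mm_shuffle_epi8(lut, hi));
    }

The SSE2/SSE3 checks instead show the classic bit-parallel fallback, shifting by 1, 2, 4, and 8 to sum bits within each word.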
vector-shift-lshr-128.ll
189 ; SSE2-NEXT: psrlw $8, %xmm0
197 ; SSE2-NEXT: psrlw $4, %xmm0
205 ; SSE2-NEXT: psrlw $2, %xmm0
212 ; SSE2-NEXT: psrlw $1, %xmm0
227 ; SSE41-NEXT: psrlw $8, %xmm4
231 ; SSE41-NEXT: psrlw $4, %xmm1
235 ; SSE41-NEXT: psrlw $2, %xmm1
240 ; SSE41-NEXT: psrlw $1, %xmm1
289 ; X32-SSE-NEXT: psrlw $8, %xmm0
297 ; X32-SSE-NEXT: psrlw $4, %xmm0
[all …]
vector-tzcnt-128.ll
226 ; SSSE3-NEXT: psrlw $4, %xmm2
251 ; SSE41-NEXT: psrlw $4, %xmm2
381 ; SSSE3-NEXT: psrlw $4, %xmm2
406 ; SSE41-NEXT: psrlw $4, %xmm2
472 ; SSE2-NEXT: psrlw $1, %xmm0
478 ; SSE2-NEXT: psrlw $2, %xmm1
482 ; SSE2-NEXT: psrlw $4, %xmm2
488 ; SSE2-NEXT: psrlw $8, %xmm0
498 ; SSE3-NEXT: psrlw $1, %xmm0
504 ; SSE3-NEXT: psrlw $2, %xmm1
[all …]
2012-02-23-mmx-inlineasm.ll
5 ; CHECK: psrlw %mm0, %mm1
7 call void asm sideeffect "psrlw $0, %mm1", "y,~{dirflag},~{fpsr},~{flags}"(i32 8) nounwind
2007-03-24-InlineAsmXConstraint.ll
8 ; CHECK: psrlw $8, %xmm0
11 tail call void asm sideeffect "psrlw $0, %xmm0", "X,~{dirflag},~{fpsr},~{flags}"( i32 8 )
pr16807.ll
11 ; CHECK: psrlw
15 ; CHECK: psrlw
vector-rotate-128.ll
296 ; SSE2-NEXT: psrlw $8, %xmm0
304 ; SSE2-NEXT: psrlw $4, %xmm0
312 ; SSE2-NEXT: psrlw $2, %xmm0
319 ; SSE2-NEXT: psrlw $1, %xmm0
363 ; SSE41-NEXT: psrlw $8, %xmm4
367 ; SSE41-NEXT: psrlw $4, %xmm2
371 ; SSE41-NEXT: psrlw $2, %xmm2
376 ; SSE41-NEXT: psrlw $1, %xmm2
481 ; X32-SSE-NEXT: psrlw $8, %xmm0
489 ; X32-SSE-NEXT: psrlw $4, %xmm0
[all …]
lower-vec-shift.ll
16 ; SSE: psrlw
17 ; SSE-NEXT: psrlw
33 ; SSE: psrlw
34 ; SSE-NEXT: psrlw
x86-shifts.ll
82 ; CHECK: psrlw
83 ; CHECK-NEXT: psrlw
175 ; CHECK: psrlw $3
193 ; CHECK: psrlw $3
vshift-2.ll
54 ; CHECK: psrlw
66 ; CHECK: psrlw
/external/libvpx/libvpx/third_party/libyuv/source/
scale_win.cc
111 psrlw xmm0, 8 // isolate odd pixels. in ScaleRowDown2_SSE2()
112 psrlw xmm1, 8 in ScaleRowDown2_SSE2()
133 psrlw xmm5, 8 in ScaleRowDown2Linear_SSE2()
141 psrlw xmm0, 8 in ScaleRowDown2Linear_SSE2()
143 psrlw xmm1, 8 in ScaleRowDown2Linear_SSE2()
170 psrlw xmm5, 8 in ScaleRowDown2Box_SSE2()
182 psrlw xmm0, 8 in ScaleRowDown2Box_SSE2()
184 psrlw xmm1, 8 in ScaleRowDown2Box_SSE2()
329 psrlw xmm0, 8 in ScaleRowDown4_SSE2()
353 psrlw xmm7, 8 in ScaleRowDown4Box_SSE2()
[all …]
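
ScaleRowDown2 above halves a row by keeping every second pixel: psrlw by 8 moves each word's odd-indexed byte down, and packuswb repacks two registers into 16 bytes. A sketch of that step:

    #include <emmintrin.h>

    /* 32 source pixels in lo/hi -> the 16 odd-indexed pixels, packed. */
    static __m128i scale_row_down2(__m128i lo, __m128i hi)
    {
        return _mm_packus_epi16(_mm_srli_epi16(lo, 8),   /* psrlw xmm0, 8 */
                                _mm_srli_epi16(hi, 8));  /* psrlw xmm1, 8 */
    }
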
row_x86.asm
28 psrlw m2, m2, 8
40 psrlw m0, m0, 8 ; UYVY odd bytes are Y
41 psrlw m1, m1, 8
74 psrlw m4, m4, 8
82 psrlw m2, m0, 8 ; odd bytes
83 psrlw m3, m1, 8
row_win.cc
482 psrlw xmm4, 5 in RGB565ToARGBRow_SSE2()
678 psrlw xmm4, 6 in ARGB1555ToARGBRow_SSE2()
741 psrlw xmm3, 4 in ARGB4444ToARGBRow_SSE2()
1010 psrlw xmm3, 8 in ARGBToARGB4444Row_SSE2()
1160 psrlw xmm0, 7 in ARGBToYRow_SSSE3()
1161 psrlw xmm2, 7 in ARGBToYRow_SSSE3()
1197 psrlw xmm0, 7 in ARGBToYJRow_SSSE3()
1198 psrlw xmm2, 7 in ARGBToYJRow_SSSE3()
1314 psrlw xmm0, 7 in BGRAToYRow_SSSE3()
1315 psrlw xmm2, 7 in BGRAToYRow_SSSE3()
[all …]
/external/libvpx/libvpx/vp9/encoder/x86/
vp9_quantize_ssse3_x86_64.asm
38 psrlw m5, 15
40 psrlw m1, 1 ; m1 = (m1 + 1) / 2
82 psrlw m8, 1
83 psrlw m13, 1
86 psrlw m0, m3, 2
88 psrlw m0, m3, 1
135 psrlw m14, 1
136 psrlw m13, 1
/external/libyuv/files/source/
scale_win.cc
110 psrlw xmm0, 8 // isolate odd pixels. in ScaleRowDown2_SSSE3()
111 psrlw xmm1, 8 in ScaleRowDown2_SSSE3()
133 psrlw xmm4, 15 in ScaleRowDown2Linear_SSSE3()
167 psrlw xmm4, 15 in ScaleRowDown2Box_SSSE3()
183 psrlw xmm0, 1 in ScaleRowDown2Box_SSSE3()
184 psrlw xmm1, 1 in ScaleRowDown2Box_SSSE3()
330 psrlw xmm0, 8 in ScaleRowDown4_SSSE3()
354 psrlw xmm4, 15 in ScaleRowDown4Box_SSSE3()
385 psrlw xmm0, 4 // /16 for average of 4 * 4 in ScaleRowDown4Box_SSSE3()
568 psrlw xmm0, 2 in ScaleRowDown34_1_Box_SSSE3()
[all …]
row_win.cc
477 psrlw xmm4, 5
673 psrlw xmm4, 6
736 psrlw xmm3, 4
1003 psrlw xmm3, 8
1153 psrlw xmm0, 7
1154 psrlw xmm2, 7
1190 psrlw xmm0, 7
1191 psrlw xmm2, 7
1307 psrlw xmm0, 7
1308 psrlw xmm2, 7
[all …]
/external/llvm/test/Analysis/CostModel/X86/
testshiftlshr.ll
34 ; SSE2-CODEGEN: psrlw
46 ; SSE2-CODEGEN: psrlw
58 ; SSE2-CODEGEN: psrlw
214 ; SSE2-CODEGEN: psrlw
226 ; SSE2-CODEGEN: psrlw
238 ; SSE2-CODEGEN: psrlw
276 ; SSE2-CODEGEN: psrlw $3
290 ; SSE2-CODEGEN: psrlw $3
306 ; SSE2-CODEGEN: psrlw $3
490 ; SSE2-CODEGEN: psrlw $3
[all …]
/external/mesa3d/src/mesa/x86/
read_rgba_span_x86.S
564 psrlw $SCALE_ADJUST, %mm0
565 psrlw $SCALE_ADJUST, %mm2
599 psrlw $SCALE_ADJUST, %mm0
600 psrlw $SCALE_ADJUST, %mm2
637 psrlw $SCALE_ADJUST, %mm0
638 psrlw $SCALE_ADJUST, %mm2
667 psrlw $SCALE_ADJUST, %mm0
/external/libvpx/libvpx/vp8/encoder/x86/
quantize_mmx.asm
214 psrlw mm0, 15
216 psrlw mm1, 15
237 psrlw mm0, 15
239 psrlw mm1, 15
/external/libvpx/libvpx/vp8/common/x86/
mfqe_sse2.asm
72 psrlw xmm2, 4
73 psrlw xmm3, 4
141 psrlw xmm2, 4
