
Searched refs:psrlw (Results 1 – 25 of 59) sorted by relevance


/external/libvpx/libvpx/vpx_dsp/x86/
highbd_subpel_variance_impl_sse2.asm
339 psrlw m1, 4
340 psrlw m0, 4
367 psrlw m1, 4
368 psrlw m0, 4
569 psrlw m1, 4
572 psrlw m0, 4
608 psrlw m4, 4
611 psrlw m0, 4
683 psrlw m1, 4
684 psrlw m0, 4
[all …]
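
The repeated psrlw m, 4 hits above look like the final rounding shift of a fixed-point bilinear interpolation done in 16-bit lanes, with 4 fractional bits. A minimal scalar sketch of one lane under that assumption (the function name and the a*(16-f)+b*f form are illustrative, not taken from the assembly):

#include <stdint.h>

/* Hypothetical model of one 16-bit lane: blend two samples with a 4-bit
 * fraction f (0..16), add 8 to round, then shift right by 4 -- the shift
 * is the part that psrlw m, 4 performs on every lane at once. */
static uint16_t bilinear_round(uint16_t a, uint16_t b, unsigned f) {
    uint32_t acc = (uint32_t)a * (16 - f) + (uint32_t)b * f + 8;
    return (uint16_t)(acc >> 4);  /* psrlw ..., 4 */
}
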
quantize_ssse3_x86_64.asm
38 psrlw m5, 15
41 psrlw m0, 1 ; m0 = (m0 + 1) / 2
42 psrlw m1, 1 ; m1 = (m1 + 1) / 2
95 psrlw m8, 1
96 psrlw m13, 1
148 psrlw m14, 1
149 psrlw m13, 1
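
The quantizer hits fit a common idiom: shifting an all-ones word right by 15 (psrlw m5, 15) leaves 1 in every 16-bit lane, and adding that before a 1-bit shift gives the (m + 1) / 2 the comments describe. A scalar sketch of one lane under that assumption (the paddw implied by the comment is not among the hits):

#include <stdint.h>

/* One 16-bit lane of the rounding halve described by "m0 = (m0 + 1) / 2". */
static uint16_t round_halve(uint16_t x) {
    uint16_t one = (uint16_t)(0xFFFFu >> 15);        /* psrlw m5, 15 on an all-ones lane */
    return (uint16_t)((uint16_t)(x + one) >> 1);     /* paddw (assumed) then psrlw m0, 1 */
}
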
/external/libyuv/files/source/
row_win.cc
337 psrlw xmm4, 5 in RGB565ToARGBRow_SSE2()
387 psrlw xmm4, 6 in ARGB1555ToARGBRow_SSE2()
452 psrlw xmm3, 4 in ARGB4444ToARGBRow_SSE2()
635 psrlw xmm3, 8 in ARGBToARGB4444Row_SSE2()
679 psrlw xmm0, 7 in ARGBToYRow_SSSE3()
680 psrlw xmm2, 7 in ARGBToYRow_SSSE3()
713 psrlw xmm0, 7 in ARGBToYRow_Unaligned_SSSE3()
714 psrlw xmm2, 7 in ARGBToYRow_Unaligned_SSSE3()
747 psrlw xmm0, 7 in BGRAToYRow_SSSE3()
748 psrlw xmm2, 7 in BGRAToYRow_SSSE3()
[all …]
scale.cc
210 psrlw xmm5, 8 in ScaleRowDown2_SSE2()
240 psrlw xmm5, 8 in ScaleRowDown2Int_SSE2()
253 psrlw xmm0, 8 in ScaleRowDown2Int_SSE2()
255 psrlw xmm1, 8 in ScaleRowDown2Int_SSE2()
284 psrlw xmm5, 8 in ScaleRowDown2_Unaligned_SSE2()
315 psrlw xmm5, 8 in ScaleRowDown2Int_Unaligned_SSE2()
328 psrlw xmm0, 8 in ScaleRowDown2Int_Unaligned_SSE2()
330 psrlw xmm1, 8 in ScaleRowDown2Int_Unaligned_SSE2()
393 psrlw xmm7, 8 in ScaleRowDown4Int_SSE2()
414 psrlw xmm0, 8 in ScaleRowDown4Int_SSE2()
[all …]
/external/libvpx/libvpx/third_party/libyuv/source/
scale_win.cc
111 psrlw xmm0, 8 // isolate odd pixels. in ScaleRowDown2_SSE2()
112 psrlw xmm1, 8 in ScaleRowDown2_SSE2()
133 psrlw xmm5, 8 in ScaleRowDown2Linear_SSE2()
141 psrlw xmm0, 8 in ScaleRowDown2Linear_SSE2()
143 psrlw xmm1, 8 in ScaleRowDown2Linear_SSE2()
170 psrlw xmm5, 8 in ScaleRowDown2Box_SSE2()
182 psrlw xmm0, 8 in ScaleRowDown2Box_SSE2()
184 psrlw xmm1, 8 in ScaleRowDown2Box_SSE2()
329 psrlw xmm0, 8 in ScaleRowDown4_SSE2()
353 psrlw xmm7, 8 in ScaleRowDown4Box_SSE2()
[all …]
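
In the ScaleRowDown2* hits, psrlw by 8 serves two purposes the comments hint at: applied to an all-ones register (xmm5) it builds a 0x00FF per-byte mask, and applied to packed pixels it keeps the high byte of each 16-bit pair, i.e. every odd-indexed pixel, which is the 2:1 point-sampled downscale. A rough scalar equivalent, assuming an 8-bit single-channel row; the name is illustrative:

#include <stddef.h>
#include <stdint.h>

/* Scalar sketch of ScaleRowDown2-style point sampling: treat source bytes as
 * little-endian 16-bit pairs and keep the high byte of each pair, the same
 * byte that psrlw xmm0, 8 isolates in every lane. */
static void scale_row_down2(const uint8_t* src, uint8_t* dst, size_t dst_width) {
    for (size_t i = 0; i < dst_width; ++i) {
        uint16_t pair = (uint16_t)(src[2 * i] | (src[2 * i + 1] << 8));
        dst[i] = (uint8_t)(pair >> 8);  /* "isolate odd pixels" */
    }
}
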
row_x86.asm
28 psrlw m2, m2, 8
40 psrlw m0, m0, 8 ; UYVY odd bytes are Y
41 psrlw m1, m1, 8
74 psrlw m4, m4, 8
82 psrlw m2, m0, 8 ; odd bytes
83 psrlw m3, m1, 8
row_win.cc
482 psrlw xmm4, 5 in RGB565ToARGBRow_SSE2()
678 psrlw xmm4, 6 in ARGB1555ToARGBRow_SSE2()
741 psrlw xmm3, 4 in ARGB4444ToARGBRow_SSE2()
1010 psrlw xmm3, 8 in ARGBToARGB4444Row_SSE2()
1160 psrlw xmm0, 7 in ARGBToYRow_SSSE3()
1161 psrlw xmm2, 7 in ARGBToYRow_SSSE3()
1197 psrlw xmm0, 7 in ARGBToYJRow_SSSE3()
1198 psrlw xmm2, 7 in ARGBToYJRow_SSSE3()
1314 psrlw xmm0, 7 in BGRAToYRow_SSSE3()
1315 psrlw xmm2, 7 in BGRAToYRow_SSSE3()
[all …]
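
The psrlw xmm, 7 in the *ToYRow_SSSE3 hits is the normalization step of a fixed-point RGB-to-luma dot product: pmaddubsw sums byte pixels against coefficients scaled up by 128, and shifting the 16-bit sums right by 7 brings them back to 8-bit range before the +16 video offset is added. A scalar sketch assuming half-scaled BT.601 weights (13, 65, 33 for B, G, R); treat those exact constants as an assumption rather than a quote from the hits:

#include <stdint.h>

/* One pixel of an ARGBToY-style conversion: weighted sum, >>7 (psrlw xmm, 7),
 * then the +16 limited-range offset. Coefficients are assumed, not verified here. */
static uint8_t argb_to_y(uint8_t b, uint8_t g, uint8_t r) {
    uint16_t acc = (uint16_t)(13 * b + 65 * g + 33 * r);  /* max 28305, fits in 16 bits */
    return (uint8_t)((acc >> 7) + 16);
}
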
/external/libvpx/libvpx/vp9/encoder/x86/
vp9_quantize_ssse3_x86_64.asm
38 psrlw m5, 15
40 psrlw m1, 1 ; m1 = (m1 + 1) / 2
82 psrlw m8, 1
83 psrlw m13, 1
86 psrlw m0, m3, 2
88 psrlw m0, m3, 1
135 psrlw m14, 1
136 psrlw m13, 1
/external/llvm/test/CodeGen/X86/
2012-02-23-mmx-inlineasm.ll
5 ; CHECK: psrlw %mm0, %mm1
7 call void asm sideeffect "psrlw $0, %mm1", "y,~{dirflag},~{fpsr},~{flags}"(i32 8) nounwind
2007-03-24-InlineAsmXConstraint.ll
8 ; CHECK: psrlw $8, %xmm0
11 tail call void asm sideeffect "psrlw $0, %xmm0", "X,~{dirflag},~{fpsr},~{flags}"( i32 8 )
pr16807.ll
11 ; CHECK: psrlw
15 ; CHECK: psrlw
lower-vec-shift.ll
16 ; SSE: psrlw
17 ; SSE-NEXT: psrlw
33 ; SSE: psrlw
34 ; SSE-NEXT: psrlw
x86-shifts.ll
82 ; CHECK: psrlw
83 ; CHECK-NEXT: psrlw
175 ; CHECK: psrlw $3
193 ; CHECK: psrlw $3
vshift-2.ll
54 ; CHECK: psrlw
66 ; CHECK: psrlw
vector-idiv.ll
135 ; SSE41-NEXT: psrlw $1, %xmm0
137 ; SSE41-NEXT: psrlw $2, %xmm0
145 ; SSE-NEXT: psrlw $1, %xmm0
147 ; SSE-NEXT: psrlw $2, %xmm0
169 ; SSE41-NEXT: psrlw $1, %xmm0
171 ; SSE41-NEXT: psrlw $2, %xmm0
174 ; SSE41-NEXT: psrlw $1, %xmm1
176 ; SSE41-NEXT: psrlw $2, %xmm1
185 ; SSE-NEXT: psrlw $1, %xmm0
187 ; SSE-NEXT: psrlw $2, %xmm0
[all …]
vec_insert-5.ll
66 ; CHECK-NEXT: psrlw $8, %xmm0
75 ; CHECK-NEXT: psrlw $8, %xmm0
/external/mesa3d/src/mesa/x86/
read_rgba_span_x86.S
564 psrlw $SCALE_ADJUST, %mm0
565 psrlw $SCALE_ADJUST, %mm2
599 psrlw $SCALE_ADJUST, %mm0
600 psrlw $SCALE_ADJUST, %mm2
637 psrlw $SCALE_ADJUST, %mm0
638 psrlw $SCALE_ADJUST, %mm2
667 psrlw $SCALE_ADJUST, %mm0
/external/libvpx/libvpx/vp8/encoder/x86/
quantize_mmx.asm
214 psrlw mm0, 15
216 psrlw mm1, 15
237 psrlw mm0, 15
239 psrlw mm1, 15
/external/llvm/test/Analysis/CostModel/X86/
testshiftlshr.ll
276 ; SSE2-CODEGEN: psrlw $3
290 ; SSE2-CODEGEN: psrlw $3
306 ; SSE2-CODEGEN: psrlw $3
490 ; SSE2-CODEGEN: psrlw $3
503 ; SSE2-CODEGEN: psrlw $3
518 ; SSE2-CODEGEN: psrlw $3
/external/libvpx/libvpx/vp9/common/x86/
vp9_mfqe_sse2.asm
73 psrlw xmm2, 4
74 psrlw xmm3, 4
142 psrlw xmm2, 4
/external/libvpx/libvpx/vp8/common/x86/
mfqe_sse2.asm
72 psrlw xmm2, 4
73 psrlw xmm3, 4
141 psrlw xmm2, 4
/external/valgrind/VEX/test/
mmxtest.c
392 #define psrlw_m2r(var, reg) mmx_m2r(psrlw, var, reg)
393 #define psrlw_r2r(regs, regd) mmx_r2r(psrlw, regs, regd)
394 #define psrlw(vars, vard) mmx_m2m(psrlw, vars, vard) macro
590 do_test("psrlw", psrlw(ma,mb)); in main()
/external/valgrind/none/tests/x86/
insn_mmx.def
73 psrlw imm8[4] mm.uw[0x0123,0x4567,0x89ab,0xcdef] => 1.uw[0x0012,0x0456,0x089a,0x0cde]
74 psrlw mm.uq[4] mm.uw[0x0123,0x4567,0x89ab,0xcdef] => 1.uw[0x0012,0x0456,0x089a,0x0cde]
75 psrlw m64.uq[4] mm.uw[0x0123,0x4567,0x89ab,0xcdef] => 1.uw[0x0012,0x0456,0x089a,0x0cde]
/external/valgrind/none/tests/amd64/
insn_mmx.def
93 psrlw imm8[4] mm.uw[0x0123,0x4567,0x89ab,0xcdef] => 1.uw[0x0012,0x0456,0x089a,0x0cde]
94 psrlw mm.uq[4] mm.uw[0x0123,0x4567,0x89ab,0xcdef] => 1.uw[0x0012,0x0456,0x089a,0x0cde]
95 psrlw m64.uq[4] mm.uw[0x0123,0x4567,0x89ab,0xcdef] => 1.uw[0x0012,0x0456,0x089a,0x0cde]
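
The valgrind .def entries spell out the semantics being tested: psrlw shifts each unsigned 16-bit lane right independently, so 0x0123 >> 4 gives 0x0012, and the behavior is the same whether the count is an immediate, an MMX register, or a 64-bit memory operand. A small scalar emulation of that, matching the vectors above:

#include <stdint.h>

/* Emulate psrlw on a 64-bit packed value: logical right shift of each unsigned
 * 16-bit lane. Counts of 16 or more clear every lane, as on hardware. */
static uint64_t psrlw_emul(uint64_t packed, unsigned count) {
    if (count > 15) return 0;
    uint64_t result = 0;
    for (int lane = 0; lane < 4; ++lane) {
        uint16_t w = (uint16_t)(packed >> (16 * lane));
        result |= (uint64_t)(uint16_t)(w >> count) << (16 * lane);
    }
    return result;
}

/* With lanes 0x0123, 0x4567, 0x89ab, 0xcdef packed low-to-high,
 * psrlw_emul(0xcdef89ab45670123ULL, 4) == 0x0cde089a04560012ULL. */
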
/external/llvm/test/MC/X86/
x86-32-coverage.s
4461 psrlw 0xdeadbeef(%ebx,%ecx,8),%mm3
4465 psrlw 0x45,%mm3
4469 psrlw 0x7eed,%mm3
4473 psrlw 0xbabecafe,%mm3
4477 psrlw 0x12345678,%mm3
4481 psrlw %mm3,%mm3
4485 psrlw 0xdeadbeef(%ebx,%ecx,8),%xmm5
4489 psrlw 0x45,%xmm5
4493 psrlw 0x7eed,%xmm5
4497 psrlw 0xbabecafe,%xmm5
[all …]
