/third_party/flutter/skia/third_party/externals/libjpeg-turbo/simd/loongson/ |
D | jdsample-mmi.c |
   69  mm1 = mm7; \
   71  mm1 = _mm_slli_si64(mm1, 2 * BYTE_BIT); /* mm1=( - 0 1 2) */ \
   76  mm1 = _mm_or_si64(mm1, wk[r]); /* mm1=(-1 0 1 2) */ \
   83  mm1 = _mm_add_pi16(mm1, PW_EIGHT); \
   88  mm1 = _mm_add_pi16(mm1, mm7); \
   90  mm1 = _mm_srli_pi16(mm1, 4); /* mm1=OutrLE=( 0 2 4 6) */ \
   99  mm1 = _mm_or_si64(mm1, mm0); /* mm1=OutrL=( 0 1 2 3 4 5 6 7) */ \
  102  _mm_store_si64((__m64 *)outptr##r, mm1); \
  114  __m64 mm0, mm1, mm2, mm3 = 0.0, mm4, mm5, mm6, mm7 = 0.0;  in jsimd_h2v2_fancy_upsample_mmi() local
  148  mm1 = _mm_load_si64((__m64 *)inptr_1); /* mm1 = row[-1][0] */  in jsimd_h2v2_fancy_upsample_mmi()
  [all …]
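These matches are from the Loongson MMI port of libjpeg-turbo's "fancy" (triangular-filter) 2x chroma upsampler. As a reference point only, the scalar kernel it vectorizes looks roughly like the sketch below, modelled on the plain-C routine in jdsample.c; the function name and parameters are illustrative, and the +8/+7 biases correspond to the PW_EIGHT constant and the >>4 shift visible above.

    #include <stdint.h>

    /* Triangular 3:1 filter, vertical then horizontal, for one output row.
     * cur = the chroma row being expanded, adj = the adjacent row above or
     * below (whichever this output row leans toward); in_width >= 2. */
    static void h2v2_fancy_row(const uint8_t *cur, const uint8_t *adj,
                               uint8_t *out, int in_width)
    {
      int lastsum = 3 * cur[0] + adj[0];           /* vertical pass, column 0 */
      int cursum  = 3 * cur[1] + adj[1];

      out[0] = (uint8_t)((lastsum * 4 + 8) >> 4);  /* left edge special case */
      out[1] = (uint8_t)((lastsum * 3 + cursum + 7) >> 4);

      for (int i = 1; i < in_width - 1; i++) {
        int nextsum = 3 * cur[i + 1] + adj[i + 1];
        /* horizontal pass: 3:1 toward each neighbour, +8/+7 rounding */
        out[2 * i]     = (uint8_t)((cursum * 3 + lastsum + 8) >> 4);
        out[2 * i + 1] = (uint8_t)((cursum * 3 + nextsum + 7) >> 4);
        lastsum = cursum;
        cursum  = nextsum;
      }

      out[2 * in_width - 2] = (uint8_t)((cursum * 3 + lastsum + 8) >> 4);
      out[2 * in_width - 1] = (uint8_t)((cursum * 4 + 7) >> 4);  /* right edge */
    }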
|
D | jquanti-mmi.c |
   41  mm1 = mm3; \
   48  mm1 = _mm_xor_si64(mm1, mm3); \
   50  mm1 = _mm_sub_pi16(mm1, mm3); \
   56  mm1 = _mm_add_pi16(mm1, corr1); \
   59  mm5 = mm1; \
   65  mm1 = _mm_mulhi_pi16(mm1, recip1); \
   68  mm1 = _mm_add_pi16(mm1, mm5); /* (MSB=1), so we always need to add the */ \
   79  mm5 = mm1; \
   82  mm1 = _mm_mulhi_pi16(mm1, mm7); \
   91  mm1 = _mm_add_pi16(mm1, mm7); \
  [all …]
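jquanti-mmi.c divides DCT coefficients by the quantization table using a reciprocal multiply plus correction terms (the corr1/recip1 operands above). Stripped of the reciprocal trick, the arithmetic it reproduces is a rounded division of the magnitude with the sign restored afterwards; a minimal scalar sketch with illustrative names:

    #include <stdint.h>

    /* Round |coef| / qval to the nearest integer, then restore the sign.
     * The xor/subtract pair mirrors the _mm_xor_si64/_mm_sub_pi16 lines above. */
    static void quantize_block(const int16_t *coef, const uint16_t *qval,
                               int16_t *out)
    {
      for (int i = 0; i < 64; i++) {
        int t = coef[i];
        int sign = (t < 0) ? -1 : 0;          /* all-ones mask for negatives */
        t = (t ^ sign) - sign;                /* |t| via xor + subtract */
        t = (t + (qval[i] >> 1)) / qval[i];   /* rounded division */
        out[i] = (int16_t)((t ^ sign) - sign);
      }
    }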
|
D | jccolext-mmi.c |
   37  #define mmB mm1
   51  #define mmD mm1
   65  #define mmF mm1
   79  #define mmH mm1
   98  __m64 mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7;  in jsimd_rgb_ycc_convert_mmi() local
  314  wk[1] = mm1;  in jsimd_rgb_ycc_convert_mmi()
  318  mm6 = mm1;  in jsimd_rgb_ycc_convert_mmi()
  319  mm1 = _mm_unpacklo_pi16(mm1, mm3);  in jsimd_rgb_ycc_convert_mmi()
  321  mm7 = mm1;  in jsimd_rgb_ycc_convert_mmi()
  323  mm1 = _mm_madd_pi16(mm1, PW_F0299_F0337);  in jsimd_rgb_ycc_convert_mmi()
  [all …]
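jccolext-mmi.c is the RGB→YCbCr converter; the PW_F0299_F0337 constant holds the 0.299/0.337 weight pair, reflecting the usual split 0.587 = 0.337 + 0.250 so that one of the green terms can be handled with a shift. A minimal fixed-point sketch of the underlying math, following the scalar jccolor.c conventions (SCALEBITS = 16, BT.601 constants); macro and function names are re-declared locally for illustration:

    #include <stdint.h>

    #define SCALEBITS 16
    #define ONE_HALF  ((int32_t)1 << (SCALEBITS - 1))
    #define FIX(x)    ((int32_t)((x) * (1L << SCALEBITS) + 0.5))

    static void rgb_to_ycc(uint8_t r, uint8_t g, uint8_t b,
                           uint8_t *y, uint8_t *cb, uint8_t *cr)
    {
      *y  = (uint8_t)((FIX(0.29900) * r + FIX(0.58700) * g +
                       FIX(0.11400) * b + ONE_HALF) >> SCALEBITS);
      *cb = (uint8_t)((-FIX(0.16874) * r - FIX(0.33126) * g +
                       FIX(0.50000) * b +
                       (128 << SCALEBITS) + ONE_HALF - 1) >> SCALEBITS);
      *cr = (uint8_t)((FIX(0.50000) * r - FIX(0.41869) * g -
                       FIX(0.08131) * b +
                       (128 << SCALEBITS) + ONE_HALF - 1) >> SCALEBITS);
    }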
|
D | jdcolext-mmi.c |
   37  #define mmB mm1
   51  #define mmD mm1
   65  #define mmF mm1
   79  #define mmH mm1
   98  __m64 mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7;  in jsimd_ycc_rgb_convert_mmi() local
  112  mm1 = _mm_load_si64((__m64 *)inptr2);  in jsimd_ycc_rgb_convert_mmi()
  124  mm0 = _mm_and_si64(mm0, mm1); /* mm0=Cr(0246)=CrE */  in jsimd_ycc_rgb_convert_mmi()
  125  mm1 = _mm_srli_pi16(mm1, BYTE_BIT); /* mm1=Cr(1357)=CrO */  in jsimd_ycc_rgb_convert_mmi()
  129  mm1 = _mm_add_pi16(mm1, mm7);  in jsimd_ycc_rgb_convert_mmi()
  147  mm7 = mm1; /* mm7 = CrO */  in jsimd_ycc_rgb_convert_mmi()
  [all …]
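jdcolext-mmi.c is the inverse (YCbCr→RGB) path; CrE/CrO above are the even- and odd-indexed chroma samples of one vector. Ignoring that interleaving, the per-pixel math is the standard BT.601 reconstruction; a hedged fixed-point sketch (it assumes arithmetic right shift of negative values, which is what libjpeg's RIGHT_SHIFT relies on with common compilers):

    #include <stdint.h>

    static uint8_t clamp_u8(int v) { return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v; }

    static void ycc_to_rgb(uint8_t y, uint8_t cb, uint8_t cr,
                           uint8_t *r, uint8_t *g, uint8_t *b)
    {
      int cbv = cb - 128, crv = cr - 128;
      *r = clamp_u8(y + ((91881 * crv + 32768) >> 16));                /* 1.40200 * Cr */
      *g = clamp_u8(y - ((22554 * cbv + 46802 * crv + 32768) >> 16));  /* 0.34414 / 0.71414 */
      *b = clamp_u8(y + ((116130 * cbv + 32768) >> 16));               /* 1.77200 * Cb */
    }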
|
/third_party/libjpeg-turbo/simd/loongson/ |
D | jdsample-mmi.c |
   69  mm1 = mm7; \
   71  mm1 = _mm_slli_si64(mm1, 2 * BYTE_BIT); /* mm1=( - 0 1 2) */ \
   76  mm1 = _mm_or_si64(mm1, wk[r]); /* mm1=(-1 0 1 2) */ \
   83  mm1 = _mm_add_pi16(mm1, PW_EIGHT); \
   88  mm1 = _mm_add_pi16(mm1, mm7); \
   90  mm1 = _mm_srli_pi16(mm1, 4); /* mm1=OutrLE=( 0 2 4 6) */ \
   99  mm1 = _mm_or_si64(mm1, mm0); /* mm1=OutrL=( 0 1 2 3 4 5 6 7) */ \
  102  _mm_store_si64((__m64 *)outptr##r, mm1); \
  114  __m64 mm0, mm1, mm2, mm3 = 0.0, mm4, mm5, mm6, mm7 = 0.0;  in jsimd_h2v2_fancy_upsample_mmi() local
  148  mm1 = _mm_load_si64((__m64 *)inptr_1); /* mm1 = row[-1][0] */  in jsimd_h2v2_fancy_upsample_mmi()
  [all …]
|
D | jquanti-mmi.c |
   41  mm1 = mm3; \
   48  mm1 = _mm_xor_si64(mm1, mm3); \
   50  mm1 = _mm_sub_pi16(mm1, mm3); \
   56  mm1 = _mm_add_pi16(mm1, corr1); \
   59  mm5 = mm1; \
   65  mm1 = _mm_mulhi_pi16(mm1, recip1); \
   68  mm1 = _mm_add_pi16(mm1, mm5); /* (MSB=1), so we always need to add the */ \
   79  mm5 = mm1; \
   82  mm1 = _mm_mulhi_pi16(mm1, mm7); \
   91  mm1 = _mm_add_pi16(mm1, mm7); \
  [all …]
|
D | jccolext-mmi.c |
   38  #define mmB mm1
   52  #define mmD mm1
   66  #define mmF mm1
   80  #define mmH mm1
   99  __m64 mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7;  in jsimd_rgb_ycc_convert_mmi() local
  328  wk[1] = mm1;  in jsimd_rgb_ycc_convert_mmi()
  332  mm6 = mm1;  in jsimd_rgb_ycc_convert_mmi()
  333  mm1 = _mm_unpacklo_pi16(mm1, mm3);  in jsimd_rgb_ycc_convert_mmi()
  335  mm7 = mm1;  in jsimd_rgb_ycc_convert_mmi()
  337  mm1 = _mm_madd_pi16(mm1, PW_F0299_F0337);  in jsimd_rgb_ycc_convert_mmi()
  [all …]
|
D | jdcolext-mmi.c |
   37  #define mmB mm1
   51  #define mmD mm1
   65  #define mmF mm1
   79  #define mmH mm1
   98  __m64 mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7;  in jsimd_ycc_rgb_convert_mmi() local
  112  mm1 = _mm_load_si64((__m64 *)inptr2);  in jsimd_ycc_rgb_convert_mmi()
  124  mm0 = _mm_and_si64(mm0, mm1); /* mm0=Cr(0246)=CrE */  in jsimd_ycc_rgb_convert_mmi()
  125  mm1 = _mm_srli_pi16(mm1, BYTE_BIT); /* mm1=Cr(1357)=CrO */  in jsimd_ycc_rgb_convert_mmi()
  129  mm1 = _mm_add_pi16(mm1, mm7);  in jsimd_ycc_rgb_convert_mmi()
  147  mm7 = mm1; /* mm7 = CrO */  in jsimd_ycc_rgb_convert_mmi()
  [all …]
|
/third_party/gstreamer/gstplugins_good/gst/deinterlace/tvtime/tomsmocomp/ |
D | StrangeBob.inc |
   37  "movq -4(%%"XBX", %%"XCX"), %%mm1\n\t" // value m from bottom right
   40  "psubusb %%mm1, %%mm3\n\t"
   41  "psubusb %%mm0, %%mm1\n\t"
   42  "por %%mm1, %%mm3\n\t" // abs(a,m)
   51  "movq 4(%%"XBX", %%"XCX"), %%mm1\n\t" // value n
   53  V_PAVGB ("%%mm2", "%%mm1", "%%mm3", _ShiftMask) // avg(j,n)
   55  "psubusb %%mm1, %%mm0\n\t"
   56  "psubusb %%mm3, %%mm1\n\t"
   57  "por %%mm1, %%mm0\n\t" // abs(j,n)
   59  "movq %%mm0, %%mm1\n\t"
  [all …]
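The psubusb/psubusb/por triples above are the stock MMX idiom for a per-byte absolute difference, which the deinterlacer uses to score candidate pairs of diagonal neighbours such as (a,m) and (j,n). Byte-wise equivalent (an illustrative helper, not part of the tvtime source):

    #include <stdint.h>

    /* |a - b| built from two saturating subtractions, one byte at a time. */
    static uint8_t absdiff_u8(uint8_t a, uint8_t b)
    {
      uint8_t d1 = (a > b) ? (uint8_t)(a - b) : 0;  /* psubusb a, b (saturates at 0) */
      uint8_t d2 = (b > a) ? (uint8_t)(b - a) : 0;  /* psubusb b, a */
      return (uint8_t)(d1 | d2);                    /* por: at most one is non-zero */
    }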
|
D | WierdBob.inc |
   19  "movq 2(%%"XBX", %%"XCX"), %%mm1\n\t" // value f from bottom right
   21  // pavgb %%mm6, %%mm1 // avg(a,f), also best so far
   22  V_PAVGB ("%%mm6", "%%mm1", "%%mm7", _ShiftMask) // avg(a,f), also best so far
   24  "psubusb %%mm1, %%mm7\n\t"
   25  "psubusb %%mm0, %%mm1\n\t"
   26  "por %%mm1, %%mm7\n\t" // abs diff, also best so far
   30  "movq -2(%%"XBX", %%"XCX"), %%mm1\n\t" // value f from bottom right
   32  // pavgb %%mm2, %%mm1 // avg(c,d)
   33  V_PAVGB ("%%mm2", "%%mm1", "%%mm3", _ShiftMask) // avg(c,d)
   35  "psubusb %%mm1, %%mm3\n\t"
  [all …]
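WierdBob.inc runs the same absolute-difference scoring but also keeps a running winner: each candidate pair contributes a rounded average (what pavgb/V_PAVGB computes) and the average with the smallest difference survives as the "best so far". A branchy scalar sketch of that selection (the assembly uses compare/mask tricks rather than a branch; the names here are illustrative):

    #include <stdint.h>

    /* pavgb: byte average with rounding up. */
    static uint8_t pavgb_u8(uint8_t a, uint8_t b)
    {
      return (uint8_t)((a + b + 1) >> 1);
    }

    /* Keep the candidate pair whose members differ the least. */
    static void pick_best(uint8_t cand_a, uint8_t cand_b,
                          uint8_t *best_val, uint8_t *best_diff)
    {
      uint8_t diff = cand_a > cand_b ? (uint8_t)(cand_a - cand_b)
                                     : (uint8_t)(cand_b - cand_a);
      if (diff < *best_diff) {
        *best_diff = diff;
        *best_val  = pavgb_u8(cand_a, cand_b);
      }
    }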
|
/third_party/ffmpeg/libavcodec/x86/ |
D | hpeldsp_rnd_template.c |
   42  PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)  in DEF()
   51  PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)  in DEF()
   74  PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)  in DEF()
   81  PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)  in DEF()
   90  PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)  in DEF()
   97  PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)  in DEF()
  119  PAVGBP(%%mm1, %%mm0, %%mm4, %%mm2, %%mm1, %%mm5)  in DEF()
  126  PAVGBP(%%mm1, %%mm2, %%mm4, %%mm0, %%mm1, %%mm5)  in DEF()
  147  PAVGB(%%mm0, %%mm1, %%mm2, %%mm6)  in DEF()
  153  PAVGB(%%mm0, %%mm1, %%mm2, %%mm6)  in DEF()
  [all …]
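hpeldsp_rnd_template.c instantiates half-pel put/avg routines; the PAVGB/PAVGBP macros average bytes from two source positions, with a rounding constant that differs between the "rnd" and "no_rnd" builds. A scalar sketch of one such row (function and parameter names are illustrative, not ffmpeg API):

    #include <stdint.h>
    #include <stddef.h>

    /* Half-pel averaging of two rows; rnd is 1 for the rounding variant
     * and 0 for the no-rounding variant. */
    static void avg_row(uint8_t *dst, const uint8_t *a, const uint8_t *b,
                        size_t n, int rnd)
    {
      for (size_t i = 0; i < n; i++)
        dst[i] = (uint8_t)((a[i] + b[i] + rnd) >> 1);
    }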
|
D | h264_intrapred.asm |
   56  movq mm1, [r0+8]
   59  movq [r0+r1*1+8], mm1
   61  movq [r0+r1*2+8], mm1
  133  pxor mm1, mm1
  135  psadbw mm1, [r0+8]
  138  paddw mm0, mm1
  196  movq mm1, mm0
  199  punpckhbw mm1, mm7
  213  paddw mm5, mm1
  811  pxor mm1, mm1
  [all …]
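In h264_intrapred.asm, the pxor mm1, mm1 / psadbw mm1, [r0+8] pair sums eight neighbour bytes at once (psadbw against a zero register acts as a horizontal byte sum), which feeds DC-mode prediction. A scalar sketch of plain 16x16 DC prediction with both neighbours available (illustrative only):

    #include <stdint.h>

    /* Fill a 16x16 block with the rounded mean of the 16 top and 16 left
     * reconstructed neighbour samples. */
    static void pred16x16_dc(uint8_t *dst, int stride,
                             const uint8_t *top, const uint8_t *left)
    {
      int sum = 0;
      for (int i = 0; i < 16; i++)
        sum += top[i] + left[i];
      uint8_t dc = (uint8_t)((sum + 16) >> 5);  /* round, divide by 32 */
      for (int y = 0; y < 16; y++)
        for (int x = 0; x < 16; x++)
          dst[y * stride + x] = dc;
    }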
|
D | simple_idct.asm |
   84  movq mm1, [blockq + %2] ; R6 R2 r6 r2
   89  por mm4, mm1
  101  pmaddwd mm5, mm1 ; C6R6+C2R2 C6r6+C2r2
  103  pmaddwd mm1, mm6 ; -C2R6+C6R2 -C2r6+C6r2
  113  paddd mm1, mm0 ; A1 a1
  115  psubd mm0, mm1 ; A2 a2
  126  movq mm2, mm1 ; A1 a1
  127  paddd mm1, mm5 ; A1+B1 a1+b1
  129  psrad mm1, %7
  131  packssdw mm7, mm1 ; A1+B1 a1+b1 A0+B0 a0+b0
  [all …]
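simple_idct.asm's A0/A1/B1 comments name the even and odd halves of the 1-D 8-point IDCT: the even part A[k] comes from coefficients 0, 2, 4, 6 and the odd part B[k] from 1, 3, 5, 7, and outputs are formed as (A ± B) pairs followed by an arithmetic shift and a pack. A sketch of that butterfly stage only; ROW_SHIFT and the pre-scaled A/B values stand in for the assembly's intermediates:

    #include <stdint.h>

    #define ROW_SHIFT 11

    /* Combine the even (A) and odd (B) partial sums into one output row;
     * results come out in mirrored pairs k and 7-k. */
    static void idct_butterfly(const int32_t A[4], const int32_t B[4],
                               int16_t row[8])
    {
      for (int k = 0; k < 4; k++) {
        row[k]     = (int16_t)((A[k] + B[k]) >> ROW_SHIFT);
        row[7 - k] = (int16_t)((A[k] - B[k]) >> ROW_SHIFT);
      }
    }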
|
/third_party/flutter/skia/third_party/externals/libjpeg-turbo/simd/i386/ |
D | jquant-3dn.asm |
   63  movq mm1, MMWORD [edx+eax*SIZEOF_JSAMPLE]
   66  psubb mm1, mm7 ; mm1=(89ABCDEF)
   70  punpcklbw mm3, mm1 ; mm3=(*8*9*A*B)
   71  punpckhbw mm1, mm1 ; mm1=(*C*D*E*F)
   94  punpcklwd mm4, mm1 ; mm4=(***C***D)
   95  punpckhwd mm1, mm1 ; mm1=(***E***F)
  102  psrad mm1, (DWORD_BIT-BYTE_BIT) ; mm1=(EF)
  104  pi2fd mm1, mm1
  109  movq MMWORD [MMBLOCK(1,3,edi,SIZEOF_FAST_FLOAT)], mm1
  162  movq mm1, MMWORD [MMBLOCK(0,1,esi,SIZEOF_FAST_FLOAT)]
  [all …]
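jquant-3dn.asm begins by converting samples for the 3DNow! float quantizer: the psubb against mm7 (presumably a 0x80 splat) level-shifts the bytes, and pi2fd widens them to floats. The net effect, as a scalar sketch with illustrative names:

    #include <stdint.h>

    #define CENTERJSAMPLE 128

    /* Level-shift each 8-bit sample and widen it to the float workspace
     * the 3DNow! forward-DCT/quantizer consumes. */
    static void convsamp_float(const uint8_t *samples, float *workspace)
    {
      for (int i = 0; i < 64; i++)
        workspace[i] = (float)(samples[i] - CENTERJSAMPLE);
    }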
|
D | jccolext-mmx.asm |
  268  ; mm1=(R1 R3 R5 R7)=RO, mm3=(G1 G3 G5 G7)=GO, mm5=(B1 B3 B5 B7)=BO
  281  movq MMWORD [wk(1)], mm1 ; wk(1)=RO
  285  movq mm6, mm1
  286  punpcklwd mm1, mm3
  288  movq mm7, mm1
  290  pmaddwd mm1, [GOTOFF(eax,PW_F0299_F0337)] ; mm1=ROL*FIX(0.299)+GOL*FIX(0.337)
  295  movq MMWORD [wk(4)], mm1 ; wk(4)=ROL*FIX(0.299)+GOL*FIX(0.337)
  298  pxor mm1, mm1
  300  punpcklwd mm1, mm5 ; mm1=BOL
  302  psrld mm1, 1 ; mm1=BOL*FIX(0.500)
  [all …]
|
/third_party/skia/third_party/externals/libjpeg-turbo/simd/i386/ |
D | jquant-3dn.asm |
   61  movq mm1, MMWORD [edx+eax*SIZEOF_JSAMPLE]
   64  psubb mm1, mm7 ; mm1=(89ABCDEF)
   68  punpcklbw mm3, mm1 ; mm3=(*8*9*A*B)
   69  punpckhbw mm1, mm1 ; mm1=(*C*D*E*F)
   92  punpcklwd mm4, mm1 ; mm4=(***C***D)
   93  punpckhwd mm1, mm1 ; mm1=(***E***F)
  100  psrad mm1, (DWORD_BIT-BYTE_BIT) ; mm1=(EF)
  102  pi2fd mm1, mm1
  107  movq MMWORD [MMBLOCK(1,3,edi,SIZEOF_FAST_FLOAT)], mm1
  160  movq mm1, MMWORD [MMBLOCK(0,1,esi,SIZEOF_FAST_FLOAT)]
  [all …]
|
D | jccolext-mmx.asm |
  266  ; mm1=(R1 R3 R5 R7)=RO, mm3=(G1 G3 G5 G7)=GO, mm5=(B1 B3 B5 B7)=BO
  279  movq MMWORD [wk(1)], mm1 ; wk(1)=RO
  283  movq mm6, mm1
  284  punpcklwd mm1, mm3
  286  movq mm7, mm1
  288  pmaddwd mm1, [GOTOFF(eax,PW_F0299_F0337)] ; mm1=ROL*FIX(0.299)+GOL*FIX(0.337)
  293  movq MMWORD [wk(4)], mm1 ; wk(4)=ROL*FIX(0.299)+GOL*FIX(0.337)
  296  pxor mm1, mm1
  298  punpcklwd mm1, mm5 ; mm1=BOL
  300  psrld mm1, 1 ; mm1=BOL*FIX(0.500)
  [all …]
|
D | jdsample-mmx.asm |
  286  movq mm5, mm1
  287  punpcklbw mm1, mm3 ; mm1=row[-1][0]( 0 1 2 3)
  299  paddw mm1, mm0 ; mm1=Int0L=( 0 1 2 3)
  304  movq MMWORD [edx+0*SIZEOF_MMWORD], mm1 ; temporarily save
  309  pand mm1, mm7 ; mm1=( 0 - - -)
  312  movq MMWORD [wk(0)], mm1
  329  pcmpeqb mm1, mm1
  330  psllq mm1, (SIZEOF_MMWORD-2)*BYTE_BIT
  331  movq mm2, mm1
  333  pand mm1, MMWORD [edx+1*SIZEOF_MMWORD] ; mm1=( - - - 7)
  [all …]
|
/third_party/libjpeg-turbo/simd/i386/ |
D | jquant-3dn.asm |
   61  movq mm1, MMWORD [edx+eax*SIZEOF_JSAMPLE]
   64  psubb mm1, mm7 ; mm1=(89ABCDEF)
   68  punpcklbw mm3, mm1 ; mm3=(*8*9*A*B)
   69  punpckhbw mm1, mm1 ; mm1=(*C*D*E*F)
   92  punpcklwd mm4, mm1 ; mm4=(***C***D)
   93  punpckhwd mm1, mm1 ; mm1=(***E***F)
  100  psrad mm1, (DWORD_BIT-BYTE_BIT) ; mm1=(EF)
  102  pi2fd mm1, mm1
  107  movq MMWORD [MMBLOCK(1,3,edi,SIZEOF_FAST_FLOAT)], mm1
  160  movq mm1, MMWORD [MMBLOCK(0,1,esi,SIZEOF_FAST_FLOAT)]
  [all …]
|
D | jccolext-mmx.asm |
  266  ; mm1=(R1 R3 R5 R7)=RO, mm3=(G1 G3 G5 G7)=GO, mm5=(B1 B3 B5 B7)=BO
  279  movq MMWORD [wk(1)], mm1 ; wk(1)=RO
  283  movq mm6, mm1
  284  punpcklwd mm1, mm3
  286  movq mm7, mm1
  288  pmaddwd mm1, [GOTOFF(eax,PW_F0299_F0337)] ; mm1=ROL*FIX(0.299)+GOL*FIX(0.337)
  293  movq MMWORD [wk(4)], mm1 ; wk(4)=ROL*FIX(0.299)+GOL*FIX(0.337)
  296  pxor mm1, mm1
  298  punpcklwd mm1, mm5 ; mm1=BOL
  300  psrld mm1, 1 ; mm1=BOL*FIX(0.500)
  [all …]
|
/third_party/ffmpeg/libavcodec/ |
D | xvididct.c |
  160  int mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7, spill;  in idct_col_8() local
  170  mm1 = MULT(TAN1, mm7, 16) - mm4;  in idct_col_8()
  175  mm4 = mm1 - mm3;  in idct_col_8()
  177  mm1 = mm1 + mm3;  in idct_col_8()
  178  mm6 = mm0 + mm1;  in idct_col_8()
  179  mm5 = mm0 - mm1;  in idct_col_8()
  186  mm1 = (int) in[2 * 8];  in idct_col_8()
  188  mm3 = MULT(TAN2, mm2, 16) + mm1;  in idct_col_8()
  189  mm2 = MULT(TAN2, mm1, 16) - mm2;  in idct_col_8()
  191  LOAD_BUTTERFLY(mm0, mm1, 0 * 8, 4 * 8, spill, in);  in idct_col_8()
  [all …]
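xvididct.c is the C fallback that mirrors the MMX register names in plain int variables; MULT(TAN1, x, 16) is a fixed-point multiply by a constant pre-scaled by 2^16, with TAN1 ≈ tan(π/16). The exact constants and rounding in the real file may differ; this only illustrates the pattern of replacing floating-point tangents with one integer multiply and a shift:

    #include <stdint.h>

    #define SCALE_BITS 16
    #define TAN1 ((int32_t)(0.19891236737966 * (1 << SCALE_BITS) + 0.5))  /* tan(pi/16) */

    /* (c * x) >> bits, widened to avoid overflow of the intermediate product. */
    static int32_t fixed_mult(int32_t c, int32_t x, int bits)
    {
      return (int32_t)(((int64_t)c * x) >> bits);
    }

    /* e.g. "mm1 = MULT(TAN1, mm7, 16) - mm4" is, in spirit: */
    static int32_t odd_term(int32_t mm7, int32_t mm4)
    {
      return fixed_mult(TAN1, mm7, SCALE_BITS) - mm4;
    }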
|
/third_party/openh264/codec/encoder/core/x86/ |
D | intra_pred.asm |
  336  movq mm1, [r1 + 5]
  338  punpcklbw mm1, mm7
  339  pmullw mm1, mm6
  340  psubw mm1, mm0
  342  movq2dq xmm1, mm1
  350  SSE2_Copy8Times xmm1, r3d ; mm1 = b,b,b,b,b,b,b,b
  425  …movq mm1,[r1+r2-8] ;get value of 11,decreasing 8 is trying to improve the performance…
  430  punpckhwd mm1,mm2 ;mm1[8]=[0],mm1[7]=[6],mm1[6]=[11]
  432  psrlq mm1,28h ;mm1[3]=[0],mm1[2]=[6],mm1[1]=[11]
  433  …por mm3,mm1 ;mm3[6]=[3],mm3[5]=[2],mm3[4]=[1],mm3[3]=[0],mm3[2]=[6],mm3[1]=[11]
  [all …]
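The pmullw ramp, psubw and SSE2_Copy8Times broadcast above belong to 16x16 plane-mode intra prediction. A scalar sketch of that mode as specified in H.264 clause 8.3.3.4; top/left point at the reconstructed neighbours and must also be readable at index -1 (the corner sample), and the helper names are illustrative:

    #include <stdint.h>

    static uint8_t clip_u8(int v) { return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v; }

    /* H and V are weighted sums of neighbour differences; b and c are the
     * per-column/per-row gradients broadcast by the SSE2 code. */
    static void pred16x16_plane(uint8_t *dst, int stride,
                                const uint8_t *top, const uint8_t *left)
    {
      int H = 0, V = 0;
      for (int i = 0; i < 8; i++) {
        H += (i + 1) * (top[8 + i] - top[6 - i]);
        V += (i + 1) * (left[8 + i] - left[6 - i]);
      }
      int a = 16 * (top[15] + left[15]);
      int b = (5 * H + 32) >> 6;
      int c = (5 * V + 32) >> 6;
      for (int y = 0; y < 16; y++)
        for (int x = 0; x < 16; x++)
          dst[y * stride + x] = clip_u8((a + b * (x - 7) + c * (y - 7) + 16) >> 5);
    }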
|
/third_party/openh264/codec/decoder/core/x86/ |
D | intra_pred.asm |
  412  movq mm1, [r0 + 5]
  414  punpcklbw mm1, mm7
  415  pmullw mm1, mm6
  416  psubw mm1, mm0
  418  movq2dq xmm1, mm1
  426  SSE2_Copy8Times xmm1, r2d ; mm1 = b,b,b,b,b,b,b,b
  506  …movq mm1,[r2+r1-8] ;get value of 11,decreasing 8 is trying to improve the performance…
  511  punpckhwd mm1,mm2 ;mm1[8]=[0],mm1[7]=[6],mm1[6]=[11]
  513  psrlq mm1,28h ;mm1[3]=[0],mm1[2]=[6],mm1[1]=[11]
  514  …por mm3,mm1 ;mm3[6]=[3],mm3[5]=[2],mm3[4]=[1],mm3[3]=[0],mm3[2]=[6],mm3[1]=[11]
  [all …]
|
/third_party/ltp/testcases/kernel/syscalls/mmap/ |
D | mmap19.c |
   27  static char *mm1 = NULL, *mm2 = NULL;  variable
   40  mm1 = SAFE_MMAP(0, LEN, PROT_READ, MAP_PRIVATE, f1, 0);  in run()
   43  save_mm1 = mm1;  in run()
   46  if (strncmp(str1, mm1, strlen(str1)))  in run()
   52  SAFE_MUNMAP(mm1, LEN);  in run()
   55  mm1 = SAFE_MMAP(save_mm2, LEN, PROT_READ, MAP_PRIVATE, f1, 0);  in run()
   58  if (mm1 != save_mm2 || mm2 != save_mm1)  in run()
   61  if (strncmp(str1, mm1, strlen(str1)))  in run()
   81  if (mm1)  in cleanup()
   82  SAFE_MUNMAP(mm1, LEN);  in cleanup()
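mmap19.c exercises the fact that a non-NULL first argument to mmap() without MAP_FIXED is only a hint: the matches suggest it re-maps each file at the other mapping's old address (save_mm1/save_mm2) and then checks the address it actually got back. A minimal sketch of that pattern; fd and len are placeholders:

    #include <sys/mman.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Map a file read-only at a preferred address; without MAP_FIXED the
     * kernel is free to ignore the hint, so the result must be checked. */
    static void *map_at_hint(void *hint, size_t len, int fd)
    {
      void *p = mmap(hint, len, PROT_READ, MAP_PRIVATE, fd, 0);
      if (p == MAP_FAILED) {
        perror("mmap");
        return NULL;
      }
      if (p != hint)
        fprintf(stderr, "hint %p not honoured, mapped at %p\n", hint, p);
      return p;
    }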
|
/third_party/gstreamer/gstplugins_good/gst/goom2k1/ |
D | filters_mmx.s |
   57  movq %mm0, %mm1 /* b1-v1-r1-a1-b2-v2-r2-a2 */
   66  punpckhbw %mm7, %mm1 /* 00-b1-00-v1-00-r1-00-a1 */
   82  pmullw %mm3, %mm1 /* c2*b1-c2*v1-c2*r1-c2*a1 */
   83  paddw %mm1, %mm0
   91  movq (%esi,%ebp), %mm1
   92  movq %mm1, %mm2
   95  punpcklbw %mm7, %mm1
   99  pmullw %mm4, %mm1
  103  paddw %mm1, %mm0
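filters_mmx.s implements goom's zoom-filter blend: pixels are unpacked from bytes to 16-bit lanes (punpcklbw/punpckhbw against zero), scaled channel-wise by per-pixel coefficients (pmullw) and accumulated (paddw). A scalar sketch with two source pixels and a coefficient scale of 256, both of which are assumptions for illustration:

    #include <stdint.h>

    /* Weighted blend of two 4-channel pixels; c1 + c2 is assumed to be 256
     * so the >> 8 brings the result back to 8 bits per channel. */
    static void blend2_argb(const uint8_t p1[4], const uint8_t p2[4],
                            unsigned c1, unsigned c2, uint8_t out[4])
    {
      for (int ch = 0; ch < 4; ch++)
        out[ch] = (uint8_t)((p1[ch] * c1 + p2[ch] * c2) >> 8);
    }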
|