/external/libvpx/vp8/encoder/ |
D | variance_c.c |
    41   unsigned int *sse,        in variance() argument
    48   *sse = 0;                 in variance()
    56   *sse += diff * diff;      in variance()
   104   unsigned int *sse)        in vp8_variance16x16_c() argument
   111   *sse = var;               in vp8_variance16x16_c()
   120   unsigned int *sse)        in vp8_variance8x16_c() argument
   127   *sse = var;               in vp8_variance8x16_c()
   136   unsigned int *sse)        in vp8_variance16x8_c() argument
   143   *sse = var;               in vp8_variance16x8_c()
   153   unsigned int *sse)        in vp8_variance8x8_c() argument
   [all …]
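All of the variance kernels listed in this section follow the same pattern as the C reference above: accumulate the signed sum of differences and the sum of squared differences (returned through *sse), then subtract the squared-mean correction — the ">> 8" and ">> 6" shifts in the assembly ports below are just the divide by the 16x16 and 8x8 pixel counts. A minimal sketch, with illustrative names (the real code splits this into a shared variance() helper plus per-size wrappers such as vp8_variance16x16_c()):

/* Illustrative sketch of the shared variance pattern, not the libvpx code. */
unsigned int block_variance(const unsigned char *src, int src_stride,
                            const unsigned char *ref, int ref_stride,
                            int w, int h, unsigned int *sse)
{
    int sum = 0;
    unsigned int sq = 0;
    for (int r = 0; r < h; ++r) {
        for (int c = 0; c < w; ++c) {
            const int diff = src[r * src_stride + c] - ref[r * ref_stride + c];
            sum += diff;                      /* signed sum of differences   */
            sq += (unsigned)(diff * diff);    /* sum of squared differences  */
        }
    }
    *sse = sq;                                /* callers get the raw SSE     */
    /* variance = SSE - (sum*sum)/(w*h); for a 16x16 block this division is
     * the ">> 8" seen in the assembly versions, ">> 6" for 8x8. */
    return sq - (unsigned)((sum * (long long)sum) / (w * h));
}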
|
D | mcomp.c |
   189   #define DIST(r,c) vfp->svf( PRE(r,c), d->pre_stride, SP(c),SP(r), z,b->src_stride,&sse) // returns …
   208   unsigned int sse;      in vp8_find_best_sub_pixel_step_iteratively() local
   223   besterr = vfp->vf(y, d->pre_stride, z, b->src_stride, &sse);      in vp8_find_best_sub_pixel_step_iteratively()
   321   unsigned int sse;      in vp8_find_best_sub_pixel_step() local
   339   bestmse = vfp->vf(y, d->pre_stride, z, b->src_stride, &sse);      in vp8_find_best_sub_pixel_step()
   345   left = vfp->svf_halfpix_h(y - 1, d->pre_stride, z, b->src_stride, &sse);      in vp8_find_best_sub_pixel_step()
   355   right = vfp->svf_halfpix_h(y, d->pre_stride, z, b->src_stride, &sse);      in vp8_find_best_sub_pixel_step()
   367   up = vfp->svf_halfpix_v(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);      in vp8_find_best_sub_pixel_step()
   377   down = vfp->svf_halfpix_v(y, d->pre_stride, z, b->src_stride, &sse);      in vp8_find_best_sub_pixel_step()
   398   diag = vfp->svf_halfpix_hv(y - 1 - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);      in vp8_find_best_sub_pixel_step()
   [all …]
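vp8_find_best_sub_pixel_step() starts from the error at the best full-pel position (vfp->vf), then probes the left/right/up/down half-pel candidates with the svf_halfpix_* variants and keeps whichever is cheapest. A heavily simplified sketch of that selection step, assuming a plain (a+b+1)>>1 half-pel average and a raw SSE cost — the real code goes through the vfp function table, also tries the diagonal, and adds a motion-vector cost term:

#define BW 8   /* toy block size; the real search handles 16x16 partitions */

/* SSE of the source block against either the full-pel reference (pair_off 0)
 * or the rounded average of two neighbouring reference pixels (pair_off 1 for
 * a horizontal half-pel, pair_off == ref_stride for a vertical one). */
unsigned int halfpel_sse(const unsigned char *src, int src_stride,
                         const unsigned char *ref, int ref_stride, int pair_off)
{
    unsigned int sse = 0;
    for (int r = 0; r < BW; ++r) {
        for (int c = 0; c < BW; ++c) {
            const int p = pair_off
                ? (ref[r * ref_stride + c] + ref[r * ref_stride + c + pair_off] + 1) >> 1
                : ref[r * ref_stride + c];
            const int d = src[r * src_stride + c] - p;
            sse += (unsigned)(d * d);
        }
    }
    return sse;
}

/* Keep the best of the full-pel position and its four half-pel neighbours.
 * 'ref' points at the best full-pel match and needs a one-pixel border above
 * and to the left; dx/dy come back in half-pel units. */
unsigned int halfpel_refine(const unsigned char *src, int src_stride,
                            const unsigned char *ref, int ref_stride,
                            int *dx, int *dy)
{
    unsigned int best = halfpel_sse(src, src_stride, ref, ref_stride, 0);
    *dx = *dy = 0;

    /* left, right, up, down: mirrors the y-1 / y / y-pre_stride bases above */
    const unsigned char *base[4] = { ref - 1, ref, ref - ref_stride, ref };
    const int pair[4] = { 1, 1, ref_stride, ref_stride };
    const int cdx[4]  = { -1, 1, 0, 0 };
    const int cdy[4]  = { 0, 0, -1, 1 };

    for (int i = 0; i < 4; ++i) {
        const unsigned int e = halfpel_sse(src, src_stride, base[i], ref_stride, pair[i]);
        if (e < best) { best = e; *dx = cdx[i]; *dy = cdy[i]; }
    }
    return best;
}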
|
/external/libvpx/vp8/encoder/x86/ |
D | variance_mmx.c |
   124   unsigned int *sse)      in vp8_variance4x4_mmx() argument
   130   *sse = var;             in vp8_variance4x4_mmx()
   140   unsigned int *sse)      in vp8_variance8x8_mmx() argument
   146   *sse = var;             in vp8_variance8x8_mmx()
   157   unsigned int *sse)      in vp8_mse16x16_mmx() argument
   169   *sse = var;             in vp8_mse16x16_mmx()
   179   int *sse)               in vp8_variance16x16_mmx() argument
   192   *sse = var;             in vp8_variance16x16_mmx()
   201   unsigned int *sse)      in vp8_variance16x8_mmx() argument
   211   *sse = var;             in vp8_variance16x8_mmx()
   [all …]
|
D | variance_sse2.c |
   186   unsigned int *sse)      in vp8_variance16x16_wmt() argument
   193   *sse = sse0;            in vp8_variance16x16_wmt()
   201   unsigned int *sse)      in vp8_mse16x16_wmt() argument
   207   *sse = sse0;            in vp8_mse16x16_wmt()
   219   unsigned int *sse)      in vp8_variance16x8_wmt() argument
   229   *sse = var;             in vp8_variance16x8_wmt()
   240   unsigned int *sse)      in vp8_variance8x16_wmt() argument
   250   *sse = var;             in vp8_variance8x16_wmt()
   263   unsigned int *sse       in vp8_sub_pixel_variance4x4_wmt() argument
   274   *sse = xxsum;           in vp8_sub_pixel_variance4x4_wmt()
   [all …]
|
D | variance_ssse3.c |
    76   unsigned int *sse       in vp8_sub_pixel_variance16x16_ssse3() argument
   114   *sse = xxsum0;          in vp8_sub_pixel_variance16x16_ssse3()
   126   unsigned int *sse       in vp8_sub_pixel_variance16x8_ssse3() argument
   163   *sse = xxsum0;          in vp8_sub_pixel_variance16x8_ssse3()
|
/external/flac/libFLAC/ |
D | cpu.c |
   169   info->data.ia32.sse = false;      in FLAC__cpu_info()
   183   info->data.ia32.sse = (flags_edx & FLAC__CPUINFO_IA32_CPUID_SSE )? true : false;      in FLAC__cpu_info()
   204   fprintf(stderr, " SSE ........ %c\n", info->data.ia32.sse ? 'Y' : 'n');      in FLAC__cpu_info()
   216   if(info->data.ia32.fxsr || info->data.ia32.sse || info->data.ia32.sse2) {      in FLAC__cpu_info()
   219   …info->data.ia32.fxsr = info->data.ia32.sse = info->data.ia32.sse2 = info->data.ia32.sse3 = info->d…      in FLAC__cpu_info()
   223   int sse = 0;      in FLAC__cpu_info()
   226   …len = sizeof(sse); sse = sse || (sysctlbyname("hw.instruction_sse", &sse, &len, NULL, 0) == 0 && s…      in FLAC__cpu_info()
   227   …len = sizeof(sse); sse = sse || (sysctlbyname("hw.optional.sse"  , &sse, &len, NULL, 0) == 0 && s…      in FLAC__cpu_info()
   228   if(!sse)      in FLAC__cpu_info()
   229   …info->data.ia32.fxsr = info->data.ia32.sse = info->data.ia32.sse2 = info->data.ia32.sse3 = info->d…      in FLAC__cpu_info()
   [all …]
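FLAC__cpu_info() first reads the CPUID feature flags, then on Darwin/BSD builds double-checks with the kernel via sysctlbyname() and clears the fxsr/sse/sse2/… flags when the OS does not report SSE support, since using XMM registers without OS save/restore support would be unsafe. A minimal sketch of that sysctl probe, with simplified error handling (the snippet above shows the two key names it queries):

#include <stddef.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/sysctl.h>

/* Ask the kernel (Darwin/BSD) whether SSE is usable; 1 = yes, 0 = no/unknown. */
static int os_reports_sse(void)
{
    int sse = 0;
    size_t len = sizeof(sse);
    if (sysctlbyname("hw.instruction_sse", &sse, &len, NULL, 0) == 0 && sse)
        return 1;
    sse = 0;
    len = sizeof(sse);
    if (sysctlbyname("hw.optional.sse", &sse, &len, NULL, 0) == 0 && sse)
        return 1;
    return 0;
}

int main(void)
{
    printf("OS reports SSE: %s\n", os_reports_sse() ? "yes" : "no");
    return 0;
}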
|
/external/libvpx/vp8/encoder/arm/ |
D | variance_arm.c |
    26   unsigned int *sse      in vp8_sub_pixel_variance8x8_armv6() argument
    43   dst_pixels_per_line, sse);      in vp8_sub_pixel_variance8x8_armv6()
    54   unsigned int *sse      in vp8_sub_pixel_variance16x16_armv6() argument
    65   dst_ptr, dst_pixels_per_line, sse);      in vp8_sub_pixel_variance16x16_armv6()
    70   dst_ptr, dst_pixels_per_line, sse);      in vp8_sub_pixel_variance16x16_armv6()
    75   dst_ptr, dst_pixels_per_line, sse);      in vp8_sub_pixel_variance16x16_armv6()
    89   dst_pixels_per_line, sse);      in vp8_sub_pixel_variance16x16_armv6()
   107   unsigned int *sse      in vp8_sub_pixel_variance16x16_neon() argument
   111   …8_variance_halfpixvar16x16_h_neon(src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);      in vp8_sub_pixel_variance16x16_neon()
   113   …8_variance_halfpixvar16x16_v_neon(src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);      in vp8_sub_pixel_variance16x16_neon()
   [all …]
|
/external/llvm/test/CodeGen/X86/ |
D | vec_shuffle-16.ll |
     1   ; RUN: llc < %s -march=x86 -mattr=+sse,-sse2 -mtriple=i386-apple-darwin | FileCheck %s -check-prefi…
     4   ; sse: t1:
     7   ; sse: shufps
    14   ; sse: t2:
    17   ; sse: shufps
    24   ; sse: t3:
    27   ; sse: shufps
    34   ; sse: t4:
    38   ; sse: shufps
|
D | vec_ss_load_fold.ll |
     1   ; RUN: llc < %s -march=x86 -mattr=+sse,+sse2,+sse41 | FileCheck %s
    11   …%tmp28 = tail call <4 x float> @llvm.x86.sse.sub.ss( <4 x float> %tmp12, <4 x float> < float 1.000…
    12   …%tmp37 = tail call <4 x float> @llvm.x86.sse.mul.ss( <4 x float> %tmp28, <4 x float> < float 5.000…
    13   …%tmp48 = tail call <4 x float> @llvm.x86.sse.min.ss( <4 x float> %tmp37, <4 x float> < float 6.553…
    14   …%tmp59 = tail call <4 x float> @llvm.x86.sse.max.ss( <4 x float> %tmp48, <4 x float> zeroinitializ…
    15   %tmp.upgrd.1 = tail call i32 @llvm.x86.sse.cvttss2si( <4 x float> %tmp59 )      ; <i32> [#uses=1]
    28   …%tmp48 = tail call <4 x float> @llvm.x86.sse.min.ss( <4 x float> %tmp375, <4 x float> < float 6.55…
    29   …%tmp59 = tail call <4 x float> @llvm.x86.sse.max.ss( <4 x float> %tmp48, <4 x float> < float 0.000…
    30   %tmp = tail call i32 @llvm.x86.sse.cvttss2si( <4 x float> %tmp59 )      ; <i32> [#uses=1]
    39   declare <4 x float> @llvm.x86.sse.sub.ss(<4 x float>, <4 x float>)
   [all …]
|
D | avx-intrinsics-x86_64.ll |
    29   %res = call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %a0)      ; <i64> [#uses=1]
    32   declare i64 @llvm.x86.sse.cvtss2si64(<4 x float>) nounwind readnone
    37   …%res = call <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float> %a0, i64 %a1)      ; <<4 x float>> [#uses=…
    40   declare <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float>, i64) nounwind readnone
    45   %res = call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %a0)      ; <i64> [#uses=1]
    48   declare i64 @llvm.x86.sse.cvttss2si64(<4 x float>) nounwind readnone
|
D | sse_reload_fold.ll |
    12   declare <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float>)
    13   declare <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float>)
    14   declare <4 x float> @llvm.x86.sse.rcp.ps(<4 x float>)
    15   declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>)
    16   declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>)
    17   declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8)
    41   %t = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %f)
    46   %t = call <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float> %f)
    51   %t = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %f)
    56   %t = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %y, <4 x float> %f)
   [all …]
|
D | 2008-09-05-sinttofp-2xi32.ll |
    24   %y = tail call <2 x double> @llvm.x86.sse.cvtpi2pd(x86_mmx %x)
    30   %y = tail call x86_mmx @llvm.x86.sse.cvttpd2pi (<2 x double> %x)
    34   declare <2 x double> @llvm.x86.sse.cvtpi2pd(x86_mmx)
    35   declare x86_mmx @llvm.x86.sse.cvttpd2pi(<2 x double>)
|
D | 2006-10-07-ScalarSSEMiscompile.ll |
     1   ; RUN: llc < %s -march=x86 -mattr=sse | grep movaps
    10   …%tmp28 = tail call <4 x float> @llvm.x86.sse.sub.ss( <4 x float> %A, <4 x float> %BV )      ; <<4…
    14   declare <4 x float> @llvm.x86.sse.sub.ss(<4 x float>, <4 x float>)
|
D | 2010-07-02-UnfoldBug.ll |
     4   declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>) nounwind readnone
     6   declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>) nounwind readnone
    66   …%2 = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> <float 1.000000e+00, float undef, float und…
    67   …%3 = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %2, <4 x float> <float 0.000000e+00, float …
    70   …%5 = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> <float 1.000000e+00, float undef, float und…
    71   …%6 = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %5, <4 x float> <float 0.000000e+00, float …
|
/external/libvpx/vp8/encoder/arm/neon/ |
D | variance_neon.asm |
    27   ; stack unsigned int *sse
    30   vmov.i8 q9, #0                  ;q9, q10 - sse
    50   vmlal.s16 q9, d22, d22          ;calculate sse
    67   vadd.u32 q10, q9, q10           ;accumulate sse
    70   ldr r12, [sp]                   ;load *sse from stack
    85   vst1.32 {d1[0]}, [r12]          ;store sse
   100   ; unsigned int *sse)
   103   vmov.i8 q9, #0                  ;q9, q10 - sse
   120   vmlal.s16 q9, d22, d22          ;calculate sse
   137   vadd.u32 q10, q9, q10           ;accumulate sse
   [all …]
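The vmlal.s16 lines are the core of the NEON kernels: 8-bit differences are widened to 16 bits, then multiply-accumulated into 32-bit lanes so q9/q10 hold the running sse. A rough C-intrinsics equivalent of one 8-pixel step — an illustrative sketch only, since the shipped code stays in assembly and also tracks the signed sum for the variance correction:

#include <stdint.h>
#include <arm_neon.h>

/* Sum of squared differences of one row of 8 pixels using the same
 * widen-then-vmlal idiom as the assembly above. */
unsigned int sse8_neon(const uint8_t *src, const uint8_t *ref)
{
    uint8x8_t s = vld1_u8(src);
    uint8x8_t r = vld1_u8(ref);
    /* modular u16 difference reinterpreted as s16 gives the signed diff */
    int16x8_t d = vreinterpretq_s16_u16(vsubl_u8(s, r));
    int32x4_t acc = vmull_s16(vget_low_s16(d), vget_low_s16(d));
    acc = vmlal_s16(acc, vget_high_s16(d), vget_high_s16(d));
    /* horizontal add of the four 32-bit partial sums */
    int64x2_t p = vpaddlq_s32(acc);
    return (unsigned int)(vgetq_lane_s64(p, 0) + vgetq_lane_s64(p, 1));
}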
|
D | vp8_subpixelvariance16x16s_neon.asm |
    29   ; unsigned int *sse
    36   ldr lr, [sp, #4]                ;load *sse from stack
    38   vmov.i8 q9, #0                  ;q9, q10 - sse
    76   vmlal.s16 q9, d8, d8            ;sse
    92   vmlal.s16 q9, d0, d0            ;sse
   106   vadd.u32 q10, q9, q10           ;accumulate sse
   114   vst1.32 {d1[0]}, [lr]           ;store sse
   129   ; unsigned int *sse
   138   ldr lr, [sp, #4]                ;load *sse from stack
   141   vmov.i8 q9, #0                  ;q9, q10 - sse
   [all …]
|
/external/llvm/test/Transforms/InstCombine/ |
D | vec_demanded_elts.ll |
     8   ; CHECK-NOT: call {{.*}} @llvm.x86.sse.mul
     9   ; CHECK-NOT: call {{.*}} @llvm.x86.sse.sub
    15   …%tmp28 = tail call <4 x float> @llvm.x86.sse.sub.ss( <4 x float> %tmp12, <4 x float> < float 1.000…
    16   …%tmp37 = tail call <4 x float> @llvm.x86.sse.mul.ss( <4 x float> %tmp28, <4 x float> < float 5.000…
    17   …%tmp48 = tail call <4 x float> @llvm.x86.sse.min.ss( <4 x float> %tmp37, <4 x float> < float 6.553…
    18   …%tmp59 = tail call <4 x float> @llvm.x86.sse.max.ss( <4 x float> %tmp48, <4 x float> zeroinitializ…
    19   %tmp.upgrd.1 = tail call i32 @llvm.x86.sse.cvttss2si( <4 x float> %tmp59 )      ; <i32> [#uses=1]
    48   %tmp0 = tail call i32 @llvm.x86.sse.cvtss2si(<4 x float> %v03)
    53   %tmp1 = tail call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %v13)
    58   %tmp2 = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> %v23)
   [all …]
|
/external/libvpx/vp8/encoder/ppc/ |
D | variance_altivec.asm |
    64   ;# Now compute sse.
    98   stw r4, 0(r7)                   ;# sse
   102   subf r3, r3, r4                 ;# sse - ((sum*sum) >> DS)
   142   stw r4, 0(r7)                   ;# sse
   146   subf r3, r3, r4                 ;# sse - ((sum*sum) >> 8)
   197   ;# r7 unsigned int *sse
   214   ;# Now compute sse.
   231   stw r3, 0(r7)                   ;# sse
   242   ;# r7 unsigned int *sse
   262   ;# r7 unsigned int *sse
   [all …]
|
/external/llvm/test/Transforms/ConstProp/ |
D | calls.ll |
    35   …%i0 = tail call i32 @llvm.x86.sse.cvtss2si(<4 x float> <float 1.75, float undef, float undef, floa…
    36   …%i1 = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> <float 1.75, float undef, float undef, flo…
    37   …%i2 = tail call i64 @llvm.x86.sse.cvtss2si64(<4 x float> <float 1.75, float undef, float undef, fl…
    38   …%i3 = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> <float 1.75, float undef, float undef, f…
    54   declare i32 @llvm.x86.sse.cvtss2si(<4 x float>) nounwind readnone
    55   declare i32 @llvm.x86.sse.cvttss2si(<4 x float>) nounwind readnone
    56   declare i64 @llvm.x86.sse.cvtss2si64(<4 x float>) nounwind readnone
    57   declare i64 @llvm.x86.sse.cvttss2si64(<4 x float>) nounwind readnone
|
/external/libvpx/vp8/encoder/arm/armv6/ |
D | vp8_mse16x16_armv6.asm |
    22   ; stack unsigned int *sse
    32   mov r4, #0                      ; initialize sse = 0
    53   ; calculate sse
    72   ; calculate sse
    93   ; calculate sse
   116   ; calculate sse
   125   ldr r1, [sp, #28]               ; get address of sse
   126   mov r0, r4                      ; return sse
   127   str r4, [r1]                    ; store sse
|
D | vp8_variance8x8_armv6.asm |
    22   ; stack unsigned int *sse
    28   mov r5, #0                      ; initialize sse = 0
    50   ; calculate sse
    76   ; calculate sse
    86   ldr r8, [sp, #32]               ; get address of sse
    88   str r5, [r8]                    ; store sse
    89   sub r0, r5, r1, ASR #6          ; return (sse - ((sum * sum) >> 6))
|
D | vp8_variance16x16_armv6.asm |
    24   ; stack unsigned int *sse
    29   mov r11, #0                     ; initialize sse = 0
    52   ; calculate sse
    76   ; calculate sse
   100   ; calculate sse
   126   ; calculate sse
   138   ldr r6, [sp, #40]               ; get address of sse
   140   str r11, [r6]                   ; store sse
   141   sub r0, r11, r0, asr #8         ; return (sse - ((sum * sum) >> 8))
|
D | vp8_variance_halfpixvar16x16_h_armv6.asm |
    24   ; stack unsigned int *sse
    30   mov r11, #0                     ; initialize sse = 0
    57   ; calculate sse
    88   ; calculate sse
   119   ; calculate sse
   152   ; calculate sse
   163   ldr r6, [sp, #40]               ; get address of sse
   165   str r11, [r6]                   ; store sse
   166   sub r0, r11, r0, asr #8         ; return (sse - ((sum * sum) >> 8))
|
D | vp8_variance_halfpixvar16x16_v_armv6.asm |
    24   ; stack unsigned int *sse
    30   mov r11, #0                     ; initialize sse = 0
    58   ; calculate sse
    89   ; calculate sse
   120   ; calculate sse
   153   ; calculate sse
   165   ldr r6, [sp, #40]               ; get address of sse
   167   str r11, [r6]                   ; store sse
   168   sub r0, r11, r0, asr #8         ; return (sse - ((sum * sum) >> 8))
|
/external/webp/src/enc/ |
D | webpenc.c |
   278   const uint64_t* const sse = enc->sse_;      in FinalizePSNR() local
   279   stats->PSNR[0] = (float)GetPSNR(sse[0], size);      in FinalizePSNR()
   280   stats->PSNR[1] = (float)GetPSNR(sse[1], size / 4);      in FinalizePSNR()
   281   stats->PSNR[2] = (float)GetPSNR(sse[2], size / 4);      in FinalizePSNR()
   282   stats->PSNR[3] = (float)GetPSNR(sse[0] + sse[1] + sse[2], size * 3 / 2);      in FinalizePSNR()
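FinalizePSNR() turns the per-plane SSE accumulators into PSNR figures: the Y plane covers `size` pixels, the subsampled U and V planes a quarter of that each, and the last entry combines all three. A sketch of the usual 8-bit SSE-to-PSNR conversion GetPSNR() performs — the exact clamping/zero-error sentinel in webpenc.c may differ:

#include <math.h>
#include <stdio.h>
#include <stdint.h>

/* PSNR in dB for 8-bit samples: 10 * log10(255^2 * N / SSE). */
static double psnr_from_sse(uint64_t sse, uint64_t num_pixels)
{
    if (sse == 0 || num_pixels == 0)
        return 99.0;    /* "no measurable error" sentinel */
    return 10.0 * log10(255.0 * 255.0 * (double)num_pixels / (double)sse);
}

int main(void)
{
    /* e.g. a 640x480 luma plane with an accumulated SSE of 1,000,000 */
    printf("PSNR = %.2f dB\n", psnr_from_sse(1000000, 640 * 480));
    return 0;
}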
|