/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */


#include "vp8/encoder/variance.h"
#include "vp8/encoder/onyx_int.h"

SADFunction *vp8_sad16x16;
SADFunction *vp8_sad16x8;
SADFunction *vp8_sad8x16;
SADFunction *vp8_sad8x8;
SADFunction *vp8_sad4x4;

variance_function *vp8_variance4x4;
variance_function *vp8_variance8x8;
variance_function *vp8_variance8x16;
variance_function *vp8_variance16x8;
variance_function *vp8_variance16x16;

variance_function *vp8_mse16x16;

sub_pixel_variance_function *vp8_sub_pixel_variance4x4;
sub_pixel_variance_function *vp8_sub_pixel_variance8x8;
sub_pixel_variance_function *vp8_sub_pixel_variance8x16;
sub_pixel_variance_function *vp8_sub_pixel_variance16x8;
sub_pixel_variance_function *vp8_sub_pixel_variance16x16;

int (*vp8_block_error)(short *coeff, short *dqcoeff);
int (*vp8_mbblock_error)(MACROBLOCK *mb, int dc);

int (*vp8_mbuverror)(MACROBLOCK *mb);
unsigned int (*vp8_get_mb_ss)(short *);
void (*vp8_short_fdct4x4)(short *input, short *output, int pitch);
void (*vp8_short_fdct8x4)(short *input, short *output, int pitch);
void (*vp8_fast_fdct4x4)(short *input, short *output, int pitch);
void (*vp8_fast_fdct8x4)(short *input, short *output, int pitch);
void (*short_walsh4x4)(short *input, short *output, int pitch);

void (*vp8_subtract_b)(BLOCK *be, BLOCKD *bd, int pitch);
void (*vp8_subtract_mby)(short *diff, unsigned char *src, unsigned char *pred, int stride);
void (*vp8_subtract_mbuv)(short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride);
void (*vp8_fast_quantize_b)(BLOCK *b, BLOCKD *d);

unsigned int (*vp8_get4x4sse_cs)(unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr, int recon_stride);

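/*
 * Illustrative sketch (not part of the upstream file): encoder code calls
 * these hot loops through the pointers above, so whatever binding
 * vp8_cmachine_specific_config() installs below takes effect everywhere.
 * The helper name below is hypothetical; its signature is taken from the
 * vp8_short_fdct4x4 pointer declared above.
 */
static void example_fdct_dispatch(short *input, short *output, int pitch)
{
    /* Indirect call; after configuration this resolves to
       vp8_short_fdct4x4_ppc on PowerPC builds. */
    vp8_short_fdct4x4(input, output, pitch);
}
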
// C reference implementations
extern int vp8_block_error_c(short *coeff, short *dqcoeff);
extern int vp8_mbblock_error_c(MACROBLOCK *mb, int dc);

extern int vp8_mbuverror_c(MACROBLOCK *mb);
extern unsigned int vp8_get8x8var_c(unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr, int recon_stride, unsigned int *SSE, int *Sum);
extern void vp8_short_fdct4x4_c(short *input, short *output, int pitch);
extern void vp8_short_fdct8x4_c(short *input, short *output, int pitch);
extern void vp8_short_walsh4x4_c(short *input, short *output, int pitch);

extern void vp8_subtract_b_c(BLOCK *be, BLOCKD *bd, int pitch);
extern void vp8_subtract_mby_c(short *diff, unsigned char *src, unsigned char *pred, int stride);
extern void vp8_subtract_mbuv_c(short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride);
extern void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d);

extern SADFunction vp8_sad16x16_c;
extern SADFunction vp8_sad16x8_c;
extern SADFunction vp8_sad8x16_c;
extern SADFunction vp8_sad8x8_c;
extern SADFunction vp8_sad4x4_c;

extern variance_function vp8_variance16x16_c;
extern variance_function vp8_variance8x16_c;
extern variance_function vp8_variance16x8_c;
extern variance_function vp8_variance8x8_c;
extern variance_function vp8_variance4x4_c;
extern variance_function vp8_mse16x16_c;

extern sub_pixel_variance_function vp8_sub_pixel_variance4x4_c;
extern sub_pixel_variance_function vp8_sub_pixel_variance8x8_c;
extern sub_pixel_variance_function vp8_sub_pixel_variance8x16_c;
extern sub_pixel_variance_function vp8_sub_pixel_variance16x8_c;
extern sub_pixel_variance_function vp8_sub_pixel_variance16x16_c;

extern unsigned int vp8_get_mb_ss_c(short *);
extern unsigned int vp8_get4x4sse_cs_c(unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr, int recon_stride);

// PPC implementations
extern int vp8_block_error_ppc(short *coeff, short *dqcoeff);

extern void vp8_short_fdct4x4_ppc(short *input, short *output, int pitch);
extern void vp8_short_fdct8x4_ppc(short *input, short *output, int pitch);

extern void vp8_subtract_mby_ppc(short *diff, unsigned char *src, unsigned char *pred, int stride);
extern void vp8_subtract_mbuv_ppc(short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride);

extern SADFunction vp8_sad16x16_ppc;
extern SADFunction vp8_sad16x8_ppc;
extern SADFunction vp8_sad8x16_ppc;
extern SADFunction vp8_sad8x8_ppc;
extern SADFunction vp8_sad4x4_ppc;

extern variance_function vp8_variance16x16_ppc;
extern variance_function vp8_variance8x16_ppc;
extern variance_function vp8_variance16x8_ppc;
extern variance_function vp8_variance8x8_ppc;
extern variance_function vp8_variance4x4_ppc;
extern variance_function vp8_mse16x16_ppc;

extern sub_pixel_variance_function vp8_sub_pixel_variance4x4_ppc;
extern sub_pixel_variance_function vp8_sub_pixel_variance8x8_ppc;
extern sub_pixel_variance_function vp8_sub_pixel_variance8x16_ppc;
extern sub_pixel_variance_function vp8_sub_pixel_variance16x8_ppc;
extern sub_pixel_variance_function vp8_sub_pixel_variance16x16_ppc;

extern unsigned int vp8_get8x8var_ppc(unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr, int recon_stride, unsigned int *SSE, int *Sum);
extern unsigned int vp8_get16x16var_ppc(unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr, int recon_stride, unsigned int *SSE, int *Sum);

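/*
 * Bind each dispatch pointer to its PPC implementation where one exists,
 * falling back to the generic C routine otherwise.
 */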
void vp8_cmachine_specific_config(void)
{
    // Generic C routines (no PPC counterpart):
    vp8_mbuverror               = vp8_mbuverror_c;
    vp8_fast_quantize_b         = vp8_fast_quantize_b_c;

    // Transforms: the "fast" fdct pointers reuse the regular PPC fdct,
    // as no fast PPC variant exists.
    vp8_short_fdct4x4           = vp8_short_fdct4x4_ppc;
    vp8_short_fdct8x4           = vp8_short_fdct8x4_ppc;
    vp8_fast_fdct4x4            = vp8_short_fdct4x4_ppc;
    vp8_fast_fdct8x4            = vp8_short_fdct8x4_ppc;
    short_walsh4x4              = vp8_short_walsh4x4_c;

    vp8_variance4x4             = vp8_variance4x4_ppc;
    vp8_variance8x8             = vp8_variance8x8_ppc;
    vp8_variance8x16            = vp8_variance8x16_ppc;
    vp8_variance16x8            = vp8_variance16x8_ppc;
    vp8_variance16x16           = vp8_variance16x16_ppc;
    vp8_mse16x16                = vp8_mse16x16_ppc;

    vp8_sub_pixel_variance4x4   = vp8_sub_pixel_variance4x4_ppc;
    vp8_sub_pixel_variance8x8   = vp8_sub_pixel_variance8x8_ppc;
    vp8_sub_pixel_variance8x16  = vp8_sub_pixel_variance8x16_ppc;
    vp8_sub_pixel_variance16x8  = vp8_sub_pixel_variance16x8_ppc;
    vp8_sub_pixel_variance16x16 = vp8_sub_pixel_variance16x16_ppc;

    vp8_get_mb_ss               = vp8_get_mb_ss_c;
    vp8_get4x4sse_cs            = vp8_get4x4sse_cs_c;

    vp8_sad16x16                = vp8_sad16x16_ppc;
    vp8_sad16x8                 = vp8_sad16x8_ppc;
    vp8_sad8x16                 = vp8_sad8x16_ppc;
    vp8_sad8x8                  = vp8_sad8x8_ppc;
    vp8_sad4x4                  = vp8_sad4x4_ppc;

    vp8_block_error             = vp8_block_error_ppc;
    vp8_mbblock_error           = vp8_mbblock_error_c;

    vp8_subtract_b              = vp8_subtract_b_c;
    vp8_subtract_mby            = vp8_subtract_mby_ppc;
    vp8_subtract_mbuv           = vp8_subtract_mbuv_ppc;
}
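
/*
 * Usage sketch (assumption, not shown in this file): the generic encoder
 * init is expected to call the hook once, after which every dispatch
 * pointer above is live:
 *
 *     vp8_cmachine_specific_config();
 *     vp8_short_fdct4x4(input, output, pitch);  // -> vp8_short_fdct4x4_ppc
 */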