1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11
12 #include "vpx_ports/config.h"
13 #include "idct.h"
14 #include "quantize.h"
15 #include "reconintra.h"
16 #include "reconintra4x4.h"
17 #include "encodemb.h"
18 #include "invtrans.h"
19 #include "recon.h"
20 #include "dct.h"
21 #include "g_common.h"
22 #include "encodeintra.h"
23
24 #define intra4x4ibias_rate 128
25 #define intra4x4pbias_rate 256
26
27
/*
 * Record the winning 4x4 intra mode for sub-block i so that later
 * sub-blocks can use it as above/left mode context.
 *
 *   abmode - above-mode array (4x4 grid, row-major); entry i+4 is the
 *            block directly below i.
 *   lbmode - left-mode array; entry i+1 is the block to the right of i.
 *   i      - sub-block index, 0..15, raster order within the macroblock.
 *   best_mode - selected prediction mode for sub-block i.
 */
void vp8_update_mode_context(int *abmode, int *lbmode, int i, int best_mode)
{
    const int on_bottom_row = (i >= 12);      /* no block below to feed  */
    const int on_right_edge = ((i & 3) == 3); /* no block to the right   */

    if (!on_bottom_row)
        abmode[i + 4] = best_mode;

    if (!on_right_edge)
        lbmode[i + 1] = best_mode;
}
41 #if CONFIG_RUNTIME_CPU_DETECT
42 #define IF_RTCD(x) (x)
43 #else
44 #define IF_RTCD(x) NULL
45 #endif
/*
 * Encode a single 4x4 intra luma block: predict, form the residual,
 * forward transform, quantize, inverse transform, and reconstruct the
 * block into the destination frame buffer.
 *
 *   rtcd      - runtime CPU-specific function table (NULL members when
 *               CONFIG_RUNTIME_CPU_DETECT is off; see IF_RTCD above).
 *   x         - encoder-side macroblock state.
 *   be        - encoder block (source diff / coeff storage).
 *   b         - decoder-side block descriptor (predictor, dqcoeff, dst).
 *   best_mode - B_PRED sub-mode to use for the 4x4 prediction.
 */
void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x, BLOCK *be, BLOCKD *b, int best_mode)
{
    /* Build the 4x4 intra predictor for the chosen mode. */
    vp8_predict_intra4x4(b, best_mode, b->predictor);

    /* Residual = source - predictor (pitch 16). */
    ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);

    /* Forward 4x4 DCT of the residual (pitch 32). */
    x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);

    /* Quantize the transform coefficients. */
    x->quantize_b(be, b);

    /* Inverse transform the dequantized coefficients back to a
       spatial-domain residual in b->diff. */
    vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32);

    /* Reconstruction = predictor + residual, written to the frame. */
    RECON_INVOKE(&rtcd->common->recon, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
60
/*
 * RD (rate-distortion search) variant of vp8_encode_intra4x4block.
 * Identical pipeline except the inverse transform: this path invokes
 * the full idct16 directly on the dequantized coefficients rather than
 * going through vp8_inverse_transform_b.
 */
void vp8_encode_intra4x4block_rd(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x, BLOCK *be, BLOCKD *b, int best_mode)
{
    /* Build the 4x4 intra predictor for the chosen mode. */
    vp8_predict_intra4x4(b, best_mode, b->predictor);

    /* Residual = source - predictor (pitch 16). */
    ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);

    /* Forward 4x4 DCT of the residual (pitch 32). */
    x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);

    /* Quantize the transform coefficients. */
    x->quantize_b(be, b);

    /* Full inverse DCT of the dequantized coefficients into b->diff. */
    IDCT_INVOKE(&rtcd->common->idct, idct16)(b->dqcoeff, b->diff, 32);

    /* Reconstruction = predictor + residual, written to the frame. */
    RECON_INVOKE(&rtcd->common->recon, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
75
/*
 * Encode all 16 luma sub-blocks of a macroblock in B_PRED (4x4 intra)
 * mode, using the per-block mode already stored in each block's bmi.
 */
void vp8_encode_intra4x4mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb)
{
    MACROBLOCKD *xd = &mb->e_mbd;
    int idx;

    /* Make above-right reference pixels available to the sub-blocks. */
    vp8_intra_prediction_down_copy(xd);

    for (idx = 0; idx < 16; idx++)
    {
        BLOCKD *bd = &xd->block[idx];

        vp8_encode_intra4x4block(rtcd, mb, &mb->block[idx], bd, bd->bmi.mode);
    }
}
93
/*
 * Encode the luma plane of a macroblock with a 16x16 intra mode:
 * predict, subtract, transform, quantize (optionally trellis-optimize),
 * inverse transform, and reconstruct.  Finally mirror the macroblock
 * mode into every sub-block's bmi.mode so that subsequent context
 * updates see consistent 4x4 modes.
 */
void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
    int b;
    int b_mode;

    vp8_build_intra_predictors_mby_ptr(&x->e_mbd);

    /* Residual = source luma - predictor. */
    ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, x->src.y_buffer, x->e_mbd.predictor, x->src.y_stride);

    vp8_transform_intra_mby(x);

    vp8_quantize_mby(x);

#if !(CONFIG_REALTIME_ONLY)
    /* Optional coefficient optimization pass (not built in realtime-only
       configurations). */
    if (x->optimize == 2 || (x->optimize && x->rddiv > 1))
        vp8_optimize_mby(x, rtcd);
#endif

    vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);

    RECON_INVOKE(&rtcd->common->recon, recon_mby)
    (IF_RTCD(&rtcd->common->recon), &x->e_mbd);

    /* Map the 16x16 macroblock mode to its 4x4 equivalent once -- the
       mode is the same for all 16 sub-blocks, so there is no need to
       re-evaluate the switch inside the loop. */
    switch (x->e_mbd.mode_info_context->mbmi.mode)
    {
    case V_PRED:
        b_mode = B_VE_PRED;
        break;
    case H_PRED:
        b_mode = B_HE_PRED;
        break;
    case TM_PRED:
        b_mode = B_TM_PRED;
        break;
    case DC_PRED:
    default:
        b_mode = B_DC_PRED;
        break;
    }

    /* Make sure block modes are set the way we want them for context
       updates. */
    for (b = 0; b < 16; b++)
        x->e_mbd.block[b].bmi.mode = b_mode;
}
146
/*
 * RD-search variant of vp8_encode_intra16x16mby: same luma pipeline but
 * without the optional coefficient optimization pass.  As in the non-RD
 * path, the macroblock mode is mirrored into every sub-block's bmi.mode
 * for context updates.
 */
void vp8_encode_intra16x16mbyrd(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
    int b;
    int b_mode;

    vp8_build_intra_predictors_mby_ptr(&x->e_mbd);

    /* Residual = source luma - predictor. */
    ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, x->src.y_buffer, x->e_mbd.predictor, x->src.y_stride);

    vp8_transform_intra_mby(x);

    vp8_quantize_mby(x);

    vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);

    RECON_INVOKE(&rtcd->common->recon, recon_mby)
    (IF_RTCD(&rtcd->common->recon), &x->e_mbd);

    /* Map the 16x16 macroblock mode to its 4x4 equivalent once -- it is
       loop-invariant, so evaluate the switch before the loop. */
    switch (x->e_mbd.mode_info_context->mbmi.mode)
    {
    case V_PRED:
        b_mode = B_VE_PRED;
        break;
    case H_PRED:
        b_mode = B_HE_PRED;
        break;
    case TM_PRED:
        b_mode = B_TM_PRED;
        break;
    case DC_PRED:
    default:
        b_mode = B_DC_PRED;
        break;
    }

    /* Make sure block modes are set the way we want them for context
       updates. */
    for (b = 0; b < 16; b++)
        x->e_mbd.block[b].bmi.mode = b_mode;
}
191
/*
 * Encode the chroma (U and V) planes of an intra macroblock: predict,
 * subtract, transform, quantize (optionally trellis-optimize), inverse
 * transform, and reconstruct.
 */
void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
    vp8_build_intra_predictors_mbuv(&x->e_mbd);

    /* Residual = source chroma - predictor, for both U and V. */
    ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);

    vp8_transform_mbuv(x);

    vp8_quantize_mbuv(x);

#if !(CONFIG_REALTIME_ONLY)
    /* Optional coefficient optimization pass (not built in realtime-only
       configurations). */
    if (x->optimize == 2 || (x->optimize && x->rddiv > 1))
        vp8_optimize_mbuv(x, rtcd);
#endif

    vp8_inverse_transform_mbuv(IF_RTCD(&rtcd->common->idct), &x->e_mbd);

    vp8_recon_intra_mbuv(IF_RTCD(&rtcd->common->recon), &x->e_mbd);
}
215
/*
 * RD-search variant of vp8_encode_intra16x16mbuv: same chroma pipeline
 * but without the optional coefficient optimization pass.
 */
void vp8_encode_intra16x16mbuvrd(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
    vp8_build_intra_predictors_mbuv(&x->e_mbd);

    /* Residual = source chroma - predictor, for both U and V. */
    ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);

    vp8_transform_mbuv(x);

    vp8_quantize_mbuv(x);

    vp8_inverse_transform_mbuv(IF_RTCD(&rtcd->common->idct), &x->e_mbd);

    vp8_recon_intra_mbuv(IF_RTCD(&rtcd->common->recon), &x->e_mbd);
}
230