/*
 * H.263/MPEG-4 backend for encoder and decoder
 * Copyright (c) 2000,2001 Fabrice Bellard
 * H.263+ support.
 * Copyright (c) 2001 Juan J. Sierralta P
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.263/MPEG-4 codec.
 */

#include "libavutil/thread.h"
#include "mpegvideo.h"
#include "h263.h"
#include "h263data.h"
#include "h263dsp.h"
#include "idctdsp.h"
#include "mathops.h"
#include "mpegpicture.h"
#include "mpegutils.h"
#include "rl.h"

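/*
 * One-time initialization of the inter run/level (RL) tables: the static
 * buffer below provides the storage that ff_rl_init() fills in, and
 * ff_thread_once() ensures this runs exactly once even when several codec
 * instances are opened concurrently.
 */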
static av_cold void h263_init_rl_inter(void)
{
    static uint8_t h263_rl_inter_table[2][2 * MAX_RUN + MAX_LEVEL + 3];
    ff_rl_init(&ff_h263_rl_inter, h263_rl_inter_table);
}

av_cold void ff_h263_init_rl_inter(void)
{
    static AVOnce init_static_once = AV_ONCE_INIT;
    ff_thread_once(&init_static_once, h263_init_rl_inter);
}

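/**
 * Update the per-picture motion information for the current macroblock:
 * the skip flag is stored in mbskip_table, the 16x16 (or combined field)
 * motion vector is replicated into all four 8x8 slots of motion_val, field
 * reference indices are recorded, and, when encoding, mb_type is set.
 * 8x8 vectors are left untouched here because they are already stored
 * during parsing.
 */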
void ff_h263_update_motion_val(MpegEncContext * s){
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
    //FIXME a lot of that is only needed for !low_delay
    const int wrap = s->b8_stride;
    const int xy = s->block_index[0];

    s->current_picture.mbskip_table[mb_xy] = s->mb_skipped;

    if(s->mv_type != MV_TYPE_8X8){
        int motion_x, motion_y;
        if (s->mb_intra) {
            motion_x = 0;
            motion_y = 0;
        } else if (s->mv_type == MV_TYPE_16X16) {
            motion_x = s->mv[0][0][0];
            motion_y = s->mv[0][0][1];
        } else /*if (s->mv_type == MV_TYPE_FIELD)*/ {
            int i;
            motion_x = s->mv[0][0][0] + s->mv[0][1][0];
            motion_y = s->mv[0][0][1] + s->mv[0][1][1];
            motion_x = (motion_x>>1) | (motion_x&1);
            for(i=0; i<2; i++){
                s->p_field_mv_table[i][0][mb_xy][0]= s->mv[0][i][0];
                s->p_field_mv_table[i][0][mb_xy][1]= s->mv[0][i][1];
            }
            s->current_picture.ref_index[0][4*mb_xy    ] =
            s->current_picture.ref_index[0][4*mb_xy + 1] = s->field_select[0][0];
            s->current_picture.ref_index[0][4*mb_xy + 2] =
            s->current_picture.ref_index[0][4*mb_xy + 3] = s->field_select[0][1];
        }

        /* no update if 8X8 because it has been done during parsing */
        s->current_picture.motion_val[0][xy][0] = motion_x;
        s->current_picture.motion_val[0][xy][1] = motion_y;
        s->current_picture.motion_val[0][xy + 1][0] = motion_x;
        s->current_picture.motion_val[0][xy + 1][1] = motion_y;
        s->current_picture.motion_val[0][xy + wrap][0] = motion_x;
        s->current_picture.motion_val[0][xy + wrap][1] = motion_y;
        s->current_picture.motion_val[0][xy + 1 + wrap][0] = motion_x;
        s->current_picture.motion_val[0][xy + 1 + wrap][1] = motion_y;
    }

    if(s->encoding){ //FIXME encoding MUST be cleaned up
        if (s->mv_type == MV_TYPE_8X8)
            s->current_picture.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8;
        else if(s->mb_intra)
            s->current_picture.mb_type[mb_xy] = MB_TYPE_INTRA;
        else
            s->current_picture.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16;
    }
}

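/**
 * Apply the H.263 deblocking (loop) filter to the edges of the current
 * macroblock and of its top/left neighbours. Roughly: an edge is filtered
 * with the qscale of one of its non-skipped sides and left alone when both
 * sides are skipped; chroma edges use the mapped chroma_qscale_table value.
 */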
void ff_h263_loop_filter(MpegEncContext * s){
    int qp_c;
    const int linesize  = s->linesize;
    const int uvlinesize= s->uvlinesize;
    const int xy = s->mb_y * s->mb_stride + s->mb_x;
    uint8_t *dest_y = s->dest[0];
    uint8_t *dest_cb= s->dest[1];
    uint8_t *dest_cr= s->dest[2];

    /*
       Diag  Top
       Left  Center
    */
    if (!IS_SKIP(s->current_picture.mb_type[xy])) {
        qp_c= s->qscale;
        s->h263dsp.h263_v_loop_filter(dest_y + 8 * linesize, linesize, qp_c);
        s->h263dsp.h263_v_loop_filter(dest_y + 8 * linesize + 8, linesize, qp_c);
    }else
        qp_c= 0;

    if(s->mb_y){
        int qp_dt, qp_tt, qp_tc;

        if (IS_SKIP(s->current_picture.mb_type[xy - s->mb_stride]))
            qp_tt=0;
        else
            qp_tt = s->current_picture.qscale_table[xy - s->mb_stride];

        if(qp_c)
            qp_tc= qp_c;
        else
            qp_tc= qp_tt;

        if(qp_tc){
            const int chroma_qp= s->chroma_qscale_table[qp_tc];
            s->h263dsp.h263_v_loop_filter(dest_y, linesize, qp_tc);
            s->h263dsp.h263_v_loop_filter(dest_y + 8, linesize, qp_tc);

            s->h263dsp.h263_v_loop_filter(dest_cb, uvlinesize, chroma_qp);
            s->h263dsp.h263_v_loop_filter(dest_cr, uvlinesize, chroma_qp);
        }

        if(qp_tt)
            s->h263dsp.h263_h_loop_filter(dest_y - 8 * linesize + 8, linesize, qp_tt);

        if(s->mb_x){
            if (qp_tt || IS_SKIP(s->current_picture.mb_type[xy - 1 - s->mb_stride]))
                qp_dt= qp_tt;
            else
                qp_dt = s->current_picture.qscale_table[xy - 1 - s->mb_stride];

            if(qp_dt){
                const int chroma_qp= s->chroma_qscale_table[qp_dt];
                s->h263dsp.h263_h_loop_filter(dest_y  - 8 * linesize,   linesize,   qp_dt);
                s->h263dsp.h263_h_loop_filter(dest_cb - 8 * uvlinesize, uvlinesize, chroma_qp);
                s->h263dsp.h263_h_loop_filter(dest_cr - 8 * uvlinesize, uvlinesize, chroma_qp);
            }
        }
    }

    if(qp_c){
        s->h263dsp.h263_h_loop_filter(dest_y + 8, linesize, qp_c);
        if(s->mb_y + 1 == s->mb_height)
            s->h263dsp.h263_h_loop_filter(dest_y + 8 * linesize + 8, linesize, qp_c);
    }

    if(s->mb_x){
        int qp_lc;
        if (qp_c || IS_SKIP(s->current_picture.mb_type[xy - 1]))
            qp_lc= qp_c;
        else
            qp_lc = s->current_picture.qscale_table[xy - 1];

        if(qp_lc){
            s->h263dsp.h263_h_loop_filter(dest_y, linesize, qp_lc);
            if(s->mb_y + 1 == s->mb_height){
                const int chroma_qp= s->chroma_qscale_table[qp_lc];
                s->h263dsp.h263_h_loop_filter(dest_y + 8 * linesize, linesize, qp_lc);
                s->h263dsp.h263_h_loop_filter(dest_cb, uvlinesize, chroma_qp);
                s->h263dsp.h263_h_loop_filter(dest_cr, uvlinesize, chroma_qp);
            }
        }
    }
}

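/**
 * Compute the median motion vector predictor for the given 8x8 block and
 * return a pointer to that block's slot in motion_val, so the caller can
 * store the final vector there. The prediction (*px, *py) is the
 * component-wise median of the left (A), top (B) and top-right (C)
 * neighbours, with special cases on the first line of a slice and at
 * resync boundaries.
 *
 * Illustrative use (a sketch only; block/dir and what is done with the
 * prediction depend on the caller):
 *
 *     int pred_x, pred_y;
 *     int16_t *mot = ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
 *     // code the MV difference against (pred_x, pred_y), then write the
 *     // final vector back through mot[0] / mot[1].
 */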
int16_t *ff_h263_pred_motion(MpegEncContext * s, int block, int dir,
                             int *px, int *py)
{
    int wrap;
    int16_t *A, *B, *C, (*mot_val)[2];
    static const int off[4]= {2, 1, 1, -1};

    wrap = s->b8_stride;
    mot_val = s->current_picture.motion_val[dir] + s->block_index[block];

    A = mot_val[ - 1];
    /* special case for first (slice) line */
    if (s->first_slice_line && block<3) {
        // we can't just change some MVs to simulate that as we need them for the B-frames (and ME)
        // and if we ever support non-rectangular objects then we need to do a few ifs here anyway :(
        if(block==0){ //most common case
            if(s->mb_x == s->resync_mb_x){ //rare
                *px= *py = 0;
            }else if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare
                C = mot_val[off[block] - wrap];
                if(s->mb_x==0){
                    *px = C[0];
                    *py = C[1];
                }else{
                    *px = mid_pred(A[0], 0, C[0]);
                    *py = mid_pred(A[1], 0, C[1]);
                }
            }else{
                *px = A[0];
                *py = A[1];
            }
        }else if(block==1){
            if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare
                C = mot_val[off[block] - wrap];
                *px = mid_pred(A[0], 0, C[0]);
                *py = mid_pred(A[1], 0, C[1]);
            }else{
                *px = A[0];
                *py = A[1];
            }
        }else{ /* block==2 */
            B = mot_val[ - wrap];
            C = mot_val[off[block] - wrap];
            if(s->mb_x == s->resync_mb_x) //rare
                A[0]=A[1]=0;

            *px = mid_pred(A[0], B[0], C[0]);
            *py = mid_pred(A[1], B[1], C[1]);
        }
    } else {
        B = mot_val[ - wrap];
        C = mot_val[off[block] - wrap];
        *px = mid_pred(A[0], B[0], C[0]);
        *py = mid_pred(A[1], B[1], C[1]);
    }
    return *mot_val;
}