/*
 * Copyright (C) 2004 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (C) 2006 Robert Edele <yartrebo@earthlink.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_SNOW_H
#define AVCODEC_SNOW_H

#include "libavutil/motion_vector.h"

#include "hpeldsp.h"
#include "me_cmp.h"
#include "qpeldsp.h"
#include "snow_dwt.h"

#include "rangecoder.h"
#include "mathops.h"

#include "mpegvideo.h"
#include "h264qpel.h"

#define FF_ME_ITER 3

#define MID_STATE 128

#define MAX_PLANES 4
#define QSHIFT 5
#define QROOT (1<<QSHIFT)
#define LOSSLESS_QLOG -128
#define FRAC_BITS 4
#define MAX_REF_FRAMES 8

#define LOG2_OBMC_MAX 8
#define OBMC_MAX (1<<(LOG2_OBMC_MAX))
typedef struct BlockNode{
    int16_t mx;                 ///< Motion vector component X, see mv_scale
    int16_t my;                 ///< Motion vector component Y, see mv_scale
    uint8_t ref;                ///< Reference frame index
    uint8_t color[3];           ///< Color for intra
    uint8_t type;               ///< Bitfield of BLOCK_*
//#define TYPE_SPLIT    1
#define BLOCK_INTRA   1         ///< Intra block, inter otherwise
#define BLOCK_OPT     2         ///< Block needs no checks in this round of iterative motion estimation
//#define TYPE_NOCOLOR  4
    uint8_t level; //FIXME merge into type?
}BlockNode;

static const BlockNode null_block= { //FIXME add border maybe
    .color= {128,128,128},
    .mx= 0,
    .my= 0,
    .ref= 0,
    .type= 0,
    .level= 0,
};

#define LOG2_MB_SIZE 4
#define MB_SIZE (1<<LOG2_MB_SIZE)
#define ENCODER_EXTRA_BITS 4
#define HTAPS_MAX 8

typedef struct x_and_coeff{
    int16_t x;
    uint16_t coeff;
} x_and_coeff;

typedef struct SubBand{
    int level;
    int stride;
    int width;
    int height;
    int qlog;                   ///< log(qscale)/log[2^(1/6)]
    DWTELEM *buf;
    IDWTELEM *ibuf;
    int buf_x_offset;
    int buf_y_offset;
    int stride_line;            ///< Stride measured in lines, not pixels.
    x_and_coeff * x_coeff;
    struct SubBand *parent;
    uint8_t state[/*7*2*/ 7 + 512][32];
}SubBand;

typedef struct Plane{
    int width;
    int height;
    SubBand band[MAX_DECOMPOSITIONS][4];

    int htaps;
    int8_t hcoeff[HTAPS_MAX/2];
    int diag_mc;
    int fast_mc;

    int last_htaps;
    int8_t last_hcoeff[HTAPS_MAX/2];
    int last_diag_mc;
}Plane;

typedef struct SnowContext{
    AVClass *class;
    AVCodecContext *avctx;
    RangeCoder c;
    MECmpContext mecc;
    HpelDSPContext hdsp;
    QpelDSPContext qdsp;
    VideoDSPContext vdsp;
    H264QpelContext h264qpel;
    MpegvideoEncDSPContext mpvencdsp;
    SnowDWTContext dwt;
    AVFrame *input_picture;              ///< new_picture with the internal linesizes
    AVFrame *current_picture;
    AVFrame *last_picture[MAX_REF_FRAMES];
    uint8_t *halfpel_plane[MAX_REF_FRAMES][4][4];
    AVFrame *mconly_picture;
//     uint8_t q_context[16];
    uint8_t header_state[32];
    uint8_t block_state[128 + 32*128];
    int keyframe;
    int always_reset;
    int version;
    int spatial_decomposition_type;
    int last_spatial_decomposition_type;
    int temporal_decomposition_type;
    int spatial_decomposition_count;
    int last_spatial_decomposition_count;
    int temporal_decomposition_count;
    int max_ref_frames;
    int ref_frames;
    int16_t (*ref_mvs[MAX_REF_FRAMES])[2];
    uint32_t *ref_scores[MAX_REF_FRAMES];
    DWTELEM *spatial_dwt_buffer;
    DWTELEM *temp_dwt_buffer;
    IDWTELEM *spatial_idwt_buffer;
    IDWTELEM *temp_idwt_buffer;
    int *run_buffer;
    int colorspace_type;
    int chroma_h_shift;
    int chroma_v_shift;
    int spatial_scalability;
    int qlog;
    int last_qlog;
    int lambda;
    int lambda2;
    int pass1_rc;
    int mv_scale;
    int last_mv_scale;
    int qbias;
    int last_qbias;
#define QBIAS_SHIFT 3
    int b_width;
    int b_height;
    int block_max_depth;
    int last_block_max_depth;
    int nb_planes;
    Plane plane[MAX_PLANES];
    BlockNode *block;
#define ME_CACHE_SIZE 1024
    unsigned me_cache[ME_CACHE_SIZE];
    unsigned me_cache_generation;
    slice_buffer sb;
    int memc_only;
    int no_bitstream;
    int intra_penalty;
    int motion_est;
    int iterative_dia_size;
    int scenechange_threshold;

    MpegEncContext m; // needed for motion estimation only; the plan is to make motion estimation independent of MpegEncContext, at which point this will be removed (FIXME/XXX)

    uint8_t *scratchbuf;
    uint8_t *emu_edge_buffer;

    AVMotionVector *avmv;
    unsigned avmv_size;
    int avmv_index;
    uint64_t encoding_error[AV_NUM_DATA_POINTERS];

    int pred;
}SnowContext;

/* Tables */
extern const uint8_t * const ff_obmc_tab[4];
extern uint8_t ff_qexp[QROOT];
extern int ff_scale_mv_ref[MAX_REF_FRAMES][MAX_REF_FRAMES];

/* C bits used by mmx/sse2/altivec */

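/**
 * Set up the loop index for interleaving one line of low- and high-band
 * samples; if the width is odd, the last low-band sample is moved into its
 * interleaved position here.
 */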
static av_always_inline void snow_interleave_line_header(int * i, int width, IDWTELEM * low, IDWTELEM * high){
    (*i) = (width) - 2;

    if (width & 1){
        low[(*i)+1] = low[((*i)+1)>>1];
        (*i)--;
    }
}

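/**
 * Scalar tail of the interleave: write the remaining high-band samples to the
 * odd positions and the low-band samples to the even positions of the line.
 */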
static av_always_inline void snow_interleave_line_footer(int * i, IDWTELEM * low, IDWTELEM * high){
    for (; (*i)>=0; (*i)-=2){
        low[(*i)+1] = high[(*i)>>1];
        low[*i]     = low[(*i)>>1];
    }
}

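/**
 * Scalar lead-out of a horizontal lifting step: processes the samples not
 * covered by the SIMD main loop and handles the right edge by replicating
 * ref[w] when the parity requires it.
 */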
static av_always_inline void snow_horizontal_compose_lift_lead_out(int i, IDWTELEM * dst, IDWTELEM * src, IDWTELEM * ref, int width, int w, int lift_high, int mul, int add, int shift){
    for(; i<w; i++){
        dst[i] = src[i] - ((mul * (ref[i] + ref[i + 1]) + add) >> shift);
    }

    if((width^lift_high)&1){
        dst[w] = src[w] - ((mul * 2 * ref[w] + add) >> shift);
    }
}

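/**
 * Scalar lead-out of the liftS step (the lifting variant that also feeds
 * src[i] back into the update), using the W_BO/W_BS wavelet constants.
 */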
static av_always_inline void snow_horizontal_compose_liftS_lead_out(int i, IDWTELEM * dst, IDWTELEM * src, IDWTELEM * ref, int width, int w){
    for(; i<w; i++){
        dst[i] = src[i] + ((ref[i] + ref[(i+1)]+W_BO + 4 * src[i]) >> W_BS);
    }

    if(width&1){
        dst[w] = src[w] + ((2 * ref[w] + W_BO + 4 * src[w]) >> W_BS);
    }
}

/* common code */

int ff_snow_common_init(AVCodecContext *avctx);
int ff_snow_common_init_after_header(AVCodecContext *avctx);
void ff_snow_common_end(SnowContext *s);
void ff_snow_release_buffer(AVCodecContext *avctx);
void ff_snow_reset_contexts(SnowContext *s);
int ff_snow_alloc_blocks(SnowContext *s);
int ff_snow_frame_start(SnowContext *s);
void ff_snow_pred_block(SnowContext *s, uint8_t *dst, uint8_t *tmp, ptrdiff_t stride,
                        int sx, int sy, int b_w, int b_h, const BlockNode *block,
                        int plane_index, int w, int h);
int ff_snow_get_buffer(SnowContext *s, AVFrame *frame);
/* common inline functions */
//XXX doublecheck all of them should stay inlined

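/**
 * Median-predict the motion vector of a block from its left, top and
 * top-right neighbours; with more than one reference frame the neighbour
 * vectors are first rescaled to the requested reference via ff_scale_mv_ref.
 */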
static inline void pred_mv(SnowContext *s, int *mx, int *my, int ref,
                           const BlockNode *left, const BlockNode *top, const BlockNode *tr){
    if(s->ref_frames == 1){
        *mx = mid_pred(left->mx, top->mx, tr->mx);
        *my = mid_pred(left->my, top->my, tr->my);
    }else{
        const int *scale = ff_scale_mv_ref[ref];
        *mx = mid_pred((left->mx * scale[left->ref] + 128) >>8,
                       (top ->mx * scale[top ->ref] + 128) >>8,
                       (tr  ->mx * scale[tr  ->ref] + 128) >>8);
        *my = mid_pred((left->my * scale[left->ref] + 128) >>8,
                       (top ->my * scale[top ->ref] + 128) >>8,
                       (tr  ->my * scale[tr  ->ref] + 128) >>8);
    }
}

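/**
 * Return whether two blocks yield the same prediction: equal color for two
 * intra blocks, equal mv/ref for two inter blocks.
 */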
static av_always_inline int same_block(BlockNode *a, BlockNode *b){
    if((a->type&BLOCK_INTRA) && (b->type&BLOCK_INTRA)){
        return !((a->color[0] - b->color[0]) | (a->color[1] - b->color[1]) | (a->color[2] - b->color[2]));
    }else{
        return !((a->mx - b->mx) | (a->my - b->my) | (a->ref - b->ref) | ((a->type ^ b->type)&BLOCK_INTRA));
    }
}

//FIXME name cleanup (b_w, block_w, b_width stuff)
//XXX should we really inline it?
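/**
 * Overlapped block motion compensation for one block: predict from the up to
 * four covering BlockNodes, weight the predictions with the OBMC window, then
 * either combine with the residual in dst and write reconstructed pixels to
 * dst8 (add=1) or subtract the prediction from the dst buffer (add=0).
 */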
static av_always_inline void add_yblock(SnowContext *s, int sliced, slice_buffer *sb, IDWTELEM *dst, uint8_t *dst8, const uint8_t *obmc, int src_x, int src_y, int b_w, int b_h, int w, int h, int dst_stride, int src_stride, int obmc_stride, int b_x, int b_y, int add, int offset_dst, int plane_index){
    const int b_width = s->b_width  << s->block_max_depth;
    const int b_height= s->b_height << s->block_max_depth;
    const int b_stride= b_width;
    BlockNode *lt= &s->block[b_x + b_y*b_stride];
    BlockNode *rt= lt+1;
    BlockNode *lb= lt+b_stride;
    BlockNode *rb= lb+1;
    uint8_t *block[4];
    // When src_stride is large enough, it is possible to interleave the blocks.
    // Otherwise the blocks are written sequentially in the tmp buffer.
    int tmp_step= src_stride >= 7*MB_SIZE ? MB_SIZE : MB_SIZE*src_stride;
    uint8_t *tmp = s->scratchbuf;
    uint8_t *ptmp;
    int x,y;

    if(b_x<0){
        lt= rt;
        lb= rb;
    }else if(b_x + 1 >= b_width){
        rt= lt;
        rb= lb;
    }
    if(b_y<0){
        lt= lb;
        rt= rb;
    }else if(b_y + 1 >= b_height){
        lb= lt;
        rb= rt;
    }

    if(src_x<0){ //FIXME merge with prev & always round internal width up to *16
        obmc -= src_x;
        b_w  += src_x;
        if(!sliced && !offset_dst)
            dst -= src_x;
        src_x=0;
    }
    if(src_x + b_w > w){
        b_w = w - src_x;
    }
    if(src_y<0){
        obmc -= src_y*obmc_stride;
        b_h  += src_y;
        if(!sliced && !offset_dst)
            dst -= src_y*dst_stride;
        src_y=0;
    }
    if(src_y + b_h> h){
        b_h = h - src_y;
    }

    if(b_w<=0 || b_h<=0) return;

    if(!sliced && offset_dst)
        dst += src_x + src_y*dst_stride;
    dst8+= src_x + src_y*src_stride;
//    src += src_x + src_y*src_stride;

    ptmp= tmp + 3*tmp_step;
    block[0]= ptmp;
    ptmp+=tmp_step;
    ff_snow_pred_block(s, block[0], tmp, src_stride, src_x, src_y, b_w, b_h, lt, plane_index, w, h);

    if(same_block(lt, rt)){
        block[1]= block[0];
    }else{
        block[1]= ptmp;
        ptmp+=tmp_step;
        ff_snow_pred_block(s, block[1], tmp, src_stride, src_x, src_y, b_w, b_h, rt, plane_index, w, h);
    }

    if(same_block(lt, lb)){
        block[2]= block[0];
    }else if(same_block(rt, lb)){
        block[2]= block[1];
    }else{
        block[2]= ptmp;
        ptmp+=tmp_step;
        ff_snow_pred_block(s, block[2], tmp, src_stride, src_x, src_y, b_w, b_h, lb, plane_index, w, h);
    }

    if(same_block(lt, rb) ){
        block[3]= block[0];
    }else if(same_block(rt, rb)){
        block[3]= block[1];
    }else if(same_block(lb, rb)){
        block[3]= block[2];
    }else{
        block[3]= ptmp;
        ff_snow_pred_block(s, block[3], tmp, src_stride, src_x, src_y, b_w, b_h, rb, plane_index, w, h);
    }
    if(sliced){
        s->dwt.inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
    }else{
        for(y=0; y<b_h; y++){
            //FIXME ugly misuse of obmc_stride
            const uint8_t *obmc1= obmc + y*obmc_stride;
            const uint8_t *obmc2= obmc1+ (obmc_stride>>1);
            const uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
            const uint8_t *obmc4= obmc3+ (obmc_stride>>1);
            for(x=0; x<b_w; x++){
                int v=   obmc1[x] * block[3][x + y*src_stride]
                        +obmc2[x] * block[2][x + y*src_stride]
                        +obmc3[x] * block[1][x + y*src_stride]
                        +obmc4[x] * block[0][x + y*src_stride];

                v <<= 8 - LOG2_OBMC_MAX;
                if(FRAC_BITS != 8){
                    v >>= 8 - FRAC_BITS;
                }
                if(add){
                    v += dst[x + y*dst_stride];
                    v = (v + (1<<(FRAC_BITS-1))) >> FRAC_BITS;
                    if(v&(~255)) v= ~(v>>31);
                    dst8[x + y*src_stride] = v;
                }else{
                    dst[x + y*dst_stride] -= v;
                }
            }
        }
    }
}

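/**
 * Run OBMC prediction for one row of macroblocks of a plane. For keyframes
 * (or with debug flag 512 set) only the +128 bias is added or removed.
 */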
static av_always_inline void predict_slice(SnowContext *s, IDWTELEM *buf, int plane_index, int add, int mb_y){
    Plane *p= &s->plane[plane_index];
    const int mb_w= s->b_width  << s->block_max_depth;
    const int mb_h= s->b_height << s->block_max_depth;
    int x, y, mb_x;
    int block_size = MB_SIZE >> s->block_max_depth;
    int block_w    = plane_index ? block_size>>s->chroma_h_shift : block_size;
    int block_h    = plane_index ? block_size>>s->chroma_v_shift : block_size;
    const uint8_t *obmc  = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
    const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
    int ref_stride= s->current_picture->linesize[plane_index];
    uint8_t *dst8= s->current_picture->data[plane_index];
    int w= p->width;
    int h= p->height;
    av_assert2(s->chroma_h_shift == s->chroma_v_shift); // obmc params assume squares
    if(s->keyframe || (s->avctx->debug&512)){
        if(mb_y==mb_h)
            return;

        if(add){
            for(y=block_h*mb_y; y<FFMIN(h,block_h*(mb_y+1)); y++){
                for(x=0; x<w; x++){
                    int v= buf[x + y*w] + (128<<FRAC_BITS) + (1<<(FRAC_BITS-1));
                    v >>= FRAC_BITS;
                    if(v&(~255)) v= ~(v>>31);
                    dst8[x + y*ref_stride]= v;
                }
            }
        }else{
            for(y=block_h*mb_y; y<FFMIN(h,block_h*(mb_y+1)); y++){
                for(x=0; x<w; x++){
                    buf[x + y*w]-= 128<<FRAC_BITS;
                }
            }
        }

        return;
    }

    for(mb_x=0; mb_x<=mb_w; mb_x++){
        add_yblock(s, 0, NULL, buf, dst8, obmc,
                   block_w*mb_x - block_w/2,
                   block_h*mb_y - block_h/2,
                   block_w, block_h,
                   w, h,
                   w, ref_stride, obmc_stride,
                   mb_x - 1, mb_y - 1,
                   add, 1, plane_index);
    }
}

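/**
 * Apply predict_slice() to every macroblock row of a plane.
 */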
static av_always_inline void predict_plane(SnowContext *s, IDWTELEM *buf, int plane_index, int add){
    const int mb_h= s->b_height << s->block_max_depth;
    int mb_y;
    for(mb_y=0; mb_y<=mb_h; mb_y++)
        predict_slice(s, buf, plane_index, add, mb_y);
}

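/**
 * Store one BlockNode into every block-array entry covered by the block at
 * (x, y) on the given level of the block tree.
 */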
static inline void set_blocks(SnowContext *s, int level, int x, int y, int l, int cb, int cr, int mx, int my, int ref, int type){
    const int w= s->b_width << s->block_max_depth;
    const int rem_depth= s->block_max_depth - level;
    const int index= (x + y*w) << rem_depth;
    const int block_w= 1<<rem_depth;
    const int block_h= 1<<rem_depth; //FIXME "w!=h"
    BlockNode block;
    int i,j;

    block.color[0]= l;
    block.color[1]= cb;
    block.color[2]= cr;
    block.mx= mx;
    block.my= my;
    block.ref= ref;
    block.type= type;
    block.level= level;

    for(j=0; j<block_h; j++){
        for(i=0; i<block_w; i++){
            s->block[index + i + j*w]= block;
        }
    }
}

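/**
 * Set up the MotionEstContext source planes and the reference planes
 * (offset to the block at (x, y)) before motion estimation.
 */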
static inline void init_ref(MotionEstContext *c, uint8_t *src[3], uint8_t *ref[3], uint8_t *ref2[3], int x, int y, int ref_index){
    SnowContext *s = c->avctx->priv_data;
    const int offset[3]= {
          y*c->  stride + x,
        ((y*c->uvstride + x)>>s->chroma_h_shift),
        ((y*c->uvstride + x)>>s->chroma_h_shift),
    };
    int i;
    for(i=0; i<3; i++){
        c->src[0][i]= src [i];
        c->ref[0][i]= ref [i] + offset[i];
    }
    av_assert2(!ref_index);
}


/* bitstream functions */

extern const int8_t ff_quant3bA[256];

#define QEXPSHIFT (7-FRAC_BITS+8) //FIXME try to change this to 0

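/**
 * Write an arbitrarily sized, optionally signed integer to the range coder:
 * a zero flag, the unary-coded exponent e = av_log2(|v|), the e bits of |v|
 * below its most significant bit and, if is_signed, a sign bit.
 * e.g. v=5 (101b) codes as: flag 0, exponent 1,1,0, mantissa 0,1, sign 0.
 */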
static inline void put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signed){
    int i;

    if(v){
        const int a= FFABS(v);
        const int e= av_log2(a);
        const int el= FFMIN(e, 10);
        put_rac(c, state+0, 0);

        for(i=0; i<el; i++){
            put_rac(c, state+1+i, 1);  //1..10
        }
        for(; i<e; i++){
            put_rac(c, state+1+9, 1);  //1..10
        }
        put_rac(c, state+1+FFMIN(i,9), 0);

        for(i=e-1; i>=el; i--){
            put_rac(c, state+22+9, (a>>i)&1); //22..31
        }
        for(; i>=0; i--){
            put_rac(c, state+22+i, (a>>i)&1); //22..31
        }

        if(is_signed)
            put_rac(c, state+11 + el, v < 0); //11..21
    }else{
        put_rac(c, state+0, 1);
    }
}

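/**
 * Read back an integer written by put_symbol(); returns AVERROR_INVALIDDATA
 * if the coded exponent is implausibly large.
 */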
static inline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed){
    if(get_rac(c, state+0))
        return 0;
    else{
        int i, e;
        unsigned a;
        e= 0;
        while(get_rac(c, state+1 + FFMIN(e,9))){ //1..10
            e++;
            if (e > 31)
                return AVERROR_INVALIDDATA;
        }

        a= 1;
        for(i=e-1; i>=0; i--){
            a += a + get_rac(c, state+22 + FFMIN(i,9)); //22..31
        }

        e= -(is_signed && get_rac(c, state+11 + FFMIN(e,10))); //11..21
        return (a^e)-e;
    }
}

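/**
 * Write a non-negative integer as an adaptive Golomb-like code: a unary
 * prefix whose step size doubles while log2 grows, followed by log2 suffix
 * bits.
 */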
static inline void put_symbol2(RangeCoder *c, uint8_t *state, int v, int log2){
    int i;
    int r= log2>=0 ? 1<<log2 : 1;

    av_assert2(v>=0);
    av_assert2(log2>=-4);

    while(v >= r){
        put_rac(c, state+4+log2, 1);
        v -= r;
        log2++;
        if(log2>0) r+=r;
    }
    put_rac(c, state+4+log2, 0);

    for(i=log2-1; i>=0; i--){
        put_rac(c, state+31-i, (v>>i)&1);
    }
}

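/**
 * Read back a value written by put_symbol2() with the same initial log2.
 */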
static inline int get_symbol2(RangeCoder *c, uint8_t *state, int log2){
    int i;
    int r= log2>=0 ? 1<<log2 : 1;
    int v=0;

    av_assert2(log2>=-4);

    while(log2<28 && get_rac(c, state+4+log2)){
        v+= r;
        log2++;
        if(log2>0) r+=r;
    }

    for(i=log2-1; i>=0; i--){
        v+= get_rac(c, state+31-i)<<i;
    }

    return v;
}

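/**
 * Decode the run-length coded significance map and coefficient magnitudes of
 * one subband into its x_and_coeff array, using the previous line and the
 * parent subband as context; each line is terminated by an x = width+1
 * end marker.
 */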
static inline void unpack_coeffs(SnowContext *s, SubBand *b, SubBand * parent, int orientation){
    const int w= b->width;
    const int h= b->height;
    int x,y;

    int run, runs;
    x_and_coeff *xc= b->x_coeff;
    x_and_coeff *prev_xc= NULL;
    x_and_coeff *prev2_xc= xc;
    x_and_coeff *parent_xc= parent ? parent->x_coeff : NULL;
    x_and_coeff *prev_parent_xc= parent_xc;

    runs= get_symbol2(&s->c, b->state[30], 0);
    if(runs-- > 0) run= get_symbol2(&s->c, b->state[1], 3);
    else           run= INT_MAX;

    for(y=0; y<h; y++){
        int v=0;
        int lt=0, t=0, rt=0;

        if(y && prev_xc->x == 0){
            rt= prev_xc->coeff;
        }
        for(x=0; x<w; x++){
            int p=0;
            const int l= v;

            lt= t; t= rt;

            if(y){
                if(prev_xc->x <= x)
                    prev_xc++;
                if(prev_xc->x == x + 1)
                    rt= prev_xc->coeff;
                else
                    rt=0;
            }
            if(parent_xc){
                if(x>>1 > parent_xc->x){
                    parent_xc++;
                }
                if(x>>1 == parent_xc->x){
                    p= parent_xc->coeff;
                }
            }
            if(/*ll|*/l|lt|t|rt|p){
                int context= av_log2(/*FFABS(ll) + */3*(l>>1) + (lt>>1) + (t&~1) + (rt>>1) + (p>>1));

                v=get_rac(&s->c, &b->state[0][context]);
                if(v){
                    v= 2*(get_symbol2(&s->c, b->state[context + 2], context-4) + 1);
                    v+=get_rac(&s->c, &b->state[0][16 + 1 + 3 + ff_quant3bA[l&0xFF] + 3*ff_quant3bA[t&0xFF]]);
                    if ((uint16_t)v != v) {
                        av_log(s->avctx, AV_LOG_ERROR, "Coefficient damaged\n");
                        v = 1;
                    }
                    xc->x=x;
                    (xc++)->coeff= v;
                }
            }else{
                if(!run){
                    if(runs-- > 0) run= get_symbol2(&s->c, b->state[1], 3);
                    else           run= INT_MAX;
                    v= 2*(get_symbol2(&s->c, b->state[0 + 2], 0-4) + 1);
                    v+=get_rac(&s->c, &b->state[0][16 + 1 + 3]);
                    if ((uint16_t)v != v) {
                        av_log(s->avctx, AV_LOG_ERROR, "Coefficient damaged\n");
                        v = 1;
                    }

                    xc->x=x;
                    (xc++)->coeff= v;
                }else{
                    int max_run;
                    run--;
                    v=0;
                    av_assert2(run >= 0);
                    if(y) max_run= FFMIN(run, prev_xc->x - x - 2);
                    else  max_run= FFMIN(run, w-x-1);
                    if(parent_xc)
                        max_run= FFMIN(max_run, 2*parent_xc->x - x - 1);
                    av_assert2(max_run >= 0 && max_run <= run);

                    x+= max_run;
                    run-= max_run;
                }
            }
        }
        (xc++)->x= w+1; //end marker
        prev_xc= prev2_xc;
        prev2_xc= xc;

        if(parent_xc){
            if(y&1){
                while(parent_xc->x != parent->width+1)
                    parent_xc++;
                parent_xc++;
                prev_parent_xc= parent_xc;
            }else{
                parent_xc= prev_parent_xc;
            }
        }
    }

    (xc++)->x= w+1; //end marker
}

#endif /* AVCODEC_SNOW_H */