/third_party/ffmpeg/libavcodec/ |
D | adpcm.c |
    162  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata), 18);  in adpcm_decode_init()
    163  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);  in adpcm_decode_init()
    169  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 16), 18);  in adpcm_decode_init()
    171  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);  in adpcm_decode_init()
    174  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 0), 18);  in adpcm_decode_init()
    176  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 8), 18);  in adpcm_decode_init()
    238  pred = c->predictor;  in adpcm_agm_expand_nibble()
    256  c->predictor = pred;  in adpcm_agm_expand_nibble()
    274  c->predictor = pred;  in adpcm_agm_expand_nibble()
    281  int predictor;  in adpcm_ima_expand_nibble() local
    [all …]
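
The adpcm_ima_expand_nibble() hit at line 281 is the core IMA ADPCM recurrence: each 4-bit code scales the current step size into a delta, updates the predictor, clamps it to 16-bit range, and adapts the step index. A minimal self-contained sketch of that recurrence — the tables are the standard IMA ones (the same values live in ffmpeg's ff_adpcm_step_table / ff_adpcm_index_table); the function name and signature here are illustrative, not ffmpeg's exact code:

    #include <stdint.h>

    static const int8_t ima_index_table[16] = {
        -1, -1, -1, -1, 2, 4, 6, 8,
        -1, -1, -1, -1, 2, 4, 6, 8,
    };

    static const int16_t ima_step_table[89] = {
            7,     8,     9,    10,    11,    12,    13,    14,    16,    17,
           19,    21,    23,    25,    28,    31,    34,    37,    41,    45,
           50,    55,    60,    66,    73,    80,    88,    97,   107,   118,
          130,   143,   157,   173,   190,   209,   230,   253,   279,   307,
          337,   371,   408,   449,   494,   544,   598,   658,   724,   796,
          876,   963,  1060,  1166,  1282,  1411,  1552,  1707,  1878,  2066,
         2272,  2499,  2749,  3024,  3327,  3660,  4026,  4428,  4871,  5358,
         5894,  6484,  7132,  7845,  8630,  9493, 10442, 11487, 12635, 13899,
        15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767,
    };

    /* Expand one 4-bit IMA code into a 16-bit PCM sample (sketch). */
    static int16_t ima_expand_nibble(int *predictor, int *step_index, int nibble)
    {
        int step = ima_step_table[*step_index];

        /* diff = step/8 plus fractions of step selected by the nibble's
         * three magnitude bits; bit 3 is the sign. */
        int diff = step >> 3;
        if (nibble & 4) diff += step;
        if (nibble & 2) diff += step >> 1;
        if (nibble & 1) diff += step >> 2;

        if (nibble & 8)
            *predictor -= diff;
        else
            *predictor += diff;

        /* Clamp to int16 range (av_clip_int16 in ffmpeg). */
        if (*predictor >  32767) *predictor =  32767;
        if (*predictor < -32768) *predictor = -32768;

        /* Adapt the step index and keep it inside the 89-entry table. */
        *step_index += ima_index_table[nibble & 0x0F];
        if (*step_index < 0)  *step_index = 0;
        if (*step_index > 88) *step_index = 88;

        return (int16_t)*predictor;
    }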
|
D | dpcm.c |
    216  int predictor[2];  in dpcm_decode_frame() local
    270  predictor[1] = sign_extend(bytestream2_get_byteu(&gb) << 8, 16);  in dpcm_decode_frame()
    271  predictor[0] = sign_extend(bytestream2_get_byteu(&gb) << 8, 16);  in dpcm_decode_frame()
    273  predictor[0] = sign_extend(bytestream2_get_le16u(&gb), 16);  in dpcm_decode_frame()
    278  predictor[ch] += s->array[bytestream2_get_byteu(&gb)];  in dpcm_decode_frame()
    279  predictor[ch] = av_clip_int16(predictor[ch]);  in dpcm_decode_frame()
    280  *output_samples++ = predictor[ch];  in dpcm_decode_frame()
    291  predictor[ch] = sign_extend(bytestream2_get_le16u(&gb), 16);  in dpcm_decode_frame()
    292  *output_samples++ = predictor[ch];  in dpcm_decode_frame()
    297  predictor[ch] += interplay_delta_table[bytestream2_get_byteu(&gb)];  in dpcm_decode_frame()
    [all …]
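
Every branch of dpcm_decode_frame() shares one shape: seed predictor[ch] from the bitstream (a sign-extended byte or 16-bit word), then per input byte add a table-driven delta and clamp to int16. A minimal sketch of that inner loop, with the delta table left as a codec-specific parameter (the SOL, Xan, and Interplay variants each supply their own):

    #include <stdint.h>

    /* Generic table-driven DPCM inner loop; predictor[] must be seeded
     * from the stream header before the first call. */
    static void dpcm_decode(int16_t *out, const uint8_t *in, int n,
                            int channels, int predictor[2],
                            const int16_t *delta_table)
    {
        int ch = 0;
        for (int i = 0; i < n; i++) {
            predictor[ch] += delta_table[in[i]];
            if (predictor[ch] >  32767) predictor[ch] =  32767;
            if (predictor[ch] < -32768) predictor[ch] = -32768;
            *out++ = (int16_t)predictor[ch];
            ch = (ch + 1) % channels;   /* interleaved channels */
        }
    }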
|
D | vmdaudio.c |
    110  int predictor[2];  in decode_audio_s16() local
    115  predictor[ch] = (int16_t)AV_RL16(buf);  in decode_audio_s16()
    117  *out++ = predictor[ch];  in decode_audio_s16()
    125  predictor[ch] -= vmdaudio_table[b & 0x7F];  in decode_audio_s16()
    127  predictor[ch] += vmdaudio_table[b];  in decode_audio_s16()
    128  predictor[ch] = av_clip_int16(predictor[ch]);  in decode_audio_s16()
    129  *out++ = predictor[ch];  in decode_audio_s16()
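
decode_audio_s16() is the same delta loop with a sign-magnitude twist: bit 7 of each byte selects subtraction and the low 7 bits index the delta table. A sketch of one step (vmdaudio_table stands in for the codec's table, whose contents aren't shown here):

    #include <stdint.h>

    /* One VMD audio step: the byte's high bit is a sign flag, the low
     * 7 bits index the delta table; result is clamped to int16. */
    static int16_t vmd_step(int *predictor, uint8_t b, const uint16_t *vmdaudio_table)
    {
        if (b & 0x80)
            *predictor -= vmdaudio_table[b & 0x7F];
        else
            *predictor += vmdaudio_table[b];
        if (*predictor >  32767) *predictor =  32767;
        if (*predictor < -32768) *predictor = -32768;
        return (int16_t)*predictor;
    }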
|
D | adpcmenc.c |
    286  int predictor, nibble, bias;  in adpcm_ms_compress_sample() local
    288  predictor = (((c->sample1) * (c->coeff1)) +  in adpcm_ms_compress_sample()
    291  nibble = sample - predictor;  in adpcm_ms_compress_sample()
    300  predictor += ((nibble & 0x08) ? (nibble - 0x10) : nibble) * c->idelta;  in adpcm_ms_compress_sample()
    303  c->sample1 = av_clip_int16(predictor);  in adpcm_ms_compress_sample()
    318  c->predictor = 0;  in adpcm_yamaha_compress_sample()
    322  delta = sample - c->predictor;  in adpcm_yamaha_compress_sample()
    326  c->predictor += ((c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8);  in adpcm_yamaha_compress_sample()
    327  c->predictor = av_clip_int16(c->predictor);  in adpcm_yamaha_compress_sample()
    371  nodes[0]->sample1 = c->predictor;  in adpcm_compress_trellis()
    [all …]
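
adpcm_ms_compress_sample() is the textbook MS ADPCM encoder step: predict from the last two reconstructed samples with 8.8 fixed-point coefficients, quantize the residual by the current step idelta with a half-step bias, reconstruct exactly as the decoder will, and adapt idelta. A self-contained sketch under those assumptions — the struct layout is illustrative; the adaptation table is the one from the MS spec (ff_adpcm_AdaptationTable in ffmpeg):

    #include <stdint.h>

    /* Standard MS ADPCM step-size adaptation table. */
    static const int adaptation_table[16] = {
        230, 230, 230, 230, 307, 409, 512, 614,
        768, 614, 512, 409, 307, 230, 230, 230,
    };

    typedef struct {
        int coeff1, coeff2;    /* 8.8 fixed-point prediction coefficients */
        int sample1, sample2;  /* two previous reconstructed samples */
        int idelta;            /* current quantizer step */
    } MSChannel;

    static uint8_t ms_compress_sample(MSChannel *c, int16_t sample)
    {
        int predictor = (c->sample1 * c->coeff1 + c->sample2 * c->coeff2) / 256;
        int nibble    = sample - predictor;

        /* Quantize the residual with a half-step rounding bias. */
        int bias = (nibble >= 0) ? c->idelta / 2 : -c->idelta / 2;
        nibble = (nibble + bias) / c->idelta;
        if (nibble >  7) nibble =  7;
        if (nibble < -8) nibble = -8;
        nibble &= 0x0F;

        /* Reconstruct the sample exactly the way the decoder will. */
        predictor += ((nibble & 0x08) ? nibble - 0x10 : nibble) * c->idelta;
        c->sample2 = c->sample1;
        c->sample1 = predictor >  32767 ?  32767
                   : predictor < -32768 ? -32768 : predictor;

        /* Adapt the quantizer step; 16 is the spec's floor. */
        c->idelta = (adaptation_table[nibble] * c->idelta) >> 8;
        if (c->idelta < 16)
            c->idelta = 16;

        return (uint8_t)nibble;
    }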
|
D | tta.c |
    253  s->ch_ctx[i].predictor = 0;  in tta_decode_frame()
    265  int32_t *predictor = &s->ch_ctx[cur_chan].predictor;  in tta_decode_frame() local
    323  case 1: *p += PRED(*predictor, 4); break;  in tta_decode_frame()
    325  case 3: *p += PRED(*predictor, 5); break;  in tta_decode_frame()
    326  case 4: *p += *predictor; break;  in tta_decode_frame()
    328  *predictor = *p;  in tta_decode_frame()
|
D | ttaenc.c |
    101  s->ch_ctx[i].predictor = 0;  in tta_encode_frame()
    125  case 1: value -= PRED(c->predictor, 4); break;  in tta_encode_frame()
    127  case 3: value -= PRED(c->predictor, 5); break;  in tta_encode_frame()
    129  c->predictor = temp;  in tta_encode_frame()
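
tta.c and ttaenc.c mirror each other around the PRED(x, k) macro: the prediction is the previous sample scaled by (2^k - 1)/2^k in fixed point, with k = 4 or 5 chosen by mode (and case 4 using the previous sample unscaled). A sketch of the symmetry, with the shift arithmetic written out in plain signed 64-bit form rather than ffmpeg's exact macro:

    #include <stdint.h>

    /* TTA's first-order "leaky" predictor: prev * (2^k - 1) / 2^k. */
    static int32_t tta_pred(int32_t prev, int k)
    {
        return (int32_t)((((int64_t)prev << k) - prev) >> k);
    }

    /* Decoder side: undo the prediction, then remember the result. */
    static int32_t tta_decode_sample(int32_t *predictor, int32_t residual, int k)
    {
        int32_t sample = residual + tta_pred(*predictor, k);
        *predictor = sample;
        return sample;
    }

    /* Encoder side is the exact inverse of the decoder. */
    static int32_t tta_encode_sample(int32_t *predictor, int32_t sample, int k)
    {
        int32_t residual = sample - tta_pred(*predictor, k);
        *predictor = sample;
        return residual;
    }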
|
D | mjpeg.h |
    118  #define PREDICT(ret, topleft, top, left, predictor)\  argument
    119  switch(predictor){\
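
The PREDICT(ret, topleft, top, left, predictor) macro switches over the seven standard lossless-JPEG predictors (ITU-T T.81, Table H.1); ljpegenc.c, further down, invokes it per pixel. Written out as a function — a sketch, since the macro assigns to ret rather than returning:

    /* The standard lossless-JPEG predictor set; selection value 0 means
     * "no prediction" (used in hierarchical mode). */
    static int ljpeg_predict(int topleft, int top, int left, int predictor)
    {
        switch (predictor) {
        case 0:  return 0;
        case 1:  return left;
        case 2:  return top;
        case 3:  return topleft;
        case 4:  return left + top - topleft;
        case 5:  return left + ((top - topleft) >> 1);
        case 6:  return top + ((left - topleft) >> 1);
        case 7:  return (left + top) >> 1;
        default: return left;   /* out-of-range selector; sketch fallback */
        }
    }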
|
D | huffyuvdec.c |
    330  s->predictor = method & 63;  in decode_init()
    355  s->predictor = LEFT;  in decode_init()
    359  s->predictor = LEFT;  in decode_init()
    363  s->predictor = PLANE;  in decode_init()
    367  s->predictor = MEDIAN;  in decode_init()
    371  s->predictor = LEFT; // OLD  in decode_init()
    554  if (s->predictor == MEDIAN && avctx->pix_fmt == AV_PIX_FMT_YUV422P &&  in decode_init()
    907  switch (s->predictor) {  in decode_slice()
    918  if (s->predictor == PLANE) {  in decode_slice()
    985  switch (s->predictor) {  in decode_slice()
    [all …]
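
decode_init() maps the method byte onto HuffYUV's three spatial predictors: LEFT, PLANE, and MEDIAN. MEDIAN predicts each byte as the median of its left neighbour, top neighbour, and the planar gradient left + top - topleft (ffmpeg's mid_pred()). A sketch of the per-byte form:

    #include <stdint.h>

    /* Median of {left, top, left + top - topleft}: clamping the gradient
     * between min(left, top) and max(left, top) selects the median. */
    static uint8_t median_predict(uint8_t left, uint8_t top, uint8_t topleft)
    {
        int grad = left + top - topleft;
        int lo   = left < top ? left : top;
        int hi   = left < top ? top  : left;
        if (grad < lo) return (uint8_t)lo;
        if (grad > hi) return (uint8_t)hi;
        return (uint8_t)grad;
    }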
|
D | aptx.c |
    441  int32_t reconstructed_sample, predictor, srd0;  in aptx_prediction_filtering() local
    447  predictor = av_clip_intp2((MUL64(prediction->s_weight[0], prediction->previous_reconstructed_sampl…  in aptx_prediction_filtering()
    460  prediction->predicted_sample = av_clip_intp2(predictor + prediction->predicted_difference, 23);  in aptx_prediction_filtering()
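
aptx_prediction_filtering() keeps its filter output inside 24-bit headroom with av_clip_intp2(x, 23), which clamps a value to the signed range of p+1 bits, i.e. [-2^p, 2^p - 1]; the adpcm.c extradata seeding above uses the same helper with p = 18. Equivalent logic, written out:

    /* Clamp x to [-(1 << p), (1 << p) - 1], a signed (p+1)-bit range;
     * behaves like ffmpeg's av_clip_intp2() (without its branchless trick). */
    static int clip_intp2(int x, int p)
    {
        if (x >  (1 << p) - 1) return  (1 << p) - 1;
        if (x < -(1 << p))     return -(1 << p);
        return x;
    }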
|
D | huffyuvenc.c |
    315  s->predictor = avctx->prediction_method;  in encode_init()
    360  if (s->bitstream_bpp >= 24 && s->predictor == MEDIAN && s->version <= 2) {  in encode_init()
    366  ((uint8_t*)avctx->extradata)[0] = s->predictor | (s->decorrelate << 6);  in encode_init()
    795  if (s->predictor==MEDIAN) {  in encode_frame()
    852  if (s->predictor == PLANE && s->interlaced < y) {  in encode_frame()
    868  if (s->predictor == PLANE && s->interlaced < cy) {  in encode_frame()
    903  if (s->predictor == PLANE && s->interlaced < y) {  in encode_frame()
    931  if (s->predictor == PLANE && s->interlaced < y) {  in encode_frame()
    960  if (s->predictor==MEDIAN) {  in encode_frame()
    983  if (s->predictor == PLANE && s->interlaced < y) {  in encode_frame()
    [all …]
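
On the encoding side, LEFT and PLANE are plain differencing passes: LEFT subtracts each byte's left neighbour, and PLANE (guarded by s->interlaced < y so the first line or lines fall back to LEFT) first subtracts the row above, then left-predicts that difference. Sketched as the two loops the encoder composes — a reading of the code above, not ffmpeg's exact helpers:

    #include <stdint.h>

    /* LEFT prediction: residual = current byte minus its left neighbour
     * (wrap-around modulo 256 is intentional). */
    static void sub_left(uint8_t *dst, const uint8_t *src, int w, uint8_t left)
    {
        for (int i = 0; i < w; i++) {
            dst[i] = src[i] - left;
            left   = src[i];
        }
    }

    /* PLANE prediction, first stage: difference against the same column
     * one row up; the result is then fed through sub_left(). */
    static void sub_plane(uint8_t *dst, const uint8_t *src,
                          const uint8_t *above, int w)
    {
        for (int i = 0; i < w; i++)
            dst[i] = src[i] - above[i];
    }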
|
D | adpcm.h | 32 int predictor; member
|
D | exrdsp.h | 27 void (*predictor)(uint8_t *src, ptrdiff_t size); member
|
D | ttadata.h | 39 int32_t predictor; member
|
D | exrdsp.c | 52 c->predictor = predictor_scalar; in ff_exrdsp_init()
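
predictor_scalar(), installed here, is the C reference for the SSSE3/AVX versions under x86/ further down. EXR's ZIP/RLE-compressed data stores every byte after the first as a delta from its predecessor, biased by 128, so decoding is a single running-sum pass. A sketch assuming that layout:

    #include <stdint.h>
    #include <stddef.h>

    /* Undo EXR's byte-delta coding in place: byte i was stored as
     * (orig[i] - orig[i-1] + 128) mod 256, so reconstruct front to back. */
    static void exr_predictor(uint8_t *src, ptrdiff_t size)
    {
        for (ptrdiff_t i = 1; i < size; i++)
            src[i] = (uint8_t)(src[i] + src[i - 1] - 128);
    }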
|
D | ljpegenc.c |
    134  const AVFrame *frame, int predictor,  in ljpeg_encode_yuv_mb() argument
    162  ptr[-1], predictor);  in ljpeg_encode_yuv_mb()
    186  PREDICT(pred, ptr[-linesize - 1], ptr[-linesize], ptr[-1], predictor);  in ljpeg_encode_yuv_mb()
|
D | huffyuv.h | 58 Predictor predictor; member
|
/third_party/openGLES/extensions/SGIX/ |
D | SGIX_mpeg1.txt |
    35   MPEG predictor frames are stored as two-dimensional RGB images of
    38   predictor images takes place through the GL routines
    67   * When drawing from a predictor object, we always take the
    69     the predictor? A similar question arises for drawing to a
    70     predictor object.
    77   * Currently, we require that the user allocate a predictor
    79     predictor created at context init. Is this the correct
    106  predictor space available?
    123  * We have used a scheme for predictor management which is
    133  upsampling prior to motion compensation and predictor
    [all …]
|
D | SGIX_mpeg2.txt |
    31   read from only the top or bottom fields of a predictor frame. The
    131  predictor update step is performed differently than the MPEG1
    132  predictor update step. In the predictor update step during MPEG2
    133  field decompression, only the top or bottom field of the predictor
    136  the predictor). The other field of the predictor is left
    137  unchanged. The predictor height must be twice the height of the
    139  processing terminates without any change to the predictor frame
    179  predictor is determined by the values of
    183  PACK_MPEG_PREDICTOR_FWD_SGIX, the predictor bound to
    186  the predictor bound to PACK_MPEG_PREDICTOR_BACK_SGIX is used as
    [all …]
|
/third_party/skia/third_party/externals/opengl-registry/extensions/SGIX/ |
D | SGIX_mpeg1.txt | (same matches as the /third_party/openGLES copy above)
|
D | SGIX_mpeg2.txt | (same matches as the /third_party/openGLES copy above)
|
/third_party/ffmpeg/libavcodec/x86/ |
D | exrdsp_init.c |
    43  dsp->predictor = ff_predictor_ssse3;  in ff_exrdsp_init_x86()
    46  dsp->predictor = ff_predictor_avx;  in ff_exrdsp_init_x86()
    50  dsp->predictor = ff_predictor_avx2;  in ff_exrdsp_init_x86()
|
D | exrdsp.asm |
    5   ;* reorder_pixels, predictor based on patch by John Loy
    8   ;* predictor AVX/AVX2 by Henrik Gramner
    75  cglobal predictor, 2,2,5, src, size
|
/third_party/libsnd/src/ |
D | ima_adpcm.c |
    257  int chan, k, diff, bytecode, predictor ;  in aiff_ima_decode_block() local
    280  predictor = (int) ((short) ((blockdata [0] << 8) | (blockdata [1] & 0x80))) ;  in aiff_ima_decode_block()
    310  predictor += diff ;  in aiff_ima_decode_block()
    311  if (predictor < -32768)  in aiff_ima_decode_block()
    312  predictor = -32768 ;  in aiff_ima_decode_block()
    313  else if (predictor > 32767)  in aiff_ima_decode_block()
    314  predictor = 32767 ;  in aiff_ima_decode_block()
    316  pima->samples [pima->channels * k + chan] = predictor ;  in aiff_ima_decode_block()
    388  { int chan, k, predictor, blockindx, indx, indxstart, diff ;  in wavlike_ima_decode_block() local
    405  { predictor = pima->block [chan*4] | (pima->block [chan*4+1] << 8) ;  in wavlike_ima_decode_block()
    [all …]
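
The aiff_ima_decode_block() hit at line 280 unpacks the 2-byte "ima4" block header: the predictor seed lives in the top 9 bits and the step-table index in the low 7 bits of the second byte. A hedged sketch of just that parse (helper name is hypothetical):

    #include <stdint.h>

    /* Parse an AIFF "ima4" per-channel block header. The int16_t cast
     * sign-extends the 9-bit-aligned predictor, matching libsndfile's
     * (short) cast above. */
    static void aiff_ima_parse_header(const uint8_t hdr[2],
                                      int *predictor, int *step_index)
    {
        *predictor  = (int16_t)((hdr[0] << 8) | (hdr[1] & 0x80));
        *step_index = hdr[1] & 0x7F;
        if (*step_index > 88)       /* keep inside the 89-entry step table */
            *step_index = 88;
    }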
|
/third_party/skia/third_party/externals/dng_sdk/source/ |
D | dng_lossless_jpeg.cpp |
    2515  int32 predictor = QuickPredict (col,  in DecodeImage() local
    2522  curRowBuf [col] [curComp] = (ComponentType) (d + predictor);  in DecodeImage()
    2978  int32 predictor [4];  in FreqCountSet() local
    2984  predictor [channel] = 1 << (fSrcBitDepth - 1);  in FreqCountSet()
    2987  predictor [channel] = sPtr [channel - fSrcRowStep];  in FreqCountSet()
    2996  int32 pred0 = predictor [0];  in FreqCountSet()
    2997  int32 pred1 = predictor [1];  in FreqCountSet()
    3036  int16 diff = (int16) (pixel - predictor [channel]);  in FreqCountSet()
    3040  predictor [channel] = pixel;  in FreqCountSet()
    3084  int32 predictor [4];  in HuffEncode() local
    [all …]
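
The DNG SDK's encode passes keep a per-channel predictor seeded once per row — the mid-range value 1 << (bitDepth - 1) on the first row, the pixel one row up afterwards — and then update it horizontally as each pixel is coded. Two small helpers sketching that flow (names are illustrative, not the SDK's):

    #include <stdint.h>

    /* Row-start seed: sample-range midpoint on row 0, pixel above otherwise. */
    static int32_t seed_predictor(const uint16_t *row_above, int channel,
                                  int row, int bit_depth)
    {
        if (row == 0)
            return 1 << (bit_depth - 1);   /* e.g. 32768 for 16-bit data */
        return row_above[channel];
    }

    /* Per-pixel step: emit the residual, then slide the predictor right. */
    static int16_t encode_step(int32_t *predictor, uint16_t pixel)
    {
        int16_t diff = (int16_t)(pixel - *predictor);
        *predictor = pixel;
        return diff;
    }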
|
/third_party/ffmpeg/tests/checkasm/ |
D | exrdsp.c | 88 if (check_func(h.predictor, "predictor")) in checkasm_check_exrdsp()
|