1 /*!
2 * \copy
3 * Copyright (c) 2009-2013, Cisco Systems
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 *
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
28 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 *
32 * \file svc_set_mb_syn_cavlc.h
33 *
34 * \brief Seting all syntax elements of mb and decoding residual with cavlc
35 *
36 * \date 2009.8.12 Created
37 *
38 *************************************************************************************
39 */
40
41 #include "vlc_encoder.h"
42 #include "ls_defines.h"
43 #include "svc_set_mb_syn.h"
44
45 namespace WelsEnc {
// Maps a 6-bit coded_block_pattern value (luma CBP in low 4 bits, chroma CBP
// in high 2 bits) to the UE(v) codeword index written for intra-coded MBs
// (see the BsWriteUE call in WelsSpatialWriteMbSyn). Presumably the inverse
// of H.264 Table 9-4 (intra column) -- verify against the spec.
const uint32_t g_kuiIntra4x4CbpMap[48] = {
  3, 29, 30, 17, 31, 18, 37, 8, 32, 38, 19, 9, 20, 10, 11, 2, //15
  16, 33, 34, 21, 35, 22, 39, 4, 36, 40, 23, 5, 24, 6, 7, 1, //31
  41, 42, 43, 25, 44, 26, 46, 12, 45, 47, 27, 13, 28, 14, 15, 0 //47
};
51
// Maps a 6-bit coded_block_pattern value to the UE(v) codeword index written
// for inter-coded (non-I16x16) MBs (see WelsSpatialWriteMbSyn). Presumably the
// inverse of H.264 Table 9-4 (inter column) -- verify against the spec.
const uint32_t g_kuiInterCbpMap[48] = {
  0, 2, 3, 7, 4, 8, 17, 13, 5, 18, 9, 14, 10, 15, 16, 11, //15
  1, 32, 33, 36, 34, 37, 44, 40, 35, 45, 38, 41, 39, 42, 43, 19, //31
  6, 24, 25, 20, 26, 21, 46, 28, 27, 47, 22, 29, 23, 30, 31, 12 //47
};
57
58 //============================Enhance Layer CAVLC Writing===========================
//! \brief Write the mb_type and prediction part (mb_pred) of one macroblock
//!        to the slice bitstream with CAVLC/Exp-Golomb coding.
//!
//! Handles Intra4x4, Intra16x16, and the three non-sub-partitioned inter
//! shapes (16x16, 16x8, 8x16). 8x8 sub-partitions are handled separately by
//! WelsSpatialWriteSubMbPred. Returns silently (writing nothing) for slice
//! types other than I/P.
//!
//! \param pEncCtx  encoder context (unused here except via pSlice contents)
//! \param pSlice   slice being written; supplies the bitstream and MB cache
//! \param pCurMb   macroblock whose prediction syntax is emitted
void WelsSpatialWriteMbPred (sWelsEncCtx* pEncCtx, SSlice* pSlice, SMB* pCurMb) {
  SMbCache* pMbCache = &pSlice->sMbCacheInfo;
  SBitStringAux* pBs = pSlice->pSliceBsa;
  SSliceHeaderExt* pSliceHeadExt = &pSlice->sSliceHeaderExt;
  int32_t iNumRefIdxl0ActiveMinus1 = pSliceHeadExt->sSliceHeader.uiNumRefIdxL0Active - 1;

  Mb_Type uiMbType = pCurMb->uiMbType;
  int32_t iCbpChroma = pCurMb->uiCbp >> 4;   // high 2 bits of CBP
  int32_t iCbpLuma = pCurMb->uiCbp & 15;     // low 4 bits of CBP
  int32_t i = 0;

  SMVUnitXY sMvd[2];
  bool* pPredFlag;
  int8_t* pRemMode;

  // mb_type offset by slice type: intra types start at 5 in P slices
  // (H.264 numbering -- presumably per Table 7-13; verify).
  int32_t iMbOffset = 0;

  switch (pSliceHeadExt->sSliceHeader.eSliceType) {
  case I_SLICE:
    iMbOffset = 0;
    break;
  case P_SLICE:
    iMbOffset = 5;
    break;
  default:
    return; // unsupported slice type: emit nothing
  }

  switch (uiMbType) {
  case MB_TYPE_INTRA4x4:
    /* mb type */
    BsWriteUE (pBs, iMbOffset + 0);

    /* prediction: luma -- one pred-mode flag (plus 3-bit remaining mode when
     * the predicted mode is not used) for each of the 16 4x4 blocks */
    pPredFlag = &pMbCache->pPrevIntra4x4PredModeFlag[0];
    pRemMode = &pMbCache->pRemIntra4x4PredModeFlag[0];
    do {
      BsWriteOneBit (pBs, *pPredFlag); /* b_prev_intra4x4_pred_mode */

      if (!*pPredFlag) {
        BsWriteBits (pBs, 3, *pRemMode); /* rem_intra4x4_pred_mode, 3 bits */
      }

      pPredFlag++;
      pRemMode++;
      ++ i;
    } while (i < 16);

    /* prediction: chroma */
    BsWriteUE (pBs, g_kiMapModeIntraChroma[pMbCache->uiChmaI8x8Mode]);

    break;

  case MB_TYPE_INTRA16x16:
    /* mb type: I16x16 mb_type jointly encodes the luma pred mode, the chroma
     * CBP and whether any luma AC coefficients are present */
    BsWriteUE (pBs, 1 + iMbOffset + g_kiMapModeI16x16[pMbCache->uiLumaI16x16Mode] + (iCbpChroma << 2) +
               (iCbpLuma == 0 ? 0 : 12));

    /* prediction: chroma */
    BsWriteUE (pBs, g_kiMapModeIntraChroma[pMbCache->uiChmaI8x8Mode]);

    break;

  case MB_TYPE_16x16:
    BsWriteUE (pBs, 0); //uiMbType
    // MV difference = actual MV - predicted MV
    sMvd[0].sDeltaMv (pCurMb->sMv[0], pMbCache->sMbMvp[0]);

    if (iNumRefIdxl0ActiveMinus1 > 0) {
      BsWriteTE (pBs, iNumRefIdxl0ActiveMinus1, pCurMb->pRefIndex[0]);
    }

    BsWriteSE (pBs, sMvd[0].iMvX);
    BsWriteSE (pBs, sMvd[0].iMvY);

    break;

  case MB_TYPE_16x8:
    BsWriteUE (pBs, 1); //uiMbType

    // two partitions stacked vertically; sMv index 8 is the top-left 4x4
    // block of the lower partition
    sMvd[0].sDeltaMv (pCurMb->sMv[0], pMbCache->sMbMvp[0]);
    sMvd[1].sDeltaMv (pCurMb->sMv[8], pMbCache->sMbMvp[1]);

    if (iNumRefIdxl0ActiveMinus1 > 0) {
      BsWriteTE (pBs, iNumRefIdxl0ActiveMinus1, pCurMb->pRefIndex[0]);
      BsWriteTE (pBs, iNumRefIdxl0ActiveMinus1, pCurMb->pRefIndex[2]);
    }
    BsWriteSE (pBs, sMvd[0].iMvX); //block0
    BsWriteSE (pBs, sMvd[0].iMvY);
    BsWriteSE (pBs, sMvd[1].iMvX); //block1
    BsWriteSE (pBs, sMvd[1].iMvY);

    break;

  case MB_TYPE_8x16:
    BsWriteUE (pBs, 2); //uiMbType

    // two partitions side by side; sMv index 2 is the top-left 4x4 block
    // of the right partition
    sMvd[0].sDeltaMv (pCurMb->sMv[0], pMbCache->sMbMvp[0]);
    sMvd[1].sDeltaMv (pCurMb->sMv[2], pMbCache->sMbMvp[1]);

    if (iNumRefIdxl0ActiveMinus1 > 0) {
      BsWriteTE (pBs, iNumRefIdxl0ActiveMinus1, pCurMb->pRefIndex[0]);
      BsWriteTE (pBs, iNumRefIdxl0ActiveMinus1, pCurMb->pRefIndex[1]);
    }
    BsWriteSE (pBs, sMvd[0].iMvX); //block0
    BsWriteSE (pBs, sMvd[0].iMvY);
    BsWriteSE (pBs, sMvd[1].iMvX); //block1
    BsWriteSE (pBs, sMvd[1].iMvY);

    break;
  }
}
170
//! \brief Write mb_type, sub_mb_type and the sub-macroblock prediction data
//!        (ref indices and MV differences) for a P_8x8 macroblock.
//!
//! Emits mb_type 4 (P_8x8ref0) when all four ref indices are zero -- in that
//! case no ref indices are coded -- otherwise mb_type 3 (P_8x8) followed by
//! the four ref indices. MVDs are then written per 8x8 partition in the
//! g_kuiMbCountScan4Idx scan order, 1/2/4 vectors depending on sub_mb_type.
//!
//! \param pEncCtx  encoder context (unused directly)
//! \param pSlice   slice being written; supplies the bitstream and MB cache
//! \param pCurMb   macroblock whose sub-partition syntax is emitted
void WelsSpatialWriteSubMbPred (sWelsEncCtx* pEncCtx, SSlice* pSlice, SMB* pCurMb) {
  SMbCache* pMbCache = &pSlice->sMbCacheInfo;
  SBitStringAux* pBs = pSlice->pSliceBsa;
  SSliceHeaderExt* pSliceHeadExt = &pSlice->sSliceHeaderExt;

  int32_t iNumRefIdxl0ActiveMinus1 = pSliceHeadExt->sSliceHeader.uiNumRefIdxL0Active - 1;
  int32_t i;

  bool bSubRef0 = false;
  const uint8_t* kpScan4 = & (g_kuiMbCountScan4Idx[0]);

  /* mb type: LD32 reads all four 8-bit ref indices at once; 0 means every
   * partition references picture 0, allowing the cheaper P_8x8ref0 type */
  if (LD32 (pCurMb->pRefIndex) == 0) {
    BsWriteUE (pBs, 4);
    bSubRef0 = false;
  } else {
    BsWriteUE (pBs, 3);
    bSubRef0 = true;
  }

  //step 1: sub_mb_type
  for (i = 0; i < 4; i++) {
    switch (pCurMb->uiSubMbType[i]) {
    case SUB_MB_TYPE_8x8:
      BsWriteUE (pBs, 0);
      break;
    case SUB_MB_TYPE_8x4:
      BsWriteUE (pBs, 1);
      break;
    case SUB_MB_TYPE_4x8:
      BsWriteUE (pBs, 2);
      break;
    case SUB_MB_TYPE_4x4:
      BsWriteUE (pBs, 3);
      break;
    default: //should not enter
      break;
    }
  }

  //step 2: get and write uiRefIndex and sMvd
  if (iNumRefIdxl0ActiveMinus1 > 0 && bSubRef0) {
    BsWriteTE (pBs, iNumRefIdxl0ActiveMinus1, pCurMb->pRefIndex[0]);
    BsWriteTE (pBs, iNumRefIdxl0ActiveMinus1, pCurMb->pRefIndex[1]);
    BsWriteTE (pBs, iNumRefIdxl0ActiveMinus1, pCurMb->pRefIndex[2]);
    BsWriteTE (pBs, iNumRefIdxl0ActiveMinus1, pCurMb->pRefIndex[3]);
  }
  //write sMvd: kpScan4 advances by 4 per 8x8 partition; offsets +1/+2/+3
  //select the other 4x4 blocks inside the current partition in scan order
  for (i = 0; i < 4; i++) {
    uint32_t uiSubMbType = pCurMb->uiSubMbType[i];
    if (SUB_MB_TYPE_8x8 == uiSubMbType) {
      // one MVD for the whole 8x8
      BsWriteSE (pBs, pCurMb->sMv[*kpScan4].iMvX - pMbCache->sMbMvp[*kpScan4].iMvX);
      BsWriteSE (pBs, pCurMb->sMv[*kpScan4].iMvY - pMbCache->sMbMvp[*kpScan4].iMvY);
    } else if (SUB_MB_TYPE_4x4 == uiSubMbType) {
      // four MVDs, one per 4x4 block
      BsWriteSE (pBs, pCurMb->sMv[*kpScan4].iMvX - pMbCache->sMbMvp[*kpScan4].iMvX);
      BsWriteSE (pBs, pCurMb->sMv[*kpScan4].iMvY - pMbCache->sMbMvp[*kpScan4].iMvY);
      BsWriteSE (pBs, pCurMb->sMv[* (kpScan4 + 1)].iMvX - pMbCache->sMbMvp[* (kpScan4 + 1)].iMvX);
      BsWriteSE (pBs, pCurMb->sMv[* (kpScan4 + 1)].iMvY - pMbCache->sMbMvp[* (kpScan4 + 1)].iMvY);
      BsWriteSE (pBs, pCurMb->sMv[* (kpScan4 + 2)].iMvX - pMbCache->sMbMvp[* (kpScan4 + 2)].iMvX);
      BsWriteSE (pBs, pCurMb->sMv[* (kpScan4 + 2)].iMvY - pMbCache->sMbMvp[* (kpScan4 + 2)].iMvY);
      BsWriteSE (pBs, pCurMb->sMv[* (kpScan4 + 3)].iMvX - pMbCache->sMbMvp[* (kpScan4 + 3)].iMvX);
      BsWriteSE (pBs, pCurMb->sMv[* (kpScan4 + 3)].iMvY - pMbCache->sMbMvp[* (kpScan4 + 3)].iMvY);
    } else if (SUB_MB_TYPE_8x4 == uiSubMbType) {
      // two MVDs: upper and lower 8x4 (offsets 0 and +2 in scan order)
      BsWriteSE (pBs, pCurMb->sMv[*kpScan4].iMvX - pMbCache->sMbMvp[*kpScan4].iMvX);
      BsWriteSE (pBs, pCurMb->sMv[*kpScan4].iMvY - pMbCache->sMbMvp[*kpScan4].iMvY);
      BsWriteSE (pBs, pCurMb->sMv[* (kpScan4 + 2)].iMvX - pMbCache->sMbMvp[* (kpScan4 + 2)].iMvX);
      BsWriteSE (pBs, pCurMb->sMv[* (kpScan4 + 2)].iMvY - pMbCache->sMbMvp[* (kpScan4 + 2)].iMvY);
    } else if (SUB_MB_TYPE_4x8 == uiSubMbType) {
      // two MVDs: left and right 4x8 (offsets 0 and +1 in scan order)
      BsWriteSE (pBs, pCurMb->sMv[*kpScan4].iMvX - pMbCache->sMbMvp[*kpScan4].iMvX);
      BsWriteSE (pBs, pCurMb->sMv[*kpScan4].iMvY - pMbCache->sMbMvp[*kpScan4].iMvY);
      BsWriteSE (pBs, pCurMb->sMv[* (kpScan4 + 1)].iMvX - pMbCache->sMbMvp[* (kpScan4 + 1)].iMvX);
      BsWriteSE (pBs, pCurMb->sMv[* (kpScan4 + 1)].iMvY - pMbCache->sMbMvp[* (kpScan4 + 1)].iMvY);
    }
    kpScan4 += 4;
  }
}
247
CheckBitstreamBuffer(const uint32_t kuiSliceIdx,sWelsEncCtx * pEncCtx,SBitStringAux * pBs)248 int32_t CheckBitstreamBuffer (const uint32_t kuiSliceIdx, sWelsEncCtx* pEncCtx, SBitStringAux* pBs) {
249 const intX_t iLeftLength = pBs->pEndBuf - pBs->pCurBuf - 1;
250 assert (iLeftLength > 0);
251
252 if (iLeftLength < MAX_MACROBLOCK_SIZE_IN_BYTE_x2) {
253 return ENC_RETURN_VLCOVERFLOWFOUND;//ENC_RETURN_MEMALLOCERR;
254 //TODO: call the realloc© instead
255 }
256 return ENC_RETURN_SUCCESS;
257 }
258
259 //============================Base Layer CAVLC Writing===============================
WelsSpatialWriteMbSyn(sWelsEncCtx * pEncCtx,SSlice * pSlice,SMB * pCurMb)260 int32_t WelsSpatialWriteMbSyn (sWelsEncCtx* pEncCtx, SSlice* pSlice, SMB* pCurMb) {
261 SBitStringAux* pBs = pSlice->pSliceBsa;
262 SMbCache* pMbCache = &pSlice->sMbCacheInfo;
263 const uint8_t kuiChromaQpIndexOffset = pEncCtx->pCurDqLayer->sLayerInfo.pPpsP->uiChromaQpIndexOffset;
264
265 if (IS_SKIP (pCurMb->uiMbType)) {
266 pCurMb->uiLumaQp = pSlice->uiLastMbQp;
267 pCurMb->uiChromaQp = g_kuiChromaQpTable[CLIP3_QP_0_51 (pCurMb->uiLumaQp + kuiChromaQpIndexOffset)];
268
269 pSlice->iMbSkipRun++;
270 return ENC_RETURN_SUCCESS;
271 } else {
272 if (pEncCtx->eSliceType != I_SLICE) {
273 BsWriteUE (pBs, pSlice->iMbSkipRun);
274 pSlice->iMbSkipRun = 0;
275 }
276 /* Step 1: write mb type and pred */
277 if (IS_Inter_8x8 (pCurMb->uiMbType)) {
278 WelsSpatialWriteSubMbPred (pEncCtx, pSlice, pCurMb);
279 } else {
280 WelsSpatialWriteMbPred (pEncCtx, pSlice, pCurMb);
281 }
282
283 /* Step 2: write coded block patern */
284 if (IS_INTRA4x4 (pCurMb->uiMbType)) {
285 BsWriteUE (pBs, g_kuiIntra4x4CbpMap[pCurMb->uiCbp]);
286 } else if (!IS_INTRA16x16 (pCurMb->uiMbType)) {
287 BsWriteUE (pBs, g_kuiInterCbpMap[pCurMb->uiCbp]);
288 }
289
290 /* Step 3: write QP and residual */
291 if (pCurMb->uiCbp > 0 || IS_INTRA16x16 (pCurMb->uiMbType)) {
292 const int32_t kiDeltaQp = pCurMb->uiLumaQp - pSlice->uiLastMbQp;
293 pSlice->uiLastMbQp = pCurMb->uiLumaQp;
294
295 BsWriteSE (pBs, kiDeltaQp);
296 if (WelsWriteMbResidual (pEncCtx->pFuncList, pMbCache, pCurMb, pBs))
297 return ENC_RETURN_VLCOVERFLOWFOUND;
298 } else {
299 pCurMb->uiLumaQp = pSlice->uiLastMbQp;
300 pCurMb->uiChromaQp = g_kuiChromaQpTable[CLIP3_QP_0_51 (pCurMb->uiLumaQp +
301 pEncCtx->pCurDqLayer->sLayerInfo.pPpsP->uiChromaQpIndexOffset)];
302 }
303
304 /* Step 4: Check the left buffer */
305 return CheckBitstreamBuffer (pSlice->iSliceIdx, pEncCtx, pBs);
306 }
307 }
308
//! \brief Write the CAVLC residual of one macroblock: luma DC/AC (I16x16) or
//!        luma 4x4 blocks, then chroma DC and chroma AC per the CBP.
//!
//! For each block the nC context is the average of the non-zero coefficient
//! counts of the left (iIdx - 1) and top (iIdx - 8) neighbours in the 48-entry
//! cache layout, combined via WELS_NON_ZERO_COUNT_AVERAGE.
//!
//! \param pFuncList     function-pointer table forwarded to the block writer
//! \param sMbCacheInfo  MB cache holding DCT coefficients and nnz counts
//!                      (note: pointer despite the 's' prefix)
//! \param pCurMb        macroblock being written (type, CBP)
//! \param pBs           target bitstream
//! \return 0 on success, ENC_RETURN_VLCOVERFLOWFOUND when any block write
//!         overflows the bitstream.
int32_t WelsWriteMbResidual (SWelsFuncPtrList* pFuncList, SMbCache* sMbCacheInfo, SMB* pCurMb, SBitStringAux* pBs) {
  int32_t i;
  Mb_Type uiMbType = pCurMb->uiMbType;
  const int32_t kiCbpChroma = pCurMb->uiCbp >> 4;
  const int32_t kiCbpLuma = pCurMb->uiCbp & 0x0F;
  int8_t* pNonZeroCoeffCount = sMbCacheInfo->iNonZeroCoeffCount;
  int16_t* pBlock;
  int8_t iA, iB, iC; // left nnz, top nnz, averaged nC context

  if (IS_INTRA16x16 (uiMbType)) {
    /* DC luma: 16 coefficients, always coded for I16x16 */
    iA = pNonZeroCoeffCount[8];
    iB = pNonZeroCoeffCount[ 1];
    WELS_NON_ZERO_COUNT_AVERAGE (iC, iA, iB);
    if (WriteBlockResidualCavlc (pFuncList, sMbCacheInfo->pDct->iLumaI16x16Dc, 15, 1, LUMA_4x4, iC, pBs))
      return ENC_RETURN_VLCOVERFLOWFOUND;

    /* AC Luma: 15 coefficients per 4x4 block (DC excluded) */
    if (kiCbpLuma) {
      pBlock = sMbCacheInfo->pDct->iLumaBlock[0];

      for (i = 0; i < 16; i++) {
        int32_t iIdx = g_kuiCache48CountScan4Idx[i];
        iA = pNonZeroCoeffCount[iIdx - 1];
        iB = pNonZeroCoeffCount[iIdx - 8];
        WELS_NON_ZERO_COUNT_AVERAGE (iC, iA, iB);
        if (WriteBlockResidualCavlc (pFuncList, pBlock, 14, pNonZeroCoeffCount[iIdx] > 0, LUMA_AC, iC, pBs))
          return ENC_RETURN_VLCOVERFLOWFOUND;
        pBlock += 16;
      }
    }
  } else {
    /* Luma DC AC: full 4x4 blocks, written per 8x8 group when its CBP bit
     * is set; the four blocks of a group use cache offsets 0/+1/+8/+9 */
    if (kiCbpLuma) {
      pBlock = sMbCacheInfo->pDct->iLumaBlock[0];

      for (i = 0; i < 16; i += 4) {
        if (kiCbpLuma & (1 << (i >> 2))) {
          int32_t iIdx = g_kuiCache48CountScan4Idx[i];
          // snapshot the group's own nnz counts before they are used as
          // neighbour context for the later blocks in the same group
          const int8_t kiA = pNonZeroCoeffCount[iIdx];
          const int8_t kiB = pNonZeroCoeffCount[iIdx + 1];
          const int8_t kiC = pNonZeroCoeffCount[iIdx + 8];
          const int8_t kiD = pNonZeroCoeffCount[iIdx + 9];
          iA = pNonZeroCoeffCount[iIdx - 1];
          iB = pNonZeroCoeffCount[iIdx - 8];
          WELS_NON_ZERO_COUNT_AVERAGE (iC, iA, iB);
          if (WriteBlockResidualCavlc (pFuncList, pBlock, 15, kiA > 0, LUMA_4x4, iC, pBs))
            return ENC_RETURN_VLCOVERFLOWFOUND;

          iA = kiA;
          iB = pNonZeroCoeffCount[iIdx - 7];
          WELS_NON_ZERO_COUNT_AVERAGE (iC, iA, iB);
          if (WriteBlockResidualCavlc (pFuncList, pBlock + 16, 15, kiB > 0, LUMA_4x4, iC, pBs))
            return ENC_RETURN_VLCOVERFLOWFOUND;

          iA = pNonZeroCoeffCount[iIdx + 7];
          iB = kiA;
          WELS_NON_ZERO_COUNT_AVERAGE (iC, iA, iB);
          if (WriteBlockResidualCavlc (pFuncList, pBlock + 32, 15, kiC > 0, LUMA_4x4, iC, pBs))
            return ENC_RETURN_VLCOVERFLOWFOUND;

          iA = kiC;
          iB = kiB;
          WELS_NON_ZERO_COUNT_AVERAGE (iC, iA, iB);
          if (WriteBlockResidualCavlc (pFuncList, pBlock + 48, 15, kiD > 0, LUMA_4x4, iC, pBs))
            return ENC_RETURN_VLCOVERFLOWFOUND;
        }
        pBlock += 64; // 4 blocks * 16 coefficients per 8x8 group
      }
    }
  }

  if (kiCbpChroma) {
    /* Chroma DC residual present: 4 coefficients each, fixed nC offset */
    pBlock = sMbCacheInfo->pDct->iChromaDc[0]; // Cb
    if (WriteBlockResidualCavlc (pFuncList, pBlock, 3, 1, CHROMA_DC, CHROMA_DC_NC_OFFSET, pBs))
      return ENC_RETURN_VLCOVERFLOWFOUND;

    pBlock += 4; // Cr
    if (WriteBlockResidualCavlc (pFuncList, pBlock, 3, 1, CHROMA_DC, CHROMA_DC_NC_OFFSET, pBs))
      return ENC_RETURN_VLCOVERFLOWFOUND;

    /* Chroma AC residual present (CBP chroma == 2) */
    if (kiCbpChroma & 0x02) {
      const uint8_t* kCache48CountScan4Idx16base = &g_kuiCache48CountScan4Idx[16];
      pBlock = sMbCacheInfo->pDct->iChromaBlock[0]; // Cb

      for (i = 0; i < 4; i++) {
        int32_t iIdx = kCache48CountScan4Idx16base[i];
        iA = pNonZeroCoeffCount[iIdx - 1];
        iB = pNonZeroCoeffCount[iIdx - 8];
        WELS_NON_ZERO_COUNT_AVERAGE (iC, iA, iB);
        if (WriteBlockResidualCavlc (pFuncList, pBlock, 14, pNonZeroCoeffCount[iIdx] > 0, CHROMA_AC, iC, pBs))
          return ENC_RETURN_VLCOVERFLOWFOUND;
        pBlock += 16;
      }

      pBlock = sMbCacheInfo->pDct->iChromaBlock[4]; // Cr

      for (i = 0; i < 4; i++) {
        int32_t iIdx = 24 + kCache48CountScan4Idx16base[i]; // Cr section of the nnz cache
        iA = pNonZeroCoeffCount[iIdx - 1];
        iB = pNonZeroCoeffCount[iIdx - 8];
        WELS_NON_ZERO_COUNT_AVERAGE (iC, iA, iB);
        if (WriteBlockResidualCavlc (pFuncList, pBlock, 14, pNonZeroCoeffCount[iIdx] > 0, CHROMA_AC, iC, pBs))
          return ENC_RETURN_VLCOVERFLOWFOUND;
        pBlock += 16;
      }
    }
  }
  return 0;
}
421
422 } // namespace WelsEnc
423