/*!
 * \copy
 * Copyright (c) 2009-2013, Cisco Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * \file mv_pred.cpp
 *
 * \brief Get the MV predictor and update the motion vectors of the MB cache
 *
 * \date 05/22/2009 Created
 *
 *************************************************************************************
 */

#include "mv_pred.h"
#include "ls_defines.h"

namespace WelsEnc {
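/*
 * Cache geometry implied by the indexing below (reader's note, not from the
 * original source): the MV and ref-index caches are 30-entry arrays viewed as
 * a 5x6 grid with stride 6. Row 0 (indices 0-5) holds the 4x4 blocks above
 * the current MB, with 0 as the top-left and 5 as the top-right corner;
 * column 0 (indices 6, 12, 18, 24) holds the blocks to its left. The current
 * MB's sixteen 4x4 blocks occupy indices 7-10, 13-16, 19-22 and 25-28, which
 * is why PredMv reaches the left neighbor with "- 1" and the top neighbor
 * with "- 6":
 *
 *      0  1  2  3  4  5
 *      6  7  8  9 10 11
 *     12 13 14 15 16 17
 *     18 19 20 21 22 23
 *     24 25 26 27 28 29
 */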
//basic pMv prediction unit for pMv width (4, 2, 1)
void PredMv (const SMVComponentUnit* kpMvComp, int8_t iPartIdx, int8_t iPartW, int32_t iRef, SMVUnitXY* sMvp) {
  const uint8_t kuiLeftIdx = g_kuiCache30ScanIdx[iPartIdx] - 1;
  const uint8_t kuiTopIdx = g_kuiCache30ScanIdx[iPartIdx] - 6;

  int32_t iMatchRef;
  int32_t iLeftRef = kpMvComp->iRefIndexCache[kuiLeftIdx];
  int32_t iTopRef = kpMvComp->iRefIndexCache[kuiTopIdx];
  int32_t iRightTopRef = kpMvComp->iRefIndexCache[kuiTopIdx + iPartW];
  int32_t iDiagonalRef;
  SMVUnitXY sMvA (kpMvComp->sMotionVectorCache[kuiLeftIdx]);
  SMVUnitXY sMvB (kpMvComp->sMotionVectorCache[kuiTopIdx]);
  SMVUnitXY sMvC;

  if (REF_NOT_AVAIL == iRightTopRef) {
    iDiagonalRef = kpMvComp->iRefIndexCache[kuiTopIdx - 1]; // fall back to left-top (D)
    sMvC = kpMvComp->sMotionVectorCache[kuiTopIdx - 1];
  } else {
    iDiagonalRef = iRightTopRef; // right-top (C)
    sMvC = kpMvComp->sMotionVectorCache[kuiTopIdx + iPartW];
  }

  // only the left neighbor is available: take its MV directly
  if ((REF_NOT_AVAIL == iTopRef) && (REF_NOT_AVAIL == iDiagonalRef) && (REF_NOT_AVAIL != iLeftRef)) {
    *sMvp = sMvA;
    return;
  }

  // b2[diag] b1[top] b0[left]: which neighbors match the target reference iRef
  iMatchRef = (iRef == iLeftRef) << MB_LEFT_BIT;
  iMatchRef |= (iRef == iTopRef) << MB_TOP_BIT;
  iMatchRef |= (iRef == iDiagonalRef) << MB_TOPRIGHT_BIT;
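  // Selection per the spec's rule (cf. H.264 subclause 8.4.1.3): if exactly
  // one of A (left), B (top), C (diagonal) uses the same reference picture,
  // its MV is the predictor; otherwise take the component-wise median.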
  switch (iMatchRef) {
  case LEFT_MB_POS: // A
    *sMvp = sMvA;
    break;
  case TOP_MB_POS: // B
    *sMvp = sMvB;
    break;
  case TOPRIGHT_MB_POS: // C or D
    *sMvp = sMvC;
    break;
  default:
    sMvp->iMvX = WelsMedian (sMvA.iMvX, sMvB.iMvX, sMvC.iMvX);
    sMvp->iMvY = WelsMedian (sMvA.iMvY, sMvB.iMvY, sMvC.iMvY);
    break;
  }
}
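
/*
 * Illustrative call (a sketch, not part of the original source): predicting
 * the MV of a whole 16x16 partition starts from block 0 with a width of four
 * 4x4 units, exactly as PredSkipMv below does:
 *
 *   SMVUnitXY sMvp;
 *   PredMv (&pMbCache->sMvComponents, 0, 4, 0, &sMvp); // iPartIdx 0, 4 blocks wide, ref 0
 */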
void PredInter8x16Mv (SMbCache* pMbCache, int32_t iPartIdx, int8_t iRef, SMVUnitXY* sMvp) {
  const SMVComponentUnit* kpMvComp = &pMbCache->sMvComponents;
  if (0 == iPartIdx) {
    const int8_t kiLeftRef = kpMvComp->iRefIndexCache[6];
    if (iRef == kiLeftRef) {
      *sMvp = kpMvComp->sMotionVectorCache[6];
      return;
    }
  } else { // 4 == iPartIdx
    int8_t iDiagonalRef = kpMvComp->iRefIndexCache[5]; // top-right (C)
    int8_t iIndex = 5;
    if (REF_NOT_AVAIL == iDiagonalRef) {
      iDiagonalRef = kpMvComp->iRefIndexCache[2]; // top-left diagonal (D) of partition 1
      iIndex = 2;
    }
    if (iRef == iDiagonalRef) {
      *sMvp = kpMvComp->sMotionVectorCache[iIndex];
      return;
    }
  }

  PredMv (kpMvComp, iPartIdx, 2, iRef, sMvp);
}
void PredInter16x8Mv (SMbCache* pMbCache, int32_t iPartIdx, int8_t iRef, SMVUnitXY* sMvp) {
  const SMVComponentUnit* kpMvComp = &pMbCache->sMvComponents;
  if (0 == iPartIdx) {
    const int8_t kiTopRef = kpMvComp->iRefIndexCache[1];
    if (iRef == kiTopRef) {
      *sMvp = kpMvComp->sMotionVectorCache[1];
      return;
    }
  } else { // 8 == iPartIdx
    const int8_t kiLeftRef = kpMvComp->iRefIndexCache[18];
    if (iRef == kiLeftRef) {
      *sMvp = kpMvComp->sMotionVectorCache[18];
      return;
    }
  }

  PredMv (kpMvComp, iPartIdx, 4, iRef, sMvp);
}
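
/*
 * P_Skip motion vector derivation (cf. H.264 subclause 8.4.1.1): the skip MV
 * is forced to zero when the left or top neighbor is unavailable, or when
 * either of them references picture 0 with a zero motion vector; otherwise
 * the regular 16x16 median prediction applies.
 */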
void PredSkipMv (SMbCache* pMbCache, SMVUnitXY* sMvp) {
  const SMVComponentUnit* kpMvComp = &pMbCache->sMvComponents;
  const int8_t kiLeftRef = kpMvComp->iRefIndexCache[6]; //A
  const int8_t kiTopRef = kpMvComp->iRefIndexCache[1]; //B

  if (REF_NOT_AVAIL == kiLeftRef || REF_NOT_AVAIL == kiTopRef ||
      (0 == kiLeftRef && 0 == LD32 (&kpMvComp->sMotionVectorCache[6])) ||
      (0 == kiTopRef && 0 == LD32 (&kpMvComp->sMotionVectorCache[1]))) {
    ST32 (sMvp, 0);
    return;
  }

  PredMv (kpMvComp, 0, 4, 0, sMvp);
}

//update pMv and uiRefIndex cache for current MB, only for P16x16 (SKIP inclusive)
void UpdateP16x16MotionInfo (SMbCache* pMbCache, SMB* pCurMb, const int8_t kiRef, SMVUnitXY* pMv) {
  // optimized 11/25/2011
  SMVComponentUnit* pMvComp = &pMbCache->sMvComponents;
  const uint32_t kuiMv32 = LD32 (pMv);
  const uint64_t kuiMv64 = BUTTERFLY4x8 (kuiMv32);
  uint64_t uiMvBuf[8] = { kuiMv64, kuiMv64, kuiMv64, kuiMv64, kuiMv64, kuiMv64, kuiMv64, kuiMv64 };
  const uint16_t kuiRef16 = BUTTERFLY1x2 (kiRef);
  const uint32_t kuiRef32 = BUTTERFLY2x4 (kuiRef16);
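
  // The BUTTERFLY* helpers appear to replicate their argument into a word
  // twice as wide (an assumption based on how the results are stored below):
  // kuiMv64 carries the packed 32-bit MV twice, so one 64-bit store fills two
  // adjacent cache entries, and kuiRef32 carries the 8-bit reference index
  // four times, covering the four 8x8 blocks in one 32-bit store.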

  ST32 (pCurMb->pRefIndex, kuiRef32);
  // update the MVs of all 16 4x4 blocks (0~15)
  memcpy (pCurMb->sMv, uiMvBuf, sizeof (uiMvBuf)); // confirmed_safe_unsafe_usage

  /*
   * cache block rows 0: 7~10, 1: 13~16, 2: 19~22, 3: 25~28
   */
  pMvComp->iRefIndexCache[7] = kiRef;
  ST16 (&pMvComp->iRefIndexCache[8], kuiRef16);
  pMvComp->iRefIndexCache[10] = kiRef;
  pMvComp->iRefIndexCache[13] = kiRef;
  ST16 (&pMvComp->iRefIndexCache[14], kuiRef16);
  pMvComp->iRefIndexCache[16] = kiRef;
  pMvComp->iRefIndexCache[19] = kiRef;
  ST16 (&pMvComp->iRefIndexCache[20], kuiRef16);
  pMvComp->iRefIndexCache[22] = kiRef;
  pMvComp->iRefIndexCache[25] = kiRef;
  ST16 (&pMvComp->iRefIndexCache[26], kuiRef16);
  pMvComp->iRefIndexCache[28] = kiRef;

  /*
   * same four rows, motion vectors this time
   */
  pMvComp->sMotionVectorCache[7] = *pMv;
  ST64 (&pMvComp->sMotionVectorCache[8], kuiMv64);
  pMvComp->sMotionVectorCache[10] = *pMv;
  pMvComp->sMotionVectorCache[13] = *pMv;
  ST64 (&pMvComp->sMotionVectorCache[14], kuiMv64);
  pMvComp->sMotionVectorCache[16] = *pMv;
  pMvComp->sMotionVectorCache[19] = *pMv;
  ST64 (&pMvComp->sMotionVectorCache[20], kuiMv64);
  pMvComp->sMotionVectorCache[22] = *pMv;
  pMvComp->sMotionVectorCache[25] = *pMv;
  ST64 (&pMvComp->sMotionVectorCache[26], kuiMv64);
  pMvComp->sMotionVectorCache[28] = *pMv;
}
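
/*
 * Store pattern note (an observation about the code above, not an original
 * comment): each cache row of the current MB holds four interior entries at a
 * stride-6 offset, so a row cannot be filled with one contiguous store. Each
 * row is instead written as 1 + 2 + 1: a scalar store, a paired store (ST16
 * for two 8-bit ref indices, ST64 for two 4-byte MVs) starting on an even
 * element offset, then a scalar store, presumably to keep the wide stores
 * naturally aligned.
 */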

//update uiRefIndex and pMv of both SMB and Mb_cache, only for P16x8
void UpdateP16x8MotionInfo (SMbCache* pMbCache, SMB* pCurMb, const int32_t kiPartIdx, const int8_t kiRef,
                            SMVUnitXY* pMv) {
  // optimized 11/25/2011
  SMVComponentUnit* pMvComp = &pMbCache->sMvComponents;
  const uint32_t kuiMv32 = LD32 (pMv);
  const uint64_t kuiMv64 = BUTTERFLY4x8 (kuiMv32);
  uint64_t uiMvBuf[4] = { kuiMv64, kuiMv64, kuiMv64, kuiMv64 };
  const int16_t kiScan4Idx = g_kuiMbCountScan4Idx[kiPartIdx];
  const int16_t kiCacheIdx = g_kuiCache30ScanIdx[kiPartIdx];
  const int16_t kiCacheIdx1 = 1 + kiCacheIdx;
  const int16_t kiCacheIdx3 = 3 + kiCacheIdx;
  const int16_t kiCacheIdx6 = 6 + kiCacheIdx;
  const int16_t kiCacheIdx7 = 7 + kiCacheIdx;
  const int16_t kiCacheIdx9 = 9 + kiCacheIdx;
  const uint16_t kuiRef16 = BUTTERFLY1x2 (kiRef);

  ST16 (&pCurMb->pRefIndex[ (kiPartIdx >> 2)], kuiRef16);
  memcpy (&pCurMb->sMv[kiScan4Idx], uiMvBuf, sizeof (uiMvBuf)); // confirmed_safe_unsafe_usage

  /*
   * cache rows touched by this partition: kiCacheIdx ~ kiCacheIdx+3 and kiCacheIdx+6 ~ kiCacheIdx+9
   */
  pMvComp->iRefIndexCache[kiCacheIdx] = kiRef;
  ST16 (&pMvComp->iRefIndexCache[kiCacheIdx1], kuiRef16);
  pMvComp->iRefIndexCache[kiCacheIdx3] = kiRef;
  pMvComp->iRefIndexCache[kiCacheIdx6] = kiRef;
  ST16 (&pMvComp->iRefIndexCache[kiCacheIdx7], kuiRef16);
  pMvComp->iRefIndexCache[kiCacheIdx9] = kiRef;

  /*
   * same two cache rows, motion vectors this time
   */
  pMvComp->sMotionVectorCache[kiCacheIdx] = *pMv;
  ST64 (&pMvComp->sMotionVectorCache[kiCacheIdx1], kuiMv64);
  pMvComp->sMotionVectorCache[kiCacheIdx3] = *pMv;
  pMvComp->sMotionVectorCache[kiCacheIdx6] = *pMv;
  ST64 (&pMvComp->sMotionVectorCache[kiCacheIdx7], kuiMv64);
  pMvComp->sMotionVectorCache[kiCacheIdx9] = *pMv;
}
//update uiRefIndex and pMv of both SMB and Mb_cache, only for P8x16
void update_P8x16_motion_info (SMbCache* pMbCache, SMB* pCurMb, const int32_t kiPartIdx, const int8_t kiRef,
                               SMVUnitXY* pMv) {
  // optimized 11/25/2011
  SMVComponentUnit* pMvComp = &pMbCache->sMvComponents;
  const uint32_t kuiMv32 = LD32 (pMv);
  const uint64_t kuiMv64 = BUTTERFLY4x8 (kuiMv32);
  const int16_t kiScan4Idx = g_kuiMbCountScan4Idx[kiPartIdx];
  const int16_t kiCacheIdx = g_kuiCache30ScanIdx[kiPartIdx];
  const int16_t kiCacheIdx1 = 1 + kiCacheIdx;
  const int16_t kiCacheIdx3 = 3 + kiCacheIdx;
  const int16_t kiCacheIdx12 = 12 + kiCacheIdx;
  const int16_t kiCacheIdx13 = 13 + kiCacheIdx;
  const int16_t kiCacheIdx15 = 15 + kiCacheIdx;
  const int16_t kiBlkIdx = kiPartIdx >> 2;
  const uint16_t kuiRef16 = BUTTERFLY1x2 (kiRef);

  pCurMb->pRefIndex[kiBlkIdx] = kiRef;
  pCurMb->pRefIndex[2 + kiBlkIdx] = kiRef;
  ST64 (&pCurMb->sMv[kiScan4Idx], kuiMv64);
  ST64 (&pCurMb->sMv[4 + kiScan4Idx], kuiMv64);
  ST64 (&pCurMb->sMv[8 + kiScan4Idx], kuiMv64);
  ST64 (&pCurMb->sMv[12 + kiScan4Idx], kuiMv64);

  /*
   * cache rows touched by this partition: kiCacheIdx ~ kiCacheIdx+3 and kiCacheIdx+12 ~ kiCacheIdx+15
   */
  pMvComp->iRefIndexCache[kiCacheIdx] = kiRef;
  ST16 (&pMvComp->iRefIndexCache[kiCacheIdx1], kuiRef16);
  pMvComp->iRefIndexCache[kiCacheIdx3] = kiRef;
  pMvComp->iRefIndexCache[kiCacheIdx12] = kiRef;
  ST16 (&pMvComp->iRefIndexCache[kiCacheIdx13], kuiRef16);
  pMvComp->iRefIndexCache[kiCacheIdx15] = kiRef;

  /*
   * same two cache rows, motion vectors this time
   */
  pMvComp->sMotionVectorCache[kiCacheIdx] = *pMv;
  ST64 (&pMvComp->sMotionVectorCache[kiCacheIdx1], kuiMv64);
  pMvComp->sMotionVectorCache[kiCacheIdx3] = *pMv;
  pMvComp->sMotionVectorCache[kiCacheIdx12] = *pMv;
  ST64 (&pMvComp->sMotionVectorCache[kiCacheIdx13], kuiMv64);
  pMvComp->sMotionVectorCache[kiCacheIdx15] = *pMv;
}
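/*
 * Note (an observation, not from the original comments): the row-oriented
 * stores above span all four columns of a cache row even though an 8x16
 * partition is only two columns wide. The extra slots are either unused, are
 * overwritten when the sibling partition is updated, or sit in neighbor
 * positions that are rebuilt before the next macroblock is processed, so the
 * over-write appears to be a deliberate trade for wider stores.
 */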
//update uiRefIndex and pMv of both SMB and Mb_cache, only for P8x8
void UpdateP8x8MotionInfo (SMbCache* pMbCache, SMB* pCurMb, const int32_t kiPartIdx, const int8_t kiRef,
                           SMVUnitXY* pMv) {
  SMVComponentUnit* pMvComp = &pMbCache->sMvComponents;
  const uint32_t kuiMv32 = LD32 (pMv);
  const uint64_t kuiMv64 = BUTTERFLY4x8 (kuiMv32);
  const int16_t kiScan4Idx = g_kuiMbCountScan4Idx[kiPartIdx];
  const int16_t kiCacheIdx = g_kuiCache30ScanIdx[kiPartIdx];
  const int16_t kiCacheIdx1 = 1 + kiCacheIdx;
  const int16_t kiCacheIdx6 = 6 + kiCacheIdx;
  const int16_t kiCacheIdx7 = 7 + kiCacheIdx;

  //mb
  ST64 (&pCurMb->sMv[ kiScan4Idx], kuiMv64);
  ST64 (&pCurMb->sMv[4 + kiScan4Idx], kuiMv64);

  //cache
  pMvComp->iRefIndexCache[kiCacheIdx ] =
  pMvComp->iRefIndexCache[kiCacheIdx1] =
  pMvComp->iRefIndexCache[kiCacheIdx6] =
  pMvComp->iRefIndexCache[kiCacheIdx7] = kiRef;
  pMvComp->sMotionVectorCache[kiCacheIdx ] =
  pMvComp->sMotionVectorCache[kiCacheIdx1] =
  pMvComp->sMotionVectorCache[kiCacheIdx6] =
  pMvComp->sMotionVectorCache[kiCacheIdx7] = *pMv;
}
//update uiRefIndex and pMv of both SMB and Mb_cache, only for P4x4
void UpdateP4x4MotionInfo (SMbCache* pMbCache, SMB* pCurMb, const int32_t kiPartIdx, const int8_t kiRef,
                           SMVUnitXY* pMv) {
  SMVComponentUnit* pMvComp = &pMbCache->sMvComponents;
  const int16_t kiScan4Idx = g_kuiMbCountScan4Idx[kiPartIdx];
  const int16_t kiCacheIdx = g_kuiCache30ScanIdx[kiPartIdx];

  //mb
  pCurMb->sMv[kiScan4Idx] = *pMv;
  //cache
  pMvComp->iRefIndexCache[kiCacheIdx] = kiRef;
  pMvComp->sMotionVectorCache[kiCacheIdx] = *pMv;
}
//update uiRefIndex and pMv of both SMB and Mb_cache, only for P8x4
void UpdateP8x4MotionInfo (SMbCache* pMbCache, SMB* pCurMb, const int32_t kiPartIdx, const int8_t kiRef,
                           SMVUnitXY* pMv) {
  SMVComponentUnit* pMvComp = &pMbCache->sMvComponents;
  const int16_t kiScan4Idx = g_kuiMbCountScan4Idx[kiPartIdx];
  const int16_t kiCacheIdx = g_kuiCache30ScanIdx[kiPartIdx];

  //mb
  pCurMb->sMv[ kiScan4Idx] = *pMv;
  pCurMb->sMv[1 + kiScan4Idx] = *pMv;
  //cache
  pMvComp->iRefIndexCache[ kiCacheIdx] = kiRef;
  pMvComp->iRefIndexCache[1 + kiCacheIdx] = kiRef;
  pMvComp->sMotionVectorCache[ kiCacheIdx] = *pMv;
  pMvComp->sMotionVectorCache[1 + kiCacheIdx] = *pMv;
}
//update uiRefIndex and pMv of both SMB and Mb_cache, only for P4x8
void UpdateP4x8MotionInfo (SMbCache* pMbCache, SMB* pCurMb, const int32_t kiPartIdx, const int8_t kiRef,
                           SMVUnitXY* pMv) {
  SMVComponentUnit* pMvComp = &pMbCache->sMvComponents;
  const int16_t kiScan4Idx = g_kuiMbCountScan4Idx[kiPartIdx];
  const int16_t kiCacheIdx = g_kuiCache30ScanIdx[kiPartIdx];

  //mb
  pCurMb->sMv[ kiScan4Idx] = *pMv;
  pCurMb->sMv[4 + kiScan4Idx] = *pMv;
  //cache
  pMvComp->iRefIndexCache[ kiCacheIdx] = kiRef;
  pMvComp->iRefIndexCache[6 + kiCacheIdx] = kiRef;
  pMvComp->sMotionVectorCache[ kiCacheIdx] = *pMv;
  pMvComp->sMotionVectorCache[6 + kiCacheIdx] = *pMv;
}
//=========================update motion info (MV and ref_idx) into Mb_cache==========================
//update pMv and uiRefIndex cache only for Mb_cache, only for P16x16 (SKIP inclusive)

//update uiRefIndex and pMv of only Mb_cache, only for P16x8
void UpdateP16x8Motion2Cache (SMbCache* pMbCache, int32_t iPartIdx, int8_t iRef, SMVUnitXY* pMv) {
  SMVComponentUnit* pMvComp = &pMbCache->sMvComponents;
  int32_t i;

  for (i = 0; i < 2; i++, iPartIdx += 4) {
    //cache
    const uint8_t kuiCacheIdx = g_kuiCache30ScanIdx[iPartIdx];

    pMvComp->iRefIndexCache[ kuiCacheIdx] =
    pMvComp->iRefIndexCache[1 + kuiCacheIdx] =
    pMvComp->iRefIndexCache[6 + kuiCacheIdx] =
    pMvComp->iRefIndexCache[7 + kuiCacheIdx] = iRef;
    pMvComp->sMotionVectorCache[ kuiCacheIdx] =
    pMvComp->sMotionVectorCache[1 + kuiCacheIdx] =
    pMvComp->sMotionVectorCache[6 + kuiCacheIdx] =
    pMvComp->sMotionVectorCache[7 + kuiCacheIdx] = *pMv;
  }
}
//update uiRefIndex and pMv of only Mb_cache, only for P8x16
void UpdateP8x16Motion2Cache (SMbCache* pMbCache, int32_t iPartIdx, int8_t iRef, SMVUnitXY* pMv) {
  SMVComponentUnit* pMvComp = &pMbCache->sMvComponents;
  int32_t i;

  for (i = 0; i < 2; i++, iPartIdx += 8) {
    //cache
    const uint8_t kuiCacheIdx = g_kuiCache30ScanIdx[iPartIdx];

    pMvComp->iRefIndexCache[ kuiCacheIdx] =
    pMvComp->iRefIndexCache[1 + kuiCacheIdx] =
    pMvComp->iRefIndexCache[6 + kuiCacheIdx] =
    pMvComp->iRefIndexCache[7 + kuiCacheIdx] = iRef;
    pMvComp->sMotionVectorCache[ kuiCacheIdx] =
    pMvComp->sMotionVectorCache[1 + kuiCacheIdx] =
    pMvComp->sMotionVectorCache[6 + kuiCacheIdx] =
    pMvComp->sMotionVectorCache[7 + kuiCacheIdx] = *pMv;
  }
}

//update uiRefIndex and pMv of only Mb_cache, only for P8x8
void UpdateP8x8Motion2Cache (SMbCache* pMbCache, int32_t iPartIdx, int8_t iRef, SMVUnitXY* pMv) {
  SMVComponentUnit* pMvComp = &pMbCache->sMvComponents;
  const uint8_t kuiCacheIdx = g_kuiCache30ScanIdx[iPartIdx];

  pMvComp->iRefIndexCache[ kuiCacheIdx] =
  pMvComp->iRefIndexCache[1 + kuiCacheIdx] =
  pMvComp->iRefIndexCache[6 + kuiCacheIdx] =
  pMvComp->iRefIndexCache[7 + kuiCacheIdx] = iRef;
  pMvComp->sMotionVectorCache[ kuiCacheIdx] =
  pMvComp->sMotionVectorCache[1 + kuiCacheIdx] =
  pMvComp->sMotionVectorCache[6 + kuiCacheIdx] =
  pMvComp->sMotionVectorCache[7 + kuiCacheIdx] = *pMv;
}

//update uiRefIndex and pMv of only Mb_cache, for P4x4
void UpdateP4x4Motion2Cache (SMbCache* pMbCache, int32_t iPartIdx, int8_t iRef, SMVUnitXY* pMv) {
  SMVComponentUnit* pMvComp = &pMbCache->sMvComponents;
  const uint8_t kuiCacheIdx = g_kuiCache30ScanIdx[iPartIdx];

  pMvComp->iRefIndexCache[kuiCacheIdx] = iRef;
  pMvComp->sMotionVectorCache[kuiCacheIdx] = *pMv;
}

//update uiRefIndex and pMv of only Mb_cache, for P8x4
void UpdateP8x4Motion2Cache (SMbCache* pMbCache, int32_t iPartIdx, int8_t iRef, SMVUnitXY* pMv) {
  SMVComponentUnit* pMvComp = &pMbCache->sMvComponents;
  const uint8_t kuiCacheIdx = g_kuiCache30ScanIdx[iPartIdx];

  pMvComp->iRefIndexCache[ kuiCacheIdx] =
  pMvComp->iRefIndexCache[1 + kuiCacheIdx] = iRef;
  pMvComp->sMotionVectorCache[ kuiCacheIdx] =
  pMvComp->sMotionVectorCache[1 + kuiCacheIdx] = *pMv;
}

//update uiRefIndex and pMv of only Mb_cache, for P4x8
void UpdateP4x8Motion2Cache (SMbCache* pMbCache, int32_t iPartIdx, int8_t iRef, SMVUnitXY* pMv) {
  SMVComponentUnit* pMvComp = &pMbCache->sMvComponents;
  const uint8_t kuiCacheIdx = g_kuiCache30ScanIdx[iPartIdx];

  pMvComp->iRefIndexCache[ kuiCacheIdx] =
  pMvComp->iRefIndexCache[6 + kuiCacheIdx] = iRef;
  pMvComp->sMotionVectorCache[ kuiCacheIdx] =
  pMvComp->sMotionVectorCache[6 + kuiCacheIdx] = *pMv;
}
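
/*
 * Typical flow (a sketch; the surrounding caller code is illustrative, not
 * from this file): during mode decision the encoder derives a predictor, runs
 * motion search around it, records candidate results with the lightweight
 * Update*Motion2Cache variants, and commits the winning partition's MV to
 * both the SMB structure and the cache with the Update*MotionInfo variants:
 *
 *   SMVUnitXY sMvp, sMvBest;
 *   PredInter16x8Mv (pMbCache, 0, 0, &sMvp);                  // predictor for top 16x8, ref 0
 *   // ... motion search around sMvp yields sMvBest ...
 *   UpdateP16x8Motion2Cache (pMbCache, 0, 0, &sMvBest);       // candidate: cache only
 *   UpdateP16x8MotionInfo (pMbCache, pCurMb, 0, 0, &sMvBest); // final: SMB + cache
 */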
} // namespace WelsEnc