/*
 * Copyright © 2018, VideoLAN and dav1d authors
 * Copyright © 2018, Two Orioles, LLC
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
27
28 #include "config.h"
29
30 #include <string.h>
31
32 #include "common/intops.h"
33
34 #include "src/ctx.h"
35 #include "src/levels.h"
36 #include "src/lf_mask.h"
37 #include "src/tables.h"
38
/* Recursively flattens a transform split tree into the txa scratch buffer.
 * tx_masks holds one split-decision bitmask per depth level, indexed by
 * (y_off, x_off). For every leaf transform, each covered 4x4 unit receives:
 *   txa[0][0][y][x] = log2 tx width,  txa[1][0][y][x] = log2 tx height
 *     (both clamped to 2, i.e. 16px — the largest loopfilter tx unit), and
 *   txa[.][1][..]   = the step (in 4x4 units) to the next transform edge. */
static void decomp_tx(uint8_t (*const txa)[2 /* txsz, step */][32 /* y */][32 /* x */],
                      const enum RectTxfmSize from,
                      const int depth,
                      const int y_off, const int x_off,
                      const uint16_t *const tx_masks)
{
    const TxfmInfo *const t_dim = &dav1d_txfm_dimensions[from];
    /* 4x4 cannot split further, and the split bitmasks only cover 2 levels. */
    const int is_split = (from == (int) TX_4X4 || depth > 1) ? 0 :
        (tx_masks[depth] >> (y_off * 4 + x_off)) & 1;

    if (is_split) {
        const enum RectTxfmSize sub = t_dim->sub;
        const int htw4 = t_dim->w >> 1, hth4 = t_dim->h >> 1;

        /* Recurse into up to four sub-transforms; rectangular sizes split
         * only along their longer dimension (w>=h / h>=w guards). The casts
         * re-anchor the txa base pointer at each sub-block's top-left unit. */
        decomp_tx(txa, sub, depth + 1, y_off * 2 + 0, x_off * 2 + 0, tx_masks);
        if (t_dim->w >= t_dim->h)
            decomp_tx((uint8_t(*)[2][32][32]) &txa[0][0][0][htw4],
                      sub, depth + 1, y_off * 2 + 0, x_off * 2 + 1, tx_masks);
        if (t_dim->h >= t_dim->w) {
            decomp_tx((uint8_t(*)[2][32][32]) &txa[0][0][hth4][0],
                      sub, depth + 1, y_off * 2 + 1, x_off * 2 + 0, tx_masks);
            if (t_dim->w >= t_dim->h)
                decomp_tx((uint8_t(*)[2][32][32]) &txa[0][0][hth4][htw4],
                          sub, depth + 1, y_off * 2 + 1, x_off * 2 + 1, tx_masks);
        }
    } else {
        /* Leaf: clamp the log2 dimensions to 2 (16px) before storing. */
        const int lw = imin(2, t_dim->lw), lh = imin(2, t_dim->lh);

        /* Fill txsz rows and the per-row horizontal step for the leaf area;
         * rep_macro/case_set_upto16 expand to width-specialized stores. */
#define set_ctx(type, dir, diridx, off, mul, rep_macro) \
        for (int y = 0; y < t_dim->h; y++) { \
            rep_macro(type, txa[0][0][y], off, mul * lw); \
            rep_macro(type, txa[1][0][y], off, mul * lh); \
            txa[0][1][y][0] = t_dim->w; \
        }
        case_set_upto16(t_dim->w,,, 0);
#undef set_ctx
        /* Vertical step for the leaf's top row. */
#define set_ctx(type, dir, diridx, off, mul, rep_macro) \
        rep_macro(type, txa[1][1][0], off, mul * t_dim->h)
        case_set_upto16(t_dim->w,,, 0);
#undef set_ctx
    }
}
81
/* Builds the luma loopfilter masks along all 4px edges of an inter block.
 * masks[dir][edge pos][filter-size bucket 0..2][sidx] collects one bit per
 * 4x4 row (dir=0: vertical edges) or column (dir=1: horizontal edges). The
 * block's transform tree (max_tx refined by tx_masks) is first flattened
 * into txa via decomp_tx. a/l are the top/left tx-size contexts of the
 * neighbouring blocks and are updated to this block's sizes on exit. */
static inline void mask_edges_inter(uint16_t (*const masks)[32][3][2],
                                    const int by4, const int bx4,
                                    const int w4, const int h4, const int skip,
                                    const enum RectTxfmSize max_tx,
                                    const uint16_t *const tx_masks,
                                    uint8_t *const a, uint8_t *const l)
{
    const TxfmInfo *const t_dim = &dav1d_txfm_dimensions[max_tx];
    int y, x;

    /* Per-4x4 scratch: [edge dir][txsz or step][y][x], 16-byte aligned. */
    ALIGN_STK_16(uint8_t, txa, 2 /* edge */, [2 /* txsz, step */][32 /* y */][32 /* x */]);
    for (int y_off = 0, y = 0; y < h4; y += t_dim->h, y_off++)
        for (int x_off = 0, x = 0; x < w4; x += t_dim->w, x_off++)
            decomp_tx((uint8_t(*)[2][32][32]) &txa[0][0][y][x],
                      max_tx, 0, y_off, x_off, tx_masks);

    // left block edge
    unsigned mask = 1U << by4;
    for (y = 0; y < h4; y++, mask <<= 1) {
        /* The 32-unit mask is stored as two 16-bit words; sidx selects the
         * upper word once the bit position passes 16. */
        const int sidx = mask >= 0x10000;
        const unsigned smask = mask >> (sidx << 4);
        /* Filter strength is bounded by the smaller of the two adjacent
         * transform sizes (this block's vs the left neighbour's in l[]). */
        masks[0][bx4][imin(txa[0][0][y][0], l[y])][sidx] |= smask;
    }

    // top block edge
    for (x = 0, mask = 1U << bx4; x < w4; x++, mask <<= 1) {
        const int sidx = mask >= 0x10000;
        const unsigned smask = mask >> (sidx << 4);
        masks[1][by4][imin(txa[1][0][0][x], a[x])][sidx] |= smask;
    }

    /* Inner transform edges are only filtered when the block has residuals. */
    if (!skip) {
        // inner (tx) left|right edges
        for (y = 0, mask = 1U << by4; y < h4; y++, mask <<= 1) {
            const int sidx = mask >= 0x10000U;
            const unsigned smask = mask >> (sidx << 4);
            int ltx = txa[0][0][y][0];
            int step = txa[0][1][y][0]; /* width of the tx left of the edge */
            for (x = step; x < w4; x += step) {
                const int rtx = txa[0][0][y][x];
                masks[0][bx4 + x][imin(rtx, ltx)][sidx] |= smask;
                ltx = rtx;
                step = txa[0][1][y][x];
            }
        }

        // top
        // inner (tx) --- edges
        // bottom
        for (x = 0, mask = 1U << bx4; x < w4; x++, mask <<= 1) {
            const int sidx = mask >= 0x10000U;
            const unsigned smask = mask >> (sidx << 4);
            int ttx = txa[1][0][0][x];
            int step = txa[1][1][0][x]; /* height of the tx above the edge */
            for (y = step; y < h4; y += step) {
                const int btx = txa[1][0][y][x];
                masks[1][by4 + y][imin(ttx, btx)][sidx] |= smask;
                ttx = btx;
                step = txa[1][1][y][x];
            }
        }
    }

    /* Export this block's rightmost column / bottom row of tx sizes as the
     * left/top contexts for the next blocks. */
    for (y = 0; y < h4; y++)
        l[y] = txa[0][0][y][w4 - 1];
    memcpy(a, txa[1][0][h4 - 1], w4);
}
149
/* Builds the luma loopfilter masks for an intra block, which uses one
 * uniform transform size (tx) throughout. Same mask layout as
 * mask_edges_inter. twl4c/thl4c are the tx's log2 width/height clamped to
 * 2 (16px, the largest luma filter unit). */
static inline void mask_edges_intra(uint16_t (*const masks)[32][3][2],
                                    const int by4, const int bx4,
                                    const int w4, const int h4,
                                    const enum RectTxfmSize tx,
                                    uint8_t *const a, uint8_t *const l)
{
    const TxfmInfo *const t_dim = &dav1d_txfm_dimensions[tx];
    const int twl4 = t_dim->lw, thl4 = t_dim->lh;
    const int twl4c = imin(2, twl4), thl4c = imin(2, thl4);
    int y, x;

    // left block edge
    unsigned mask = 1U << by4;
    for (y = 0; y < h4; y++, mask <<= 1) {
        /* 32-unit masks are packed as two 16-bit words; sidx picks the word. */
        const int sidx = mask >= 0x10000;
        const unsigned smask = mask >> (sidx << 4);
        /* Strength is bounded by the smaller of this tx and the neighbour's. */
        masks[0][bx4][imin(twl4c, l[y])][sidx] |= smask;
    }

    // top block edge
    for (x = 0, mask = 1U << bx4; x < w4; x++, mask <<= 1) {
        const int sidx = mask >= 0x10000;
        const unsigned smask = mask >> (sidx << 4);
        masks[1][by4][imin(thl4c, a[x])][sidx] |= smask;
    }

    // inner (tx) left|right edges
    /* All inner vertical edges cover the same h4 rows, so build the combined
     * row mask once ((2^h4 - 1) << by4) and OR it into each edge column. */
    const int hstep = t_dim->w;
    unsigned t = 1U << by4;
    unsigned inner = (unsigned) ((((uint64_t) t) << h4) - t);
    unsigned inner1 = inner & 0xffff, inner2 = inner >> 16;
    for (x = hstep; x < w4; x += hstep) {
        if (inner1) masks[0][bx4 + x][twl4c][0] |= inner1;
        if (inner2) masks[0][bx4 + x][twl4c][1] |= inner2;
    }

    // top
    // inner (tx) --- edges
    // bottom
    const int vstep = t_dim->h;
    t = 1U << bx4;
    inner = (unsigned) ((((uint64_t) t) << w4) - t);
    inner1 = inner & 0xffff;
    inner2 = inner >> 16;
    for (y = vstep; y < h4; y += vstep) {
        if (inner1) masks[1][by4 + y][thl4c][0] |= inner1;
        if (inner2) masks[1][by4 + y][thl4c][1] |= inner2;
    }

    /* Refresh the top (a) and left (l) tx-size contexts for the block's
     * extent; case_set macros pick a width-specialized store, falling back
     * to memset for sizes past 16. */
#define set_ctx(type, dir, diridx, off, mul, rep_macro) \
    rep_macro(type, a, off, mul * thl4c)
#define default_memset(dir, diridx, off, var) \
    memset(a, thl4c, var)
    case_set_upto32_with_default(w4,,, 0);
#undef default_memset
#undef set_ctx
#define set_ctx(type, dir, diridx, off, mul, rep_macro) \
    rep_macro(type, l, off, mul * twl4c)
#define default_memset(dir, diridx, off, var) \
    memset(l, twl4c, var)
    case_set_upto32_with_default(h4,,, 0);
#undef default_memset
#undef set_ctx
}
214
/* Chroma variant of the edge-mask builders. Chroma has only two filter-size
 * buckets (4px vs >=8px, hence the !! clamps), and all mask geometry is
 * scaled by the subsampling factors ss_hor/ss_ver. */
static void mask_edges_chroma(uint16_t (*const masks)[32][2][2],
                              const int cby4, const int cbx4,
                              const int cw4, const int ch4,
                              const int skip_inter,
                              const enum RectTxfmSize tx,
                              uint8_t *const a, uint8_t *const l,
                              const int ss_hor, const int ss_ver)
{
    const TxfmInfo *const t_dim = &dav1d_txfm_dimensions[tx];
    const int twl4 = t_dim->lw, thl4 = t_dim->lh;
    /* Bucket 1 for any transform of 8px or larger, else bucket 0. */
    const int twl4c = !!twl4, thl4c = !!thl4;
    int y, x;
    /* With subsampling a superblock spans fewer chroma 4x4 units, so each
     * mask word holds 16 >> ss bits instead of 16. */
    const int vbits = 4 - ss_ver, hbits = 4 - ss_hor;
    const int vmask = 16 >> ss_ver, hmask = 16 >> ss_hor;
    const unsigned vmax = 1 << vmask, hmax = 1 << hmask;

    // left block edge
    unsigned mask = 1U << cby4;
    for (y = 0; y < ch4; y++, mask <<= 1) {
        /* sidx selects the upper mask word once the bit passes vmask bits. */
        const int sidx = mask >= vmax;
        const unsigned smask = mask >> (sidx << vbits);
        masks[0][cbx4][imin(twl4c, l[y])][sidx] |= smask;
    }

    // top block edge
    for (x = 0, mask = 1U << cbx4; x < cw4; x++, mask <<= 1) {
        const int sidx = mask >= hmax;
        const unsigned smask = mask >> (sidx << hbits);
        masks[1][cby4][imin(thl4c, a[x])][sidx] |= smask;
    }

    /* Inner transform edges are skipped for skipped inter blocks. */
    if (!skip_inter) {
        // inner (tx) left|right edges
        /* Combined row mask for all inner vertical edges: (2^ch4 - 1) << cby4,
         * then split into the low/high mask words. */
        const int hstep = t_dim->w;
        unsigned t = 1U << cby4;
        unsigned inner = (unsigned) ((((uint64_t) t) << ch4) - t);
        unsigned inner1 = inner & ((1 << vmask) - 1), inner2 = inner >> vmask;
        for (x = hstep; x < cw4; x += hstep) {
            if (inner1) masks[0][cbx4 + x][twl4c][0] |= inner1;
            if (inner2) masks[0][cbx4 + x][twl4c][1] |= inner2;
        }

        // top
        // inner (tx) --- edges
        // bottom
        const int vstep = t_dim->h;
        t = 1U << cbx4;
        inner = (unsigned) ((((uint64_t) t) << cw4) - t);
        inner1 = inner & ((1 << hmask) - 1), inner2 = inner >> hmask;
        for (y = vstep; y < ch4; y += vstep) {
            if (inner1) masks[1][cby4 + y][thl4c][0] |= inner1;
            if (inner2) masks[1][cby4 + y][thl4c][1] |= inner2;
        }
    }

    /* Refresh the top (a) and left (l) chroma tx-size contexts. */
#define set_ctx(type, dir, diridx, off, mul, rep_macro) \
    rep_macro(type, a, off, mul * thl4c)
#define default_memset(dir, diridx, off, var) \
    memset(a, thl4c, var)
    case_set_upto32_with_default(cw4,,, 0);
#undef default_memset
#undef set_ctx
#define set_ctx(type, dir, diridx, off, mul, rep_macro) \
    rep_macro(type, l, off, mul * twl4c)
#define default_memset(dir, diridx, off, var) \
    memset(l, twl4c, var)
    case_set_upto32_with_default(ch4,,, 0);
#undef default_memset
#undef set_ctx
}
285
/* Records loopfilter levels and edge masks for one intra-coded block.
 * Writes the four per-plane filter levels into level_cache for every 4x4
 * unit the block covers (clipped to the frame), then builds the luma edge
 * masks and — when chroma is present (auv != NULL) — the chroma masks. */
void dav1d_create_lf_mask_intra(Av1Filter *const lflvl,
                                uint8_t (*const level_cache)[4],
                                const ptrdiff_t b4_stride,
                                const uint8_t (*filter_level)[8][2],
                                const int bx, const int by,
                                const int iw, const int ih,
                                const enum BlockSize bs,
                                const enum RectTxfmSize ytx,
                                const enum RectTxfmSize uvtx,
                                const enum Dav1dPixelLayout layout,
                                uint8_t *const ay, uint8_t *const ly,
                                uint8_t *const auv, uint8_t *const luv)
{
    const uint8_t *const b_dim = dav1d_block_dimensions[bs];
    /* Clip against the right/bottom frame edge (all units are 4px). */
    const int bw4 = imin(iw - bx, b_dim[0]);
    const int bh4 = imin(ih - by, b_dim[1]);
    const int bx4 = bx & 31, by4 = by & 31; /* position within superblock */
    assert(bw4 >= 0 && bh4 >= 0);

    if (bw4 && bh4) {
        /* Per-4x4 luma levels: [0] = y vertical, [1] = y horizontal. */
        uint8_t (*lvl)[4] = level_cache + by * b4_stride + bx;
        for (int y = 0; y < bh4; y++, lvl += b4_stride)
            for (int x = 0; x < bw4; x++) {
                lvl[x][0] = filter_level[0][0][0];
                lvl[x][1] = filter_level[1][0][0];
            }

        mask_edges_intra(lflvl->filter_y, by4, bx4, bw4, bh4, ytx, ay, ly);
    }

    if (!auv) return; /* monochrome / no chroma context */

    const int ss_ver = layout == DAV1D_PIXEL_LAYOUT_I420;
    const int ss_hor = layout != DAV1D_PIXEL_LAYOUT_I444;
    /* Chroma extent, rounded up for odd luma sizes then clipped. */
    const int cbw4 = imin(((iw + ss_hor) >> ss_hor) - (bx >> ss_hor),
                          (b_dim[0] + ss_hor) >> ss_hor);
    const int cbh4 = imin(((ih + ss_ver) >> ss_ver) - (by >> ss_ver),
                          (b_dim[1] + ss_ver) >> ss_ver);
    assert(cbw4 >= 0 && cbh4 >= 0);
    if (!cbw4 || !cbh4) return;

    const int cbx4 = bx4 >> ss_hor, cby4 = by4 >> ss_ver;

    /* Per-4x4 chroma levels: [2] = u, [3] = v. */
    uint8_t (*lvl)[4] =
        level_cache + (by >> ss_ver) * b4_stride + (bx >> ss_hor);
    for (int y = 0; y < cbh4; y++, lvl += b4_stride)
        for (int x = 0; x < cbw4; x++) {
            lvl[x][2] = filter_level[2][0][0];
            lvl[x][3] = filter_level[3][0][0];
        }

    mask_edges_chroma(lflvl->filter_uv, cby4, cbx4, cbw4, cbh4, 0, uvtx,
                      auv, luv, ss_hor, ss_ver);
}
347
/* Records loopfilter levels and edge masks for one inter-coded block.
 * Identical flow to dav1d_create_lf_mask_intra, except the luma transform
 * layout is a split tree (max_ytx + tx_masks) and the skip flag suppresses
 * inner transform edges in both planes. */
void dav1d_create_lf_mask_inter(Av1Filter *const lflvl,
                                uint8_t (*const level_cache)[4],
                                const ptrdiff_t b4_stride,
                                const uint8_t (*filter_level)[8][2],
                                const int bx, const int by,
                                const int iw, const int ih,
                                const int skip, const enum BlockSize bs,
                                const enum RectTxfmSize max_ytx,
                                const uint16_t *const tx_masks,
                                const enum RectTxfmSize uvtx,
                                const enum Dav1dPixelLayout layout,
                                uint8_t *const ay, uint8_t *const ly,
                                uint8_t *const auv, uint8_t *const luv)
{
    const uint8_t *const b_dim = dav1d_block_dimensions[bs];
    /* Clip against the right/bottom frame edge (all units are 4px). */
    const int bw4 = imin(iw - bx, b_dim[0]);
    const int bh4 = imin(ih - by, b_dim[1]);
    const int bx4 = bx & 31, by4 = by & 31; /* position within superblock */
    assert(bw4 >= 0 && bh4 >= 0);

    if (bw4 && bh4) {
        /* Per-4x4 luma levels: [0] = y vertical, [1] = y horizontal. */
        uint8_t (*lvl)[4] = level_cache + by * b4_stride + bx;
        for (int y = 0; y < bh4; y++, lvl += b4_stride)
            for (int x = 0; x < bw4; x++) {
                lvl[x][0] = filter_level[0][0][0];
                lvl[x][1] = filter_level[1][0][0];
            }

        mask_edges_inter(lflvl->filter_y, by4, bx4, bw4, bh4, skip,
                         max_ytx, tx_masks, ay, ly);
    }

    if (!auv) return; /* monochrome / no chroma context */

    const int ss_ver = layout == DAV1D_PIXEL_LAYOUT_I420;
    const int ss_hor = layout != DAV1D_PIXEL_LAYOUT_I444;
    /* Chroma extent, rounded up for odd luma sizes then clipped. */
    const int cbw4 = imin(((iw + ss_hor) >> ss_hor) - (bx >> ss_hor),
                          (b_dim[0] + ss_hor) >> ss_hor);
    const int cbh4 = imin(((ih + ss_ver) >> ss_ver) - (by >> ss_ver),
                          (b_dim[1] + ss_ver) >> ss_ver);
    assert(cbw4 >= 0 && cbh4 >= 0);
    if (!cbw4 || !cbh4) return;

    const int cbx4 = bx4 >> ss_hor, cby4 = by4 >> ss_ver;

    /* Per-4x4 chroma levels: [2] = u, [3] = v. */
    uint8_t (*lvl)[4] =
        level_cache + (by >> ss_ver) * b4_stride + (bx >> ss_hor);
    for (int y = 0; y < cbh4; y++, lvl += b4_stride)
        for (int x = 0; x < cbw4; x++) {
            lvl[x][2] = filter_level[2][0][0];
            lvl[x][3] = filter_level[3][0][0];
        }

    mask_edges_chroma(lflvl->filter_uv, cby4, cbx4, cbw4, cbh4, skip, uvtx,
                      auv, luv, ss_hor, ss_ver);
}
411
/* Fills the E (edge) and I (inner) threshold tables for all 64 loopfilter
 * levels, plus the two sharpness-derived values, from the frame's
 * filter_sharpness setting (see the AV1 spec's loop filter process). */
void dav1d_calc_eih(Av1FilterLUT *const lim_lut, const int filter_sharpness) {
    const int sharp = filter_sharpness;

    for (int lvl = 0; lvl < 64; lvl++) {
        int inner = lvl;

        /* Higher sharpness narrows the inner threshold. */
        if (sharp > 0) {
            inner >>= (sharp + 3) >> 2;
            inner = imin(inner, 9 - sharp);
        }
        if (inner < 1)
            inner = 1; /* never fully disable the inner threshold */

        lim_lut->i[lvl] = inner;
        lim_lut->e[lvl] = 2 * (lvl + 2) + inner;
    }

    lim_lut->sharp[0] = (sharp + 3) >> 2;
    lim_lut->sharp[1] = sharp ? 9 - sharp : 0xff;
}
430
calc_lf_value(uint8_t (* const lflvl_values)[2],const int base_lvl,const int lf_delta,const int seg_delta,const Dav1dLoopfilterModeRefDeltas * const mr_delta)431 static void calc_lf_value(uint8_t (*const lflvl_values)[2],
432 const int base_lvl, const int lf_delta,
433 const int seg_delta,
434 const Dav1dLoopfilterModeRefDeltas *const mr_delta)
435 {
436 const int base = iclip(iclip(base_lvl + lf_delta, 0, 63) + seg_delta, 0, 63);
437
438 if (!mr_delta) {
439 memset(lflvl_values, base, sizeof(*lflvl_values) * 8);
440 } else {
441 const int sh = base >= 32;
442 lflvl_values[0][0] = lflvl_values[0][1] =
443 iclip(base + (mr_delta->ref_delta[0] * (1 << sh)), 0, 63);
444 for (int r = 1; r < 8; r++) {
445 for (int m = 0; m < 2; m++) {
446 const int delta =
447 mr_delta->mode_delta[m] + mr_delta->ref_delta[r];
448 lflvl_values[r][m] = iclip(base + (delta * (1 << sh)), 0, 63);
449 }
450 }
451 }
452 }
453
/* Chroma wrapper around calc_lf_value: a zero base level disables the
 * chroma loopfilter entirely, so the whole table is zeroed instead. */
static inline void calc_lf_value_chroma(uint8_t (*const lflvl_values)[2],
                                        const int base_lvl, const int lf_delta,
                                        const int seg_delta,
                                        const Dav1dLoopfilterModeRefDeltas *const mr_delta)
{
    if (base_lvl)
        calc_lf_value(lflvl_values, base_lvl, lf_delta, seg_delta, mr_delta);
    else
        memset(lflvl_values, 0, sizeof(*lflvl_values) * 8);
}
464
dav1d_calc_lf_values(uint8_t (* const lflvl_values)[4][8][2],const Dav1dFrameHeader * const hdr,const int8_t lf_delta[4])465 void dav1d_calc_lf_values(uint8_t (*const lflvl_values)[4][8][2],
466 const Dav1dFrameHeader *const hdr,
467 const int8_t lf_delta[4])
468 {
469 const int n_seg = hdr->segmentation.enabled ? 8 : 1;
470
471 if (!hdr->loopfilter.level_y[0] && !hdr->loopfilter.level_y[1]) {
472 memset(lflvl_values, 0, sizeof(*lflvl_values) * n_seg);
473 return;
474 }
475
476 const Dav1dLoopfilterModeRefDeltas *const mr_deltas =
477 hdr->loopfilter.mode_ref_delta_enabled ?
478 &hdr->loopfilter.mode_ref_deltas : NULL;
479 for (int s = 0; s < n_seg; s++) {
480 const Dav1dSegmentationData *const segd =
481 hdr->segmentation.enabled ? &hdr->segmentation.seg_data.d[s] : NULL;
482
483 calc_lf_value(lflvl_values[s][0], hdr->loopfilter.level_y[0],
484 lf_delta[0], segd ? segd->delta_lf_y_v : 0, mr_deltas);
485 calc_lf_value(lflvl_values[s][1], hdr->loopfilter.level_y[1],
486 lf_delta[hdr->delta.lf.multi ? 1 : 0],
487 segd ? segd->delta_lf_y_h : 0, mr_deltas);
488 calc_lf_value_chroma(lflvl_values[s][2], hdr->loopfilter.level_u,
489 lf_delta[hdr->delta.lf.multi ? 2 : 0],
490 segd ? segd->delta_lf_u : 0, mr_deltas);
491 calc_lf_value_chroma(lflvl_values[s][3], hdr->loopfilter.level_v,
492 lf_delta[hdr->delta.lf.multi ? 3 : 0],
493 segd ? segd->delta_lf_v : 0, mr_deltas);
494 }
495 }
496