1 /*
2 * Copyright (c) 2020, Alliance for Open Media. All rights reserved
3 *
4 * This source code is subject to the terms of the BSD 2 Clause License and
5 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6 * was not distributed with this source code in the LICENSE file, you can
7 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8 * Media Patent License 1.0 was not distributed with this source code in the
9 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10 */
11
12 #include "av1/common/av1_common_int.h"
13 #include "av1/common/cfl.h"
14 #include "av1/common/reconintra.h"
15
16 #include "av1/encoder/intra_mode_search.h"
17 #include "av1/encoder/intra_mode_search_utils.h"
18 #include "av1/encoder/palette.h"
19 #include "av1/encoder/speed_features.h"
20 #include "av1/encoder/tx_search.h"
21
22 // Even though there are 7 delta angles, this macro is set to 9 to facilitate
23 // the rd threshold check to prune -3 and 3 delta angles.
24 #define SIZE_OF_ANGLE_DELTA_RD_COST_ARRAY (2 * MAX_ANGLE_DELTA + 3)
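// Editorial sketch (assumption, not from the original source): the RD cost
// array of this size is assumed to be indexed by
// (angle_delta + MAX_ANGLE_DELTA + 1), so with MAX_ANGLE_DELTA == 3 the nine
// slots cover deltas -4..4 and the threshold check for -3 and 3 can always
// read both neighboring slots without going out of bounds:
//   delta: -4 -3 -2 -1  0  1  2  3  4
//   index:  0  1  2  3  4  5  6  7  8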
25
26 // The order for evaluating delta angles while processing the luma directional
27 // intra modes. Currently, this order of evaluation is applicable only when
28 // speed feature prune_luma_odd_delta_angles_in_intra is enabled. In this case,
29 // even angles are evaluated first in order to facilitate the pruning of odd
30 // delta angles based on the rd costs of the neighboring delta angles.
31 static const int8_t luma_delta_angles_order[2 * MAX_ANGLE_DELTA] = {
32 -2, 2, -3, -1, 1, 3,
33 };
34
35 /*!\cond */
36 static const PREDICTION_MODE intra_rd_search_mode_order[INTRA_MODES] = {
37 DC_PRED, H_PRED, V_PRED, SMOOTH_PRED, PAETH_PRED,
38 SMOOTH_V_PRED, SMOOTH_H_PRED, D135_PRED, D203_PRED, D157_PRED,
39 D67_PRED, D113_PRED, D45_PRED,
40 };
41
42 static const UV_PREDICTION_MODE uv_rd_search_mode_order[UV_INTRA_MODES] = {
43 UV_DC_PRED, UV_CFL_PRED, UV_H_PRED, UV_V_PRED,
44 UV_SMOOTH_PRED, UV_PAETH_PRED, UV_SMOOTH_V_PRED, UV_SMOOTH_H_PRED,
45 UV_D135_PRED, UV_D203_PRED, UV_D157_PRED, UV_D67_PRED,
46 UV_D113_PRED, UV_D45_PRED,
47 };
48
49 // The bitmask corresponds to the filter intra modes as defined in the
50 // FILTER_INTRA_MODE enumeration type in enums.h. Setting a bit to 0 in the
51 // mask disables the evaluation of the corresponding filter intra mode. The
52 // table av1_derived_filter_intra_mode_used_flag is used when the speed
53 // feature prune_filter_intra_level is 1. The evaluated filter intra modes are
54 // the union of the following:
55 // 1) FILTER_DC_PRED
56 // 2) the mode corresponding to the best mode so far among DC_PRED, V_PRED,
57 // H_PRED, D157_PRED and PAETH_PRED (e.g., FILTER_V_PRED if it is V_PRED).
58 static const uint8_t av1_derived_filter_intra_mode_used_flag[INTRA_MODES] = {
59 0x01, // DC_PRED: 0000 0001
60 0x03, // V_PRED: 0000 0011
61 0x05, // H_PRED: 0000 0101
62 0x01, // D45_PRED: 0000 0001
63 0x01, // D135_PRED: 0000 0001
64 0x01, // D113_PRED: 0000 0001
65 0x09, // D157_PRED: 0000 1001
66 0x01, // D203_PRED: 0000 0001
67 0x01, // D67_PRED: 0000 0001
68 0x01, // SMOOTH_PRED: 0000 0001
69 0x01, // SMOOTH_V_PRED: 0000 0001
70 0x01, // SMOOTH_H_PRED: 0000 0001
71 0x11 // PAETH_PRED: 0001 0001
72 };
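// Worked example (editorial note, assuming the FILTER_INTRA_MODE enum order
// FILTER_DC_PRED = 0, FILTER_V_PRED = 1, FILTER_H_PRED = 2,
// FILTER_D157_PRED = 3, FILTER_PAETH_PRED = 4): if the best luma mode so far
// is V_PRED, its mask 0x03 sets bits 0 and 1, so only FILTER_DC_PRED and
// FILTER_V_PRED are evaluated; for D45_PRED the mask 0x01 leaves only
// FILTER_DC_PRED.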
73
74 // The bitmask corresponds to the chroma intra modes as defined in the
75 // UV_PREDICTION_MODE enumeration type in enums.h. Setting a bit to 0 in the
76 // mask disables the evaluation of the corresponding chroma intra mode. The
77 // table av1_derived_chroma_intra_mode_used_flag is used when the speed
78 // feature prune_chroma_modes_using_luma_winner is enabled. The evaluated
79 // chroma intra modes are the union of the following:
80 // 1) UV_DC_PRED
81 // 2) UV_SMOOTH_PRED
82 // 3) UV_CFL_PRED
83 // 4) the mode that corresponds to the luma intra mode winner (e.g., UV_V_PRED
84 // if the luma intra mode winner is V_PRED).
85 static const uint16_t av1_derived_chroma_intra_mode_used_flag[INTRA_MODES] = {
86 0x2201, // DC_PRED: 0010 0010 0000 0001
87 0x2203, // V_PRED: 0010 0010 0000 0011
88 0x2205, // H_PRED: 0010 0010 0000 0101
89 0x2209, // D45_PRED: 0010 0010 0000 1001
90 0x2211, // D135_PRED: 0010 0010 0001 0001
91 0x2221, // D113_PRED: 0010 0010 0010 0001
92 0x2241, // D157_PRED: 0010 0010 0100 0001
93 0x2281, // D203_PRED: 0010 0010 1000 0001
94 0x2301, // D67_PRED: 0010 0011 0000 0001
95 0x2201, // SMOOTH_PRED: 0010 0010 0000 0001
96 0x2601, // SMOOTH_V_PRED: 0010 0110 0000 0001
97 0x2a01, // SMOOTH_H_PRED: 0010 1010 0000 0001
98 0x3201 // PAETH_PRED: 0011 0010 0000 0001
99 };
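// Worked example (editorial note, assuming the UV_PREDICTION_MODE enum order
// implied by the comments above): if the luma winner is V_PRED, its mask
// 0x2203 sets bits 0, 1, 9 and 13, i.e. UV_DC_PRED, UV_V_PRED, UV_SMOOTH_PRED
// and UV_CFL_PRED are evaluated and every other chroma mode is skipped.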
100
101 DECLARE_ALIGNED(16, static const uint8_t, all_zeros[MAX_SB_SIZE]) = { 0 };
102 DECLARE_ALIGNED(16, static const uint16_t,
103 highbd_all_zeros[MAX_SB_SIZE]) = { 0 };
104
105 int av1_calc_normalized_variance(aom_variance_fn_t vf, const uint8_t *const buf,
106 const int stride, const int is_hbd) {
107 unsigned int sse;
108
109 if (is_hbd)
110 return vf(buf, stride, CONVERT_TO_BYTEPTR(highbd_all_zeros), 0, &sse);
111 else
112 return vf(buf, stride, all_zeros, 0, &sse);
113 }
114
115 // Computes average of log(1 + variance) across 4x4 sub-blocks for source and
116 // reconstructed blocks.
117 static void compute_avg_log_variance(const AV1_COMP *const cpi, MACROBLOCK *x,
118 const BLOCK_SIZE bs,
119 double *avg_log_src_variance,
120 double *avg_log_recon_variance) {
121 const MACROBLOCKD *const xd = &x->e_mbd;
122 const BLOCK_SIZE sb_size = cpi->common.seq_params->sb_size;
123 const int mi_row_in_sb = x->e_mbd.mi_row & (mi_size_high[sb_size] - 1);
124 const int mi_col_in_sb = x->e_mbd.mi_col & (mi_size_wide[sb_size] - 1);
125 const int right_overflow =
126 (xd->mb_to_right_edge < 0) ? ((-xd->mb_to_right_edge) >> 3) : 0;
127 const int bottom_overflow =
128 (xd->mb_to_bottom_edge < 0) ? ((-xd->mb_to_bottom_edge) >> 3) : 0;
129 const int bw = (MI_SIZE * mi_size_wide[bs] - right_overflow);
130 const int bh = (MI_SIZE * mi_size_high[bs] - bottom_overflow);
131 const int is_hbd = is_cur_buf_hbd(xd);
132
133 for (int i = 0; i < bh; i += MI_SIZE) {
134 const int r = mi_row_in_sb + (i >> MI_SIZE_LOG2);
135 for (int j = 0; j < bw; j += MI_SIZE) {
136 const int c = mi_col_in_sb + (j >> MI_SIZE_LOG2);
137 const int mi_offset = r * mi_size_wide[sb_size] + c;
138 Block4x4VarInfo *block_4x4_var_info =
139 &x->src_var_info_of_4x4_sub_blocks[mi_offset];
140 int src_var = block_4x4_var_info->var;
141 double log_src_var = block_4x4_var_info->log_var;
142 // Compute the average of log(1 + variance) for the source block from the
143 // 4x4 sub-block variance values. If the values stored in
144 // src_var_info_of_4x4_sub_blocks are invalid, calculate the 4x4 sub-block
145 // variance and log(1 + variance) and store them; otherwise reuse the
146 // readily available valid values.
147 if (src_var < 0) {
148 src_var = av1_calc_normalized_variance(
149 cpi->ppi->fn_ptr[BLOCK_4X4].vf,
150 x->plane[0].src.buf + i * x->plane[0].src.stride + j,
151 x->plane[0].src.stride, is_hbd);
152 block_4x4_var_info->var = src_var;
153 log_src_var = log1p(src_var / 16.0);
154 block_4x4_var_info->log_var = log_src_var;
155 } else {
156 // When source variance is already calculated and available for
157 // retrieval, check if log(1 + variance) is also available. If it is
158 // available, then retrieve from buffer. Else, calculate the same and
159 // store to the buffer.
160 if (log_src_var < 0) {
161 log_src_var = log1p(src_var / 16.0);
162 block_4x4_var_info->log_var = log_src_var;
163 }
164 }
165 *avg_log_src_variance += log_src_var;
166
167 const int recon_var = av1_calc_normalized_variance(
168 cpi->ppi->fn_ptr[BLOCK_4X4].vf,
169 xd->plane[0].dst.buf + i * xd->plane[0].dst.stride + j,
170 xd->plane[0].dst.stride, is_hbd);
171 *avg_log_recon_variance += log1p(recon_var / 16.0);
172 }
173 }
174
175 const int blocks = (bw * bh) / 16;
176 *avg_log_src_variance /= (double)blocks;
177 *avg_log_recon_variance /= (double)blocks;
178 }
179
180 // Returns a factor to be applied to the RD value based on how well the
181 // reconstructed block variance matches the source variance.
182 static double intra_rd_variance_factor(const AV1_COMP *cpi, MACROBLOCK *x,
183 BLOCK_SIZE bs) {
184 double threshold = INTRA_RD_VAR_THRESH(cpi->oxcf.speed);
185 // For non-positive threshold values, the comparisons of the source and
186 // reconstructed variances against the threshold (src_var < threshold and
187 // rec_var < threshold) evaluate to false, as these metrics are greater
188 // than 0. Hence further calculations are skipped.
189 if (threshold <= 0) return 1.0;
190
191 double variance_rd_factor = 1.0;
192 double avg_log_src_variance = 0.0;
193 double avg_log_recon_variance = 0.0;
194 double var_diff = 0.0;
195
196 compute_avg_log_variance(cpi, x, bs, &avg_log_src_variance,
197 &avg_log_recon_variance);
198
199 // Don't allow 0 to prevent division by 0 below.
200 avg_log_src_variance += 0.000001;
201 avg_log_recon_variance += 0.000001;
202
203 if (avg_log_src_variance >= avg_log_recon_variance) {
204 var_diff = (avg_log_src_variance - avg_log_recon_variance);
205 if ((var_diff > 0.5) && (avg_log_recon_variance < threshold)) {
206 variance_rd_factor = 1.0 + ((var_diff * 2) / avg_log_src_variance);
207 }
208 } else {
209 var_diff = (avg_log_recon_variance - avg_log_src_variance);
210 if ((var_diff > 0.5) && (avg_log_src_variance < threshold)) {
211 variance_rd_factor = 1.0 + (var_diff / (2 * avg_log_src_variance));
212 }
213 }
214
215 // Limit the adjustment.
216 variance_rd_factor = AOMMIN(3.0, variance_rd_factor);
217
218 return variance_rd_factor;
219 }
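// Worked example (editorial note): with avg_log_src_variance = 3.0,
// avg_log_recon_variance = 2.0 and a threshold larger than 2.0, var_diff =
// 1.0 exceeds 0.5, so variance_rd_factor = 1.0 + (1.0 * 2) / 3.0 ~= 1.67;
// the RD cost of a mode that smooths away source detail is therefore
// inflated, and the factor is always capped at 3.0.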
220 /*!\endcond */
221
222 /*!\brief Search for the best filter_intra mode when coding intra frame.
223 *
224 * \ingroup intra_mode_search
225 * \callergraph
226 * This function loops through all filter_intra modes to find the best one.
227 *
228 * \return Returns 1 if a new filter_intra mode is selected; 0 otherwise.
229 */
230 static int rd_pick_filter_intra_sby(const AV1_COMP *const cpi, MACROBLOCK *x,
231 int *rate, int *rate_tokenonly,
232 int64_t *distortion, uint8_t *skippable,
233 BLOCK_SIZE bsize, int mode_cost,
234 PREDICTION_MODE best_mode_so_far,
235 int64_t *best_rd, int64_t *best_model_rd,
236 PICK_MODE_CONTEXT *ctx) {
237 // Skip the evaluation of filter intra modes.
238 if (cpi->sf.intra_sf.prune_filter_intra_level == 2) return 0;
239
240 MACROBLOCKD *const xd = &x->e_mbd;
241 MB_MODE_INFO *mbmi = xd->mi[0];
242 int filter_intra_selected_flag = 0;
243 FILTER_INTRA_MODE mode;
244 TX_SIZE best_tx_size = TX_8X8;
245 FILTER_INTRA_MODE_INFO filter_intra_mode_info;
246 uint8_t best_tx_type_map[MAX_MIB_SIZE * MAX_MIB_SIZE];
247 av1_zero(filter_intra_mode_info);
248 mbmi->filter_intra_mode_info.use_filter_intra = 1;
249 mbmi->mode = DC_PRED;
250 mbmi->palette_mode_info.palette_size[0] = 0;
251
252 // Skip the evaluation of filter-intra if cached MB_MODE_INFO does not have
253 // filter-intra as winner.
254 if (x->use_mb_mode_cache &&
255 !x->mb_mode_cache->filter_intra_mode_info.use_filter_intra)
256 return 0;
257
258 for (mode = 0; mode < FILTER_INTRA_MODES; ++mode) {
259 int64_t this_rd;
260 RD_STATS tokenonly_rd_stats;
261 mbmi->filter_intra_mode_info.filter_intra_mode = mode;
262
263 if ((cpi->sf.intra_sf.prune_filter_intra_level == 1) &&
264 !(av1_derived_filter_intra_mode_used_flag[best_mode_so_far] &
265 (1 << mode)))
266 continue;
267
268 // Skip the evaluation of modes that do not match with the winner mode in
269 // x->mb_mode_cache.
270 if (x->use_mb_mode_cache &&
271 mode != x->mb_mode_cache->filter_intra_mode_info.filter_intra_mode)
272 continue;
273
274 if (model_intra_yrd_and_prune(cpi, x, bsize, best_model_rd)) {
275 continue;
276 }
277 av1_pick_uniform_tx_size_type_yrd(cpi, x, &tokenonly_rd_stats, bsize,
278 *best_rd);
279 if (tokenonly_rd_stats.rate == INT_MAX) continue;
280 const int this_rate =
281 tokenonly_rd_stats.rate +
282 intra_mode_info_cost_y(cpi, x, mbmi, bsize, mode_cost, 0);
283 this_rd = RDCOST(x->rdmult, this_rate, tokenonly_rd_stats.dist);
284
285 // Visual quality adjustment based on recon vs source variance.
286 if ((cpi->oxcf.mode == ALLINTRA) && (this_rd != INT64_MAX)) {
287 this_rd = (int64_t)(this_rd * intra_rd_variance_factor(cpi, x, bsize));
288 }
289
290 // Collect mode stats for multiwinner mode processing
291 const int txfm_search_done = 1;
292 store_winner_mode_stats(
293 &cpi->common, x, mbmi, NULL, NULL, NULL, 0, NULL, bsize, this_rd,
294 cpi->sf.winner_mode_sf.multi_winner_mode_type, txfm_search_done);
295 if (this_rd < *best_rd) {
296 *best_rd = this_rd;
297 best_tx_size = mbmi->tx_size;
298 filter_intra_mode_info = mbmi->filter_intra_mode_info;
299 av1_copy_array(best_tx_type_map, xd->tx_type_map, ctx->num_4x4_blk);
300 memcpy(ctx->blk_skip, x->txfm_search_info.blk_skip,
301 sizeof(x->txfm_search_info.blk_skip[0]) * ctx->num_4x4_blk);
302 *rate = this_rate;
303 *rate_tokenonly = tokenonly_rd_stats.rate;
304 *distortion = tokenonly_rd_stats.dist;
305 *skippable = tokenonly_rd_stats.skip_txfm;
306 filter_intra_selected_flag = 1;
307 }
308 }
309
310 if (filter_intra_selected_flag) {
311 mbmi->mode = DC_PRED;
312 mbmi->tx_size = best_tx_size;
313 mbmi->filter_intra_mode_info = filter_intra_mode_info;
314 av1_copy_array(ctx->tx_type_map, best_tx_type_map, ctx->num_4x4_blk);
315 return 1;
316 } else {
317 return 0;
318 }
319 }
320
321 void av1_count_colors(const uint8_t *src, int stride, int rows, int cols,
322 int *val_count, int *num_colors) {
323 const int max_pix_val = 1 << 8;
324 memset(val_count, 0, max_pix_val * sizeof(val_count[0]));
325 for (int r = 0; r < rows; ++r) {
326 for (int c = 0; c < cols; ++c) {
327 const int this_val = src[r * stride + c];
328 assert(this_val < max_pix_val);
329 ++val_count[this_val];
330 }
331 }
332 int n = 0;
333 for (int i = 0; i < max_pix_val; ++i) {
334 if (val_count[i]) ++n;
335 }
336 *num_colors = n;
337 }
338
339 void av1_count_colors_highbd(const uint8_t *src8, int stride, int rows,
340 int cols, int bit_depth, int *val_count,
341 int *bin_val_count, int *num_color_bins,
342 int *num_colors) {
343 assert(bit_depth <= 12);
344 const int max_bin_val = 1 << 8;
345 const int max_pix_val = 1 << bit_depth;
346 const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
347 memset(bin_val_count, 0, max_bin_val * sizeof(val_count[0]));
348 if (val_count != NULL)
349 memset(val_count, 0, max_pix_val * sizeof(val_count[0]));
350 for (int r = 0; r < rows; ++r) {
351 for (int c = 0; c < cols; ++c) {
352 /*
353 * Down-convert the pixels to 8-bit domain before counting.
354 * This provides consistency of behavior for palette search
355 * between lbd and hbd encodes. These down-converted pixels
356 * are only used for calculating the threshold (n).
357 */
358 const int this_val = ((src[r * stride + c]) >> (bit_depth - 8));
359 assert(this_val < max_bin_val);
360 if (this_val >= max_bin_val) continue;
361 ++bin_val_count[this_val];
362 if (val_count != NULL) ++val_count[(src[r * stride + c])];
363 }
364 }
365 int n = 0;
366 // Count the colors based on 8-bit domain used to gate the palette path
367 for (int i = 0; i < max_bin_val; ++i) {
368 if (bin_val_count[i]) ++n;
369 }
370 *num_color_bins = n;
371
372 // Count the actual hbd colors used to create top_colors
373 n = 0;
374 if (val_count != NULL) {
375 for (int i = 0; i < max_pix_val; ++i) {
376 if (val_count[i]) ++n;
377 }
378 *num_colors = n;
379 }
380 }
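// Worked example (editorial note): for bit_depth = 10, a pixel value of 612
// is counted in bin 612 >> 2 = 153, so bin_val_count uses at most 256 bins
// while val_count (when provided) still counts the full 1024 distinct 10-bit
// values.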
381
382 void set_y_mode_and_delta_angle(const int mode_idx, MB_MODE_INFO *const mbmi,
383 int reorder_delta_angle_eval) {
384 if (mode_idx < INTRA_MODE_END) {
385 mbmi->mode = intra_rd_search_mode_order[mode_idx];
386 mbmi->angle_delta[PLANE_TYPE_Y] = 0;
387 } else {
388 mbmi->mode = (mode_idx - INTRA_MODE_END) / (MAX_ANGLE_DELTA * 2) + V_PRED;
389 int delta_angle_eval_idx =
390 (mode_idx - INTRA_MODE_END) % (MAX_ANGLE_DELTA * 2);
391 if (reorder_delta_angle_eval) {
392 mbmi->angle_delta[PLANE_TYPE_Y] =
393 luma_delta_angles_order[delta_angle_eval_idx];
394 } else {
395 mbmi->angle_delta[PLANE_TYPE_Y] =
396 (delta_angle_eval_idx < 3 ? (delta_angle_eval_idx - 3)
397 : (delta_angle_eval_idx - 2));
398 }
399 }
400 }
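// Worked example (editorial note, assuming INTRA_MODE_END == 13 and
// MAX_ANGLE_DELTA == 3): mode_idx = 14 selects V_PRED with
// delta_angle_eval_idx = 1, which maps to a delta angle of 2 when
// reorder_delta_angle_eval is set (luma_delta_angles_order[1]) and to -2
// otherwise (1 < 3, so 1 - 3 = -2).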
401
402 static AOM_INLINE int get_model_rd_index_for_pruning(
403 const MACROBLOCK *const x,
404 const INTRA_MODE_SPEED_FEATURES *const intra_sf) {
405 const int top_intra_model_count_allowed =
406 intra_sf->top_intra_model_count_allowed;
407 if (!intra_sf->adapt_top_model_rd_count_using_neighbors)
408 return top_intra_model_count_allowed - 1;
409
410 const MACROBLOCKD *const xd = &x->e_mbd;
411 const PREDICTION_MODE mode = xd->mi[0]->mode;
412 int model_rd_index_for_pruning = top_intra_model_count_allowed - 1;
413 int is_left_mode_neq_cur_mode = 0, is_above_mode_neq_cur_mode = 0;
414 if (xd->left_available)
415 is_left_mode_neq_cur_mode = xd->left_mbmi->mode != mode;
416 if (xd->up_available)
417 is_above_mode_neq_cur_mode = xd->above_mbmi->mode != mode;
418 // The pruning of luma intra modes is made more aggressive at lower quantizers
419 // and vice versa. The value for model_rd_index_for_pruning is derived as
420 // follows.
421 // qidx 0 to 127: Reduce the index of a candidate used for comparison only if
422 // the current mode does not match either of the available neighboring modes.
423 // qidx 128 to 255: Reduce the index of a candidate used for comparison only
424 // if the current mode does not match both the available neighboring modes.
425 if (x->qindex <= 127) {
426 if (is_left_mode_neq_cur_mode || is_above_mode_neq_cur_mode)
427 model_rd_index_for_pruning = AOMMAX(model_rd_index_for_pruning - 1, 0);
428 } else {
429 if (is_left_mode_neq_cur_mode && is_above_mode_neq_cur_mode)
430 model_rd_index_for_pruning = AOMMAX(model_rd_index_for_pruning - 1, 0);
431 }
432 return model_rd_index_for_pruning;
433 }
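// Worked example (editorial note): with top_intra_model_count_allowed = 3 the
// default comparison index is 2; at qindex = 100 a mismatch with either
// available neighboring mode lowers it to 1, whereas at qindex = 200 both
// available neighbors must differ from the current mode for the same
// reduction.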
434
435 int prune_intra_y_mode(int64_t this_model_rd, int64_t *best_model_rd,
436 int64_t top_intra_model_rd[], int max_model_cnt_allowed,
437 int model_rd_index_for_pruning) {
438 const double thresh_best = 1.50;
439 const double thresh_top = 1.00;
440 for (int i = 0; i < max_model_cnt_allowed; i++) {
441 if (this_model_rd < top_intra_model_rd[i]) {
442 for (int j = max_model_cnt_allowed - 1; j > i; j--) {
443 top_intra_model_rd[j] = top_intra_model_rd[j - 1];
444 }
445 top_intra_model_rd[i] = this_model_rd;
446 break;
447 }
448 }
449 if (top_intra_model_rd[model_rd_index_for_pruning] != INT64_MAX &&
450 this_model_rd >
451 thresh_top * top_intra_model_rd[model_rd_index_for_pruning])
452 return 1;
453
454 if (this_model_rd != INT64_MAX &&
455 this_model_rd > thresh_best * (*best_model_rd))
456 return 1;
457 if (this_model_rd < *best_model_rd) *best_model_rd = this_model_rd;
458 return 0;
459 }
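// Worked example (editorial note): with thresh_best = 1.50, a candidate whose
// model RD is 160000 is pruned once *best_model_rd has already reached 100000
// (160000 > 1.5 * 100000); with thresh_top = 1.00 it is also pruned whenever
// it is worse than the tracked model RD stored at model_rd_index_for_pruning.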
460
461 // Run the RD calculation with the given chroma intra prediction angle and
462 // return the RD cost. Update the best mode info if this cost is the best so far.
463 static int64_t pick_intra_angle_routine_sbuv(
464 const AV1_COMP *const cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
465 int rate_overhead, int64_t best_rd_in, int *rate, RD_STATS *rd_stats,
466 int *best_angle_delta, int64_t *best_rd) {
467 MB_MODE_INFO *mbmi = x->e_mbd.mi[0];
468 assert(!is_inter_block(mbmi));
469 int this_rate;
470 int64_t this_rd;
471 RD_STATS tokenonly_rd_stats;
472
473 if (!av1_txfm_uvrd(cpi, x, &tokenonly_rd_stats, bsize, best_rd_in))
474 return INT64_MAX;
475 this_rate = tokenonly_rd_stats.rate +
476 intra_mode_info_cost_uv(cpi, x, mbmi, bsize, rate_overhead);
477 this_rd = RDCOST(x->rdmult, this_rate, tokenonly_rd_stats.dist);
478 if (this_rd < *best_rd) {
479 *best_rd = this_rd;
480 *best_angle_delta = mbmi->angle_delta[PLANE_TYPE_UV];
481 *rate = this_rate;
482 rd_stats->rate = tokenonly_rd_stats.rate;
483 rd_stats->dist = tokenonly_rd_stats.dist;
484 rd_stats->skip_txfm = tokenonly_rd_stats.skip_txfm;
485 }
486 return this_rd;
487 }
488
489 /*!\brief Search for the best angle delta for chroma prediction
490 *
491 * \ingroup intra_mode_search
492 * \callergraph
493 * Given a chroma directional intra prediction mode, this function will try to
494 * estimate the best delta_angle.
495 *
496 * \returns Returns whether a new mode with a smaller rdcost than best_rd was found.
497 */
498 static int rd_pick_intra_angle_sbuv(const AV1_COMP *const cpi, MACROBLOCK *x,
499 BLOCK_SIZE bsize, int rate_overhead,
500 int64_t best_rd, int *rate,
501 RD_STATS *rd_stats) {
502 MACROBLOCKD *const xd = &x->e_mbd;
503 MB_MODE_INFO *mbmi = xd->mi[0];
504 assert(!is_inter_block(mbmi));
505 int i, angle_delta, best_angle_delta = 0;
506 int64_t this_rd, best_rd_in, rd_cost[2 * (MAX_ANGLE_DELTA + 2)];
507
508 rd_stats->rate = INT_MAX;
509 rd_stats->skip_txfm = 0;
510 rd_stats->dist = INT64_MAX;
511 for (i = 0; i < 2 * (MAX_ANGLE_DELTA + 2); ++i) rd_cost[i] = INT64_MAX;
512
513 for (angle_delta = 0; angle_delta <= MAX_ANGLE_DELTA; angle_delta += 2) {
514 for (i = 0; i < 2; ++i) {
515 best_rd_in = (best_rd == INT64_MAX)
516 ? INT64_MAX
517 : (best_rd + (best_rd >> ((angle_delta == 0) ? 3 : 5)));
518 mbmi->angle_delta[PLANE_TYPE_UV] = (1 - 2 * i) * angle_delta;
519 this_rd = pick_intra_angle_routine_sbuv(cpi, x, bsize, rate_overhead,
520 best_rd_in, rate, rd_stats,
521 &best_angle_delta, &best_rd);
522 rd_cost[2 * angle_delta + i] = this_rd;
523 if (angle_delta == 0) {
524 if (this_rd == INT64_MAX) return 0;
525 rd_cost[1] = this_rd;
526 break;
527 }
528 }
529 }
530
531 assert(best_rd != INT64_MAX);
532 for (angle_delta = 1; angle_delta <= MAX_ANGLE_DELTA; angle_delta += 2) {
533 int64_t rd_thresh;
534 for (i = 0; i < 2; ++i) {
535 int skip_search = 0;
536 rd_thresh = best_rd + (best_rd >> 5);
537 if (rd_cost[2 * (angle_delta + 1) + i] > rd_thresh &&
538 rd_cost[2 * (angle_delta - 1) + i] > rd_thresh)
539 skip_search = 1;
540 if (!skip_search) {
541 mbmi->angle_delta[PLANE_TYPE_UV] = (1 - 2 * i) * angle_delta;
542 pick_intra_angle_routine_sbuv(cpi, x, bsize, rate_overhead, best_rd,
543 rate, rd_stats, &best_angle_delta,
544 &best_rd);
545 }
546 }
547 }
548
549 mbmi->angle_delta[PLANE_TYPE_UV] = best_angle_delta;
550 return rd_stats->rate != INT_MAX;
551 }
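// Editorial note on the search above: the even pass stores its costs at
// rd_cost[2 * angle_delta + i] for both signs, and the odd pass skips a
// candidate only when both even neighbors are already above
// best_rd + (best_rd >> 5). For example, delta +1 (angle_delta = 1, i = 0) is
// skipped only if rd_cost[4] (delta +2) and rd_cost[0] (delta 0) both exceed
// that threshold.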
552
553 #define PLANE_SIGN_TO_JOINT_SIGN(plane, a, b) \
554 (plane == CFL_PRED_U ? a * CFL_SIGNS + b - 1 : b * CFL_SIGNS + a - 1)
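// Worked example (editorial note, assuming CFL_SIGNS == 3 and the ordering
// CFL_SIGN_ZERO = 0, CFL_SIGN_NEG = 1, CFL_SIGN_POS = 2): signs
// (U = POS, V = NEG) give a joint sign of 2 * 3 + 1 - 1 = 6, while the
// (ZERO, ZERO) pair would give -1, which is why that combination is rejected
// as invalid later in cfl_rd_pick_alpha().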
555
556 static void cfl_idx_to_sign_and_alpha(int cfl_idx, CFL_SIGN_TYPE *cfl_sign,
557 int *cfl_alpha) {
558 int cfl_linear_idx = cfl_idx - CFL_INDEX_ZERO;
559 if (cfl_linear_idx == 0) {
560 *cfl_sign = CFL_SIGN_ZERO;
561 *cfl_alpha = 0;
562 } else {
563 *cfl_sign = cfl_linear_idx > 0 ? CFL_SIGN_POS : CFL_SIGN_NEG;
564 *cfl_alpha = abs(cfl_linear_idx) - 1;
565 }
566 }
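// Worked example (editorial note): cfl_idx values above CFL_INDEX_ZERO map to
// positive alphas and values below it to negative ones. For instance,
// cfl_idx = CFL_INDEX_ZERO + 4 yields sign CFL_SIGN_POS with alpha 3, and
// cfl_idx = CFL_INDEX_ZERO - 6 yields sign CFL_SIGN_NEG with alpha 5.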
567
568 static int64_t cfl_compute_rd(const AV1_COMP *const cpi, MACROBLOCK *x,
569 int plane, TX_SIZE tx_size,
570 BLOCK_SIZE plane_bsize, int cfl_idx,
571 int fast_mode, RD_STATS *rd_stats) {
572 assert(IMPLIES(fast_mode, rd_stats == NULL));
573 const AV1_COMMON *const cm = &cpi->common;
574 MACROBLOCKD *const xd = &x->e_mbd;
575 MB_MODE_INFO *const mbmi = xd->mi[0];
576 int cfl_plane = get_cfl_pred_type(plane);
577 CFL_SIGN_TYPE cfl_sign;
578 int cfl_alpha;
579 cfl_idx_to_sign_and_alpha(cfl_idx, &cfl_sign, &cfl_alpha);
580 // We only build CFL for a given plane; the other plane's sign is a dummy.
581 int dummy_sign = CFL_SIGN_NEG;
582 const int8_t orig_cfl_alpha_signs = mbmi->cfl_alpha_signs;
583 const uint8_t orig_cfl_alpha_idx = mbmi->cfl_alpha_idx;
584 mbmi->cfl_alpha_signs =
585 PLANE_SIGN_TO_JOINT_SIGN(cfl_plane, cfl_sign, dummy_sign);
586 mbmi->cfl_alpha_idx = (cfl_alpha << CFL_ALPHABET_SIZE_LOG2) + cfl_alpha;
587 int64_t cfl_cost;
588 if (fast_mode) {
589 cfl_cost =
590 intra_model_rd(cm, x, plane, plane_bsize, tx_size, /*use_hadamard=*/0);
591 } else {
592 av1_init_rd_stats(rd_stats);
593 av1_txfm_rd_in_plane(x, cpi, rd_stats, INT64_MAX, 0, plane, plane_bsize,
594 tx_size, FTXS_NONE, 0);
595 av1_rd_cost_update(x->rdmult, rd_stats);
596 cfl_cost = rd_stats->rdcost;
597 }
598 mbmi->cfl_alpha_signs = orig_cfl_alpha_signs;
599 mbmi->cfl_alpha_idx = orig_cfl_alpha_idx;
600 return cfl_cost;
601 }
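// Editorial note: cfl_alpha_idx above packs the same alpha magnitude into
// both the U (high) and V (low) bit fields of the index, mirroring the
// (cfl_alpha_u << CFL_ALPHABET_SIZE_LOG2) + cfl_alpha_v packing used when the
// final parameters are selected; only the plane under test matters here
// because the other plane's sign is a dummy and its prediction is not
// evaluated.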
602
603 static const int cfl_dir_ls[2] = { 1, -1 };
604
605 // If cfl_search_range is CFL_MAGS_SIZE, return CFL_INDEX_ZERO. Otherwise return
606 // the index of the best alpha found using intra_model_rd().
607 static int cfl_pick_plane_parameter(const AV1_COMP *const cpi, MACROBLOCK *x,
608 int plane, TX_SIZE tx_size,
609 int cfl_search_range) {
610 assert(cfl_search_range >= 1 && cfl_search_range <= CFL_MAGS_SIZE);
611
612 if (cfl_search_range == CFL_MAGS_SIZE) return CFL_INDEX_ZERO;
613
614 const MACROBLOCKD *const xd = &x->e_mbd;
615 const MB_MODE_INFO *const mbmi = xd->mi[0];
616 assert(mbmi->uv_mode == UV_CFL_PRED);
617 const MACROBLOCKD_PLANE *pd = &xd->plane[plane];
618 const BLOCK_SIZE plane_bsize =
619 get_plane_block_size(mbmi->bsize, pd->subsampling_x, pd->subsampling_y);
620
621 int est_best_cfl_idx = CFL_INDEX_ZERO;
622 int fast_mode = 1;
623 int start_cfl_idx = CFL_INDEX_ZERO;
624 int64_t best_cfl_cost = cfl_compute_rd(cpi, x, plane, tx_size, plane_bsize,
625 start_cfl_idx, fast_mode, NULL);
626 for (int si = 0; si < 2; ++si) {
627 const int dir = cfl_dir_ls[si];
628 for (int i = 1; i < CFL_MAGS_SIZE; ++i) {
629 int cfl_idx = start_cfl_idx + dir * i;
630 if (cfl_idx < 0 || cfl_idx >= CFL_MAGS_SIZE) break;
631 int64_t cfl_cost = cfl_compute_rd(cpi, x, plane, tx_size, plane_bsize,
632 cfl_idx, fast_mode, NULL);
633 if (cfl_cost < best_cfl_cost) {
634 best_cfl_cost = cfl_cost;
635 est_best_cfl_idx = cfl_idx;
636 } else {
637 break;
638 }
639 }
640 }
641 return est_best_cfl_idx;
642 }
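// Worked example (editorial note): starting from CFL_INDEX_ZERO the loop
// above walks outward in each direction and stops at the first index that
// does not improve the fast-mode cost, so a cost sequence of 100 (at zero),
// 90, 85, 95 along one direction settles on the index two steps away from
// zero in that direction.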
643
644 static AOM_INLINE void set_invalid_cfl_parameters(
645 uint8_t *best_cfl_alpha_idx, int8_t *best_cfl_alpha_signs) {
646 *best_cfl_alpha_idx = 0;
647 *best_cfl_alpha_signs = 0;
648 }
649
650 static void cfl_pick_plane_rd(const AV1_COMP *const cpi, MACROBLOCK *x,
651 int plane, TX_SIZE tx_size, int cfl_search_range,
652 RD_STATS cfl_rd_arr[CFL_MAGS_SIZE],
653 int est_best_cfl_idx) {
654 assert(cfl_search_range >= 1 && cfl_search_range <= CFL_MAGS_SIZE);
655 const MACROBLOCKD *const xd = &x->e_mbd;
656 const MB_MODE_INFO *const mbmi = xd->mi[0];
657 assert(mbmi->uv_mode == UV_CFL_PRED);
658 const MACROBLOCKD_PLANE *pd = &xd->plane[plane];
659 const BLOCK_SIZE plane_bsize =
660 get_plane_block_size(mbmi->bsize, pd->subsampling_x, pd->subsampling_y);
661
662 for (int cfl_idx = 0; cfl_idx < CFL_MAGS_SIZE; ++cfl_idx) {
663 av1_invalid_rd_stats(&cfl_rd_arr[cfl_idx]);
664 }
665
666 int fast_mode = 0;
667 int start_cfl_idx = est_best_cfl_idx;
668 cfl_compute_rd(cpi, x, plane, tx_size, plane_bsize, start_cfl_idx, fast_mode,
669 &cfl_rd_arr[start_cfl_idx]);
670
671 if (cfl_search_range == 1) return;
672
673 for (int si = 0; si < 2; ++si) {
674 const int dir = cfl_dir_ls[si];
675 for (int i = 1; i < cfl_search_range; ++i) {
676 int cfl_idx = start_cfl_idx + dir * i;
677 if (cfl_idx < 0 || cfl_idx >= CFL_MAGS_SIZE) break;
678 cfl_compute_rd(cpi, x, plane, tx_size, plane_bsize, cfl_idx, fast_mode,
679 &cfl_rd_arr[cfl_idx]);
680 }
681 }
682 }
683
684 /*!\brief Pick the optimal parameters for the Chroma from Luma (CfL) component
685 *
686 * \ingroup intra_mode_search
687 * \callergraph
688 *
689 * This function will use DCT_DCT followed by computing SATD (sum of absolute
690 * transformed differences) to estimate the RD score and find the best possible
691 * CFL parameter.
692 *
693 * Then the function will apply a full RD search near the best possible CFL
694 * parameter to find the best actual CFL parameter.
695 *
696 * Side effect:
697 * We use the buffers in x->plane[] and xd->plane[] as throw-away buffers for RD
698 * search.
699 *
700 * \param[in] x Encoder prediction block structure.
701 * \param[in] cpi Top-level encoder instance structure.
702 * \param[in] tx_size Transform size.
703 * \param[in] ref_best_rd Reference best RD.
704 * \param[in] cfl_search_range The search range of full RD search near the
705 * estimated best CFL parameter.
706 *
707 * \param[out] best_rd_stats RD stats of the best CFL parameter
708 * \param[out] best_cfl_alpha_idx Best CFL alpha index
709 * \param[out] best_cfl_alpha_signs Best CFL joint signs
710 *
711 */
712 static int cfl_rd_pick_alpha(MACROBLOCK *const x, const AV1_COMP *const cpi,
713 TX_SIZE tx_size, int64_t ref_best_rd,
714 int cfl_search_range, RD_STATS *best_rd_stats,
715 uint8_t *best_cfl_alpha_idx,
716 int8_t *best_cfl_alpha_signs) {
717 assert(cfl_search_range >= 1 && cfl_search_range <= CFL_MAGS_SIZE);
718 const ModeCosts *mode_costs = &x->mode_costs;
719 RD_STATS cfl_rd_arr_u[CFL_MAGS_SIZE];
720 RD_STATS cfl_rd_arr_v[CFL_MAGS_SIZE];
721 MACROBLOCKD *const xd = &x->e_mbd;
722 int est_best_cfl_idx_u, est_best_cfl_idx_v;
723
724 av1_invalid_rd_stats(best_rd_stats);
725
726 // As the dc pred data is the same for different values of alpha, enable the
727 // caching of dc pred data. Call clear_cfl_dc_pred_cache_flags() before
728 // returning to avoid the unintentional usage of cached dc pred data.
729 xd->cfl.use_dc_pred_cache = true;
730 // Evaluate alpha parameter of each chroma plane.
731 est_best_cfl_idx_u =
732 cfl_pick_plane_parameter(cpi, x, 1, tx_size, cfl_search_range);
733 est_best_cfl_idx_v =
734 cfl_pick_plane_parameter(cpi, x, 2, tx_size, cfl_search_range);
735
736 if (cfl_search_range == 1) {
737 // For cfl_search_range=1, further refinement of alpha is not enabled. Hence a
738 // zero alpha (CFL_INDEX_ZERO) for both chroma planes implies an invalid CfL mode.
739 if (est_best_cfl_idx_u == CFL_INDEX_ZERO &&
740 est_best_cfl_idx_v == CFL_INDEX_ZERO) {
741 set_invalid_cfl_parameters(best_cfl_alpha_idx, best_cfl_alpha_signs);
742 clear_cfl_dc_pred_cache_flags(&xd->cfl);
743 return 0;
744 }
745
746 int cfl_alpha_u, cfl_alpha_v;
747 CFL_SIGN_TYPE cfl_sign_u, cfl_sign_v;
748 const MB_MODE_INFO *mbmi = xd->mi[0];
749 cfl_idx_to_sign_and_alpha(est_best_cfl_idx_u, &cfl_sign_u, &cfl_alpha_u);
750 cfl_idx_to_sign_and_alpha(est_best_cfl_idx_v, &cfl_sign_v, &cfl_alpha_v);
751 const int joint_sign = cfl_sign_u * CFL_SIGNS + cfl_sign_v - 1;
752 // Compute alpha and mode signaling rate.
753 const int rate_overhead =
754 mode_costs->cfl_cost[joint_sign][CFL_PRED_U][cfl_alpha_u] +
755 mode_costs->cfl_cost[joint_sign][CFL_PRED_V][cfl_alpha_v] +
756 mode_costs
757 ->intra_uv_mode_cost[is_cfl_allowed(xd)][mbmi->mode][UV_CFL_PRED];
758 // Skip the CfL mode evaluation if the RD cost derived using the rate needed
759 // to signal the CfL mode and alpha parameter exceeds the ref_best_rd.
760 if (RDCOST(x->rdmult, rate_overhead, 0) > ref_best_rd) {
761 set_invalid_cfl_parameters(best_cfl_alpha_idx, best_cfl_alpha_signs);
762 clear_cfl_dc_pred_cache_flags(&xd->cfl);
763 return 0;
764 }
765 }
766
767 // Compute the rd cost of each chroma plane using the alpha parameters which
768 // were already evaluated.
769 cfl_pick_plane_rd(cpi, x, 1, tx_size, cfl_search_range, cfl_rd_arr_u,
770 est_best_cfl_idx_u);
771 cfl_pick_plane_rd(cpi, x, 2, tx_size, cfl_search_range, cfl_rd_arr_v,
772 est_best_cfl_idx_v);
773
774 clear_cfl_dc_pred_cache_flags(&xd->cfl);
775
776 for (int ui = 0; ui < CFL_MAGS_SIZE; ++ui) {
777 if (cfl_rd_arr_u[ui].rate == INT_MAX) continue;
778 int cfl_alpha_u;
779 CFL_SIGN_TYPE cfl_sign_u;
780 cfl_idx_to_sign_and_alpha(ui, &cfl_sign_u, &cfl_alpha_u);
781 for (int vi = 0; vi < CFL_MAGS_SIZE; ++vi) {
782 if (cfl_rd_arr_v[vi].rate == INT_MAX) continue;
783 int cfl_alpha_v;
784 CFL_SIGN_TYPE cfl_sign_v;
785 cfl_idx_to_sign_and_alpha(vi, &cfl_sign_v, &cfl_alpha_v);
786 // cfl_sign_u == CFL_SIGN_ZERO && cfl_sign_v == CFL_SIGN_ZERO is not a
787 // valid parameter for CFL
788 if (cfl_sign_u == CFL_SIGN_ZERO && cfl_sign_v == CFL_SIGN_ZERO) continue;
789 int joint_sign = cfl_sign_u * CFL_SIGNS + cfl_sign_v - 1;
790 RD_STATS rd_stats = cfl_rd_arr_u[ui];
791 av1_merge_rd_stats(&rd_stats, &cfl_rd_arr_v[vi]);
792 if (rd_stats.rate != INT_MAX) {
793 rd_stats.rate +=
794 mode_costs->cfl_cost[joint_sign][CFL_PRED_U][cfl_alpha_u];
795 rd_stats.rate +=
796 mode_costs->cfl_cost[joint_sign][CFL_PRED_V][cfl_alpha_v];
797 }
798 av1_rd_cost_update(x->rdmult, &rd_stats);
799 if (rd_stats.rdcost < best_rd_stats->rdcost) {
800 *best_rd_stats = rd_stats;
801 *best_cfl_alpha_idx =
802 (cfl_alpha_u << CFL_ALPHABET_SIZE_LOG2) + cfl_alpha_v;
803 *best_cfl_alpha_signs = joint_sign;
804 }
805 }
806 }
807 if (best_rd_stats->rdcost >= ref_best_rd) {
808 av1_invalid_rd_stats(best_rd_stats);
809 // Set invalid CFL parameters here since the rdcost is not better than
810 // ref_best_rd.
811 set_invalid_cfl_parameters(best_cfl_alpha_idx, best_cfl_alpha_signs);
812 return 0;
813 }
814 return 1;
815 }
816
817 static bool should_prune_chroma_smooth_pred_based_on_source_variance(
818 const AV1_COMP *cpi, const MACROBLOCK *x, BLOCK_SIZE bsize) {
819 if (!cpi->sf.intra_sf.prune_smooth_intra_mode_for_chroma) return false;
820
821 // If the source variance of both chroma planes is less than 20 (empirically
822 // derived), prune UV_SMOOTH_PRED.
823 for (int i = AOM_PLANE_U; i < av1_num_planes(&cpi->common); i++) {
824 const unsigned int variance = av1_get_perpixel_variance_facade(
825 cpi, &x->e_mbd, &x->plane[i].src, bsize, i);
826 if (variance >= 20) return false;
827 }
828 return true;
829 }
830
831 int64_t av1_rd_pick_intra_sbuv_mode(const AV1_COMP *const cpi, MACROBLOCK *x,
832 int *rate, int *rate_tokenonly,
833 int64_t *distortion, uint8_t *skippable,
834 BLOCK_SIZE bsize, TX_SIZE max_tx_size) {
835 const AV1_COMMON *const cm = &cpi->common;
836 MACROBLOCKD *xd = &x->e_mbd;
837 MB_MODE_INFO *mbmi = xd->mi[0];
838 assert(!is_inter_block(mbmi));
839 MB_MODE_INFO best_mbmi = *mbmi;
840 int64_t best_rd = INT64_MAX, this_rd;
841 const ModeCosts *mode_costs = &x->mode_costs;
842 const IntraModeCfg *const intra_mode_cfg = &cpi->oxcf.intra_mode_cfg;
843
844 init_sbuv_mode(mbmi);
845
846 // Return if the current block does not correspond to a chroma block.
847 if (!xd->is_chroma_ref) {
848 *rate = 0;
849 *rate_tokenonly = 0;
850 *distortion = 0;
851 *skippable = 1;
852 return INT64_MAX;
853 }
854
855 // Only store reconstructed luma when there's chroma RDO. When there's no
856 // chroma RDO, the reconstructed luma will be stored in encode_superblock().
857 xd->cfl.store_y = store_cfl_required_rdo(cm, x);
858 if (xd->cfl.store_y) {
859 // Restore reconstructed luma values.
860 // TODO(chiyotsai@google.com): right now we are re-computing the txfm in
861 // this function every time we search through uv modes. There is some
862 // potential speed up here if we cache the result to avoid redundant
863 // computation.
864 av1_encode_intra_block_plane(cpi, x, mbmi->bsize, AOM_PLANE_Y,
865 DRY_RUN_NORMAL,
866 cpi->optimize_seg_arr[mbmi->segment_id]);
867 xd->cfl.store_y = 0;
868 }
869 IntraModeSearchState intra_search_state;
870 init_intra_mode_search_state(&intra_search_state);
871 const CFL_ALLOWED_TYPE cfl_allowed = is_cfl_allowed(xd);
872
873 // Search through all non-palette modes.
874 for (int mode_idx = 0; mode_idx < UV_INTRA_MODES; ++mode_idx) {
875 int this_rate;
876 RD_STATS tokenonly_rd_stats;
877 UV_PREDICTION_MODE uv_mode = uv_rd_search_mode_order[mode_idx];
878
879 // Skip the current mode evaluation if the RD cost derived using the mode
880 // signaling rate exceeds the best_rd so far.
881 const int mode_rate =
882 mode_costs->intra_uv_mode_cost[cfl_allowed][mbmi->mode][uv_mode];
883 if (RDCOST(x->rdmult, mode_rate, 0) > best_rd) continue;
884
885 PREDICTION_MODE intra_mode = get_uv_mode(uv_mode);
886 const int is_diagonal_mode = av1_is_diagonal_mode(intra_mode);
887 const int is_directional_mode = av1_is_directional_mode(intra_mode);
888
889 if (is_diagonal_mode && !cpi->oxcf.intra_mode_cfg.enable_diagonal_intra)
890 continue;
891 if (is_directional_mode &&
892 !cpi->oxcf.intra_mode_cfg.enable_directional_intra)
893 continue;
894
895 if (!(cpi->sf.intra_sf.intra_uv_mode_mask[txsize_sqr_up_map[max_tx_size]] &
896 (1 << uv_mode)))
897 continue;
898 if (!intra_mode_cfg->enable_smooth_intra && uv_mode >= UV_SMOOTH_PRED &&
899 uv_mode <= UV_SMOOTH_H_PRED)
900 continue;
901
902 if (!intra_mode_cfg->enable_paeth_intra && uv_mode == UV_PAETH_PRED)
903 continue;
904
905 assert(mbmi->mode < INTRA_MODES);
906 if (cpi->sf.intra_sf.prune_chroma_modes_using_luma_winner &&
907 !(av1_derived_chroma_intra_mode_used_flag[mbmi->mode] & (1 << uv_mode)))
908 continue;
909
910 mbmi->uv_mode = uv_mode;
911
912 // Init variables for cfl and angle delta
913 const SPEED_FEATURES *sf = &cpi->sf;
914 mbmi->angle_delta[PLANE_TYPE_UV] = 0;
915 if (uv_mode == UV_CFL_PRED) {
916 if (!cfl_allowed || !intra_mode_cfg->enable_cfl_intra) continue;
917 assert(!is_directional_mode);
918 const TX_SIZE uv_tx_size = av1_get_tx_size(AOM_PLANE_U, xd);
919 if (!cfl_rd_pick_alpha(x, cpi, uv_tx_size, best_rd,
920 sf->intra_sf.cfl_search_range, &tokenonly_rd_stats,
921 &mbmi->cfl_alpha_idx, &mbmi->cfl_alpha_signs)) {
922 continue;
923 }
924 } else if (is_directional_mode && av1_use_angle_delta(mbmi->bsize) &&
925 intra_mode_cfg->enable_angle_delta) {
926 if (sf->intra_sf.chroma_intra_pruning_with_hog &&
927 !intra_search_state.dir_mode_skip_mask_ready) {
928 static const float thresh[2][4] = {
929 { -1.2f, 0.0f, 0.0f, 1.2f }, // Interframe
930 { -1.2f, -1.2f, -0.6f, 0.4f }, // Intraframe
931 };
932 const int is_chroma = 1;
933 const int is_intra_frame = frame_is_intra_only(cm);
934 prune_intra_mode_with_hog(
935 x, bsize, cm->seq_params->sb_size,
936 thresh[is_intra_frame]
937 [sf->intra_sf.chroma_intra_pruning_with_hog - 1],
938 intra_search_state.directional_mode_skip_mask, is_chroma);
939 intra_search_state.dir_mode_skip_mask_ready = 1;
940 }
941 if (intra_search_state.directional_mode_skip_mask[uv_mode]) {
942 continue;
943 }
944
945 // Search through angle delta
946 const int rate_overhead =
947 mode_costs->intra_uv_mode_cost[cfl_allowed][mbmi->mode][uv_mode];
948 if (!rd_pick_intra_angle_sbuv(cpi, x, bsize, rate_overhead, best_rd,
949 &this_rate, &tokenonly_rd_stats))
950 continue;
951 } else {
952 if (uv_mode == UV_SMOOTH_PRED &&
953 should_prune_chroma_smooth_pred_based_on_source_variance(cpi, x,
954 bsize))
955 continue;
956
957 // Predict directly if we don't need to search for angle delta.
958 if (!av1_txfm_uvrd(cpi, x, &tokenonly_rd_stats, bsize, best_rd)) {
959 continue;
960 }
961 }
962 const int mode_cost =
963 mode_costs->intra_uv_mode_cost[cfl_allowed][mbmi->mode][uv_mode];
964 this_rate = tokenonly_rd_stats.rate +
965 intra_mode_info_cost_uv(cpi, x, mbmi, bsize, mode_cost);
966 this_rd = RDCOST(x->rdmult, this_rate, tokenonly_rd_stats.dist);
967
968 if (this_rd < best_rd) {
969 best_mbmi = *mbmi;
970 best_rd = this_rd;
971 *rate = this_rate;
972 *rate_tokenonly = tokenonly_rd_stats.rate;
973 *distortion = tokenonly_rd_stats.dist;
974 *skippable = tokenonly_rd_stats.skip_txfm;
975 }
976 }
977
978 // Search palette mode
979 const int try_palette =
980 cpi->oxcf.tool_cfg.enable_palette &&
981 av1_allow_palette(cpi->common.features.allow_screen_content_tools,
982 mbmi->bsize);
983 if (try_palette) {
984 uint8_t *best_palette_color_map = x->palette_buffer->best_palette_color_map;
985 av1_rd_pick_palette_intra_sbuv(
986 cpi, x,
987 mode_costs->intra_uv_mode_cost[cfl_allowed][mbmi->mode][UV_DC_PRED],
988 best_palette_color_map, &best_mbmi, &best_rd, rate, rate_tokenonly,
989 distortion, skippable);
990 }
991
992 *mbmi = best_mbmi;
993 // Make sure we actually chose a mode
994 assert(best_rd < INT64_MAX);
995 return best_rd;
996 }
997
998 // Searches palette mode for luma channel in inter frame.
999 int av1_search_palette_mode(IntraModeSearchState *intra_search_state,
1000 const AV1_COMP *cpi, MACROBLOCK *x,
1001 BLOCK_SIZE bsize, unsigned int ref_frame_cost,
1002 PICK_MODE_CONTEXT *ctx, RD_STATS *this_rd_cost,
1003 int64_t best_rd) {
1004 const AV1_COMMON *const cm = &cpi->common;
1005 MB_MODE_INFO *const mbmi = x->e_mbd.mi[0];
1006 PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
1007 const int num_planes = av1_num_planes(cm);
1008 MACROBLOCKD *const xd = &x->e_mbd;
1009 int rate2 = 0;
1010 int64_t distortion2 = 0, best_rd_palette = best_rd, this_rd;
1011 int skippable = 0;
1012 uint8_t *const best_palette_color_map =
1013 x->palette_buffer->best_palette_color_map;
1014 uint8_t *const color_map = xd->plane[0].color_index_map;
1015 MB_MODE_INFO best_mbmi_palette = *mbmi;
1016 uint8_t best_blk_skip[MAX_MIB_SIZE * MAX_MIB_SIZE];
1017 uint8_t best_tx_type_map[MAX_MIB_SIZE * MAX_MIB_SIZE];
1018 const ModeCosts *mode_costs = &x->mode_costs;
1019 const int *const intra_mode_cost =
1020 mode_costs->mbmode_cost[size_group_lookup[bsize]];
1021 const int rows = block_size_high[bsize];
1022 const int cols = block_size_wide[bsize];
1023
1024 mbmi->mode = DC_PRED;
1025 mbmi->uv_mode = UV_DC_PRED;
1026 mbmi->ref_frame[0] = INTRA_FRAME;
1027 mbmi->ref_frame[1] = NONE_FRAME;
1028 av1_zero(pmi->palette_size);
1029
1030 RD_STATS rd_stats_y;
1031 av1_invalid_rd_stats(&rd_stats_y);
1032 av1_rd_pick_palette_intra_sby(cpi, x, bsize, intra_mode_cost[DC_PRED],
1033 &best_mbmi_palette, best_palette_color_map,
1034 &best_rd_palette, &rd_stats_y.rate, NULL,
1035 &rd_stats_y.dist, &rd_stats_y.skip_txfm, NULL,
1036 ctx, best_blk_skip, best_tx_type_map);
1037 if (rd_stats_y.rate == INT_MAX || pmi->palette_size[0] == 0) {
1038 this_rd_cost->rdcost = INT64_MAX;
1039 return skippable;
1040 }
1041
1042 memcpy(x->txfm_search_info.blk_skip, best_blk_skip,
1043 sizeof(best_blk_skip[0]) * bsize_to_num_blk(bsize));
1044 av1_copy_array(xd->tx_type_map, best_tx_type_map, ctx->num_4x4_blk);
1045 memcpy(color_map, best_palette_color_map,
1046 rows * cols * sizeof(best_palette_color_map[0]));
1047
1048 skippable = rd_stats_y.skip_txfm;
1049 distortion2 = rd_stats_y.dist;
1050 rate2 = rd_stats_y.rate + ref_frame_cost;
1051 if (num_planes > 1) {
1052 if (intra_search_state->rate_uv_intra == INT_MAX) {
1053 // We have not found any good uv mode yet, so we need to search for it.
1054 TX_SIZE uv_tx = av1_get_tx_size(AOM_PLANE_U, xd);
1055 av1_rd_pick_intra_sbuv_mode(cpi, x, &intra_search_state->rate_uv_intra,
1056 &intra_search_state->rate_uv_tokenonly,
1057 &intra_search_state->dist_uvs,
1058 &intra_search_state->skip_uvs, bsize, uv_tx);
1059 intra_search_state->mode_uv = mbmi->uv_mode;
1060 intra_search_state->pmi_uv = *pmi;
1061 intra_search_state->uv_angle_delta = mbmi->angle_delta[PLANE_TYPE_UV];
1062 }
1063
1064 // We have found at least one good uv mode before, so copy and paste it
1065 // over.
1066 mbmi->uv_mode = intra_search_state->mode_uv;
1067 pmi->palette_size[1] = intra_search_state->pmi_uv.palette_size[1];
1068 if (pmi->palette_size[1] > 0) {
1069 memcpy(pmi->palette_colors + PALETTE_MAX_SIZE,
1070 intra_search_state->pmi_uv.palette_colors + PALETTE_MAX_SIZE,
1071 2 * PALETTE_MAX_SIZE * sizeof(pmi->palette_colors[0]));
1072 }
1073 mbmi->angle_delta[PLANE_TYPE_UV] = intra_search_state->uv_angle_delta;
1074 skippable = skippable && intra_search_state->skip_uvs;
1075 distortion2 += intra_search_state->dist_uvs;
1076 rate2 += intra_search_state->rate_uv_intra;
1077 }
1078
1079 if (skippable) {
1080 rate2 -= rd_stats_y.rate;
1081 if (num_planes > 1) rate2 -= intra_search_state->rate_uv_tokenonly;
1082 rate2 += mode_costs->skip_txfm_cost[av1_get_skip_txfm_context(xd)][1];
1083 } else {
1084 rate2 += mode_costs->skip_txfm_cost[av1_get_skip_txfm_context(xd)][0];
1085 }
1086 this_rd = RDCOST(x->rdmult, rate2, distortion2);
1087 this_rd_cost->rate = rate2;
1088 this_rd_cost->dist = distortion2;
1089 this_rd_cost->rdcost = this_rd;
1090 return skippable;
1091 }
1092
1093 void av1_search_palette_mode_luma(const AV1_COMP *cpi, MACROBLOCK *x,
1094 BLOCK_SIZE bsize, unsigned int ref_frame_cost,
1095 PICK_MODE_CONTEXT *ctx,
1096 RD_STATS *this_rd_cost, int64_t best_rd) {
1097 MB_MODE_INFO *const mbmi = x->e_mbd.mi[0];
1098 PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
1099 MACROBLOCKD *const xd = &x->e_mbd;
1100 int64_t best_rd_palette = best_rd, this_rd;
1101 uint8_t *const best_palette_color_map =
1102 x->palette_buffer->best_palette_color_map;
1103 uint8_t *const color_map = xd->plane[0].color_index_map;
1104 MB_MODE_INFO best_mbmi_palette = *mbmi;
1105 uint8_t best_blk_skip[MAX_MIB_SIZE * MAX_MIB_SIZE];
1106 uint8_t best_tx_type_map[MAX_MIB_SIZE * MAX_MIB_SIZE];
1107 const ModeCosts *mode_costs = &x->mode_costs;
1108 const int *const intra_mode_cost =
1109 mode_costs->mbmode_cost[size_group_lookup[bsize]];
1110 const int rows = block_size_high[bsize];
1111 const int cols = block_size_wide[bsize];
1112
1113 mbmi->mode = DC_PRED;
1114 mbmi->uv_mode = UV_DC_PRED;
1115 mbmi->ref_frame[0] = INTRA_FRAME;
1116 mbmi->ref_frame[1] = NONE_FRAME;
1117 av1_zero(pmi->palette_size);
1118
1119 RD_STATS rd_stats_y;
1120 av1_invalid_rd_stats(&rd_stats_y);
1121 av1_rd_pick_palette_intra_sby(cpi, x, bsize, intra_mode_cost[DC_PRED],
1122 &best_mbmi_palette, best_palette_color_map,
1123 &best_rd_palette, &rd_stats_y.rate, NULL,
1124 &rd_stats_y.dist, &rd_stats_y.skip_txfm, NULL,
1125 ctx, best_blk_skip, best_tx_type_map);
1126 if (rd_stats_y.rate == INT_MAX || pmi->palette_size[0] == 0) {
1127 this_rd_cost->rdcost = INT64_MAX;
1128 return;
1129 }
1130
1131 memcpy(x->txfm_search_info.blk_skip, best_blk_skip,
1132 sizeof(best_blk_skip[0]) * bsize_to_num_blk(bsize));
1133 av1_copy_array(xd->tx_type_map, best_tx_type_map, ctx->num_4x4_blk);
1134 memcpy(color_map, best_palette_color_map,
1135 rows * cols * sizeof(best_palette_color_map[0]));
1136
1137 rd_stats_y.rate += ref_frame_cost;
1138
1139 if (rd_stats_y.skip_txfm) {
1140 rd_stats_y.rate =
1141 ref_frame_cost +
1142 mode_costs->skip_txfm_cost[av1_get_skip_txfm_context(xd)][1];
1143 } else {
1144 rd_stats_y.rate +=
1145 mode_costs->skip_txfm_cost[av1_get_skip_txfm_context(xd)][0];
1146 }
1147 this_rd = RDCOST(x->rdmult, rd_stats_y.rate, rd_stats_y.dist);
1148 this_rd_cost->rate = rd_stats_y.rate;
1149 this_rd_cost->dist = rd_stats_y.dist;
1150 this_rd_cost->rdcost = this_rd;
1151 this_rd_cost->skip_txfm = rd_stats_y.skip_txfm;
1152 }
1153
1154 /*!\brief Get the intra prediction by searching through tx_type and tx_size.
1155 *
1156 * \ingroup intra_mode_search
1157 * \callergraph
1158 * Currently this function is only used in the intra frame code path for
1159 * winner-mode processing.
1160 *
1161 * \return Returns whether the current mode is an improvement over best_rd.
1162 */
1163 static AOM_INLINE int intra_block_yrd(const AV1_COMP *const cpi, MACROBLOCK *x,
1164 BLOCK_SIZE bsize, const int *bmode_costs,
1165 int64_t *best_rd, int *rate,
1166 int *rate_tokenonly, int64_t *distortion,
1167 uint8_t *skippable,
1168 MB_MODE_INFO *best_mbmi,
1169 PICK_MODE_CONTEXT *ctx) {
1170 MACROBLOCKD *const xd = &x->e_mbd;
1171 MB_MODE_INFO *const mbmi = xd->mi[0];
1172 RD_STATS rd_stats;
1173 // In order to improve the txfm search, avoid rd-based breakouts during winner
1174 // mode evaluation. Hence, ref_best_rd is passed as INT64_MAX by default when
1175 // the speed feature use_rd_based_breakout_for_intra_tx_search is disabled.
1176 int64_t ref_best_rd = cpi->sf.tx_sf.use_rd_based_breakout_for_intra_tx_search
1177 ? *best_rd
1178 : INT64_MAX;
1179 av1_pick_uniform_tx_size_type_yrd(cpi, x, &rd_stats, bsize, ref_best_rd);
1180 if (rd_stats.rate == INT_MAX) return 0;
1181 int this_rate_tokenonly = rd_stats.rate;
1182 if (!xd->lossless[mbmi->segment_id] && block_signals_txsize(mbmi->bsize)) {
1183 // av1_pick_uniform_tx_size_type_yrd above includes the cost of the tx_size
1184 // in the tokenonly rate, but for intra blocks, tx_size is always coded
1185 // (prediction granularity), so we account for it in the full rate,
1186 // not the tokenonly rate.
1187 this_rate_tokenonly -= tx_size_cost(x, bsize, mbmi->tx_size);
1188 }
1189 const int this_rate =
1190 rd_stats.rate +
1191 intra_mode_info_cost_y(cpi, x, mbmi, bsize, bmode_costs[mbmi->mode], 0);
1192 const int64_t this_rd = RDCOST(x->rdmult, this_rate, rd_stats.dist);
1193 if (this_rd < *best_rd) {
1194 *best_mbmi = *mbmi;
1195 *best_rd = this_rd;
1196 *rate = this_rate;
1197 *rate_tokenonly = this_rate_tokenonly;
1198 *distortion = rd_stats.dist;
1199 *skippable = rd_stats.skip_txfm;
1200 av1_copy_array(ctx->blk_skip, x->txfm_search_info.blk_skip,
1201 ctx->num_4x4_blk);
1202 av1_copy_array(ctx->tx_type_map, xd->tx_type_map, ctx->num_4x4_blk);
1203 return 1;
1204 }
1205 return 0;
1206 }
1207
1208 /*!\brief Search for the best filter_intra mode when coding inter frame.
1209 *
1210 * \ingroup intra_mode_search
1211 * \callergraph
1212 * This function loops through all filter_intra modes to find the best one.
1213 *
1214 * \remark Returns nothing, but updates the mbmi and rd_stats.
1215 */
1216 static INLINE void handle_filter_intra_mode(const AV1_COMP *cpi, MACROBLOCK *x,
1217 BLOCK_SIZE bsize,
1218 const PICK_MODE_CONTEXT *ctx,
1219 RD_STATS *rd_stats_y, int mode_cost,
1220 int64_t best_rd,
1221 int64_t best_rd_so_far) {
1222 MACROBLOCKD *const xd = &x->e_mbd;
1223 MB_MODE_INFO *const mbmi = xd->mi[0];
1224 assert(mbmi->mode == DC_PRED &&
1225 av1_filter_intra_allowed_bsize(&cpi->common, bsize));
1226
1227 RD_STATS rd_stats_y_fi;
1228 int filter_intra_selected_flag = 0;
1229 TX_SIZE best_tx_size = mbmi->tx_size;
1230 FILTER_INTRA_MODE best_fi_mode = FILTER_DC_PRED;
1231 uint8_t best_blk_skip[MAX_MIB_SIZE * MAX_MIB_SIZE];
1232 memcpy(best_blk_skip, x->txfm_search_info.blk_skip,
1233 sizeof(best_blk_skip[0]) * ctx->num_4x4_blk);
1234 uint8_t best_tx_type_map[MAX_MIB_SIZE * MAX_MIB_SIZE];
1235 av1_copy_array(best_tx_type_map, xd->tx_type_map, ctx->num_4x4_blk);
1236 mbmi->filter_intra_mode_info.use_filter_intra = 1;
1237 for (FILTER_INTRA_MODE fi_mode = FILTER_DC_PRED; fi_mode < FILTER_INTRA_MODES;
1238 ++fi_mode) {
1239 mbmi->filter_intra_mode_info.filter_intra_mode = fi_mode;
1240 av1_pick_uniform_tx_size_type_yrd(cpi, x, &rd_stats_y_fi, bsize, best_rd);
1241 if (rd_stats_y_fi.rate == INT_MAX) continue;
1242 const int this_rate_tmp =
1243 rd_stats_y_fi.rate +
1244 intra_mode_info_cost_y(cpi, x, mbmi, bsize, mode_cost, 0);
1245 const int64_t this_rd_tmp =
1246 RDCOST(x->rdmult, this_rate_tmp, rd_stats_y_fi.dist);
1247
1248 if (this_rd_tmp != INT64_MAX && this_rd_tmp / 2 > best_rd) {
1249 break;
1250 }
1251 if (this_rd_tmp < best_rd_so_far) {
1252 best_tx_size = mbmi->tx_size;
1253 av1_copy_array(best_tx_type_map, xd->tx_type_map, ctx->num_4x4_blk);
1254 memcpy(best_blk_skip, x->txfm_search_info.blk_skip,
1255 sizeof(best_blk_skip[0]) * ctx->num_4x4_blk);
1256 best_fi_mode = fi_mode;
1257 *rd_stats_y = rd_stats_y_fi;
1258 filter_intra_selected_flag = 1;
1259 best_rd_so_far = this_rd_tmp;
1260 }
1261 }
1262
1263 mbmi->tx_size = best_tx_size;
1264 av1_copy_array(xd->tx_type_map, best_tx_type_map, ctx->num_4x4_blk);
1265 memcpy(x->txfm_search_info.blk_skip, best_blk_skip,
1266 sizeof(x->txfm_search_info.blk_skip[0]) * ctx->num_4x4_blk);
1267
1268 if (filter_intra_selected_flag) {
1269 mbmi->filter_intra_mode_info.use_filter_intra = 1;
1270 mbmi->filter_intra_mode_info.filter_intra_mode = best_fi_mode;
1271 } else {
1272 mbmi->filter_intra_mode_info.use_filter_intra = 0;
1273 }
1274 }
1275
1276 // Evaluate a given luma intra-mode in inter frames.
1277 int av1_handle_intra_y_mode(IntraModeSearchState *intra_search_state,
1278 const AV1_COMP *cpi, MACROBLOCK *x,
1279 BLOCK_SIZE bsize, unsigned int ref_frame_cost,
1280 const PICK_MODE_CONTEXT *ctx, RD_STATS *rd_stats_y,
1281 int64_t best_rd, int *mode_cost_y, int64_t *rd_y,
1282 int64_t *best_model_rd,
1283 int64_t top_intra_model_rd[]) {
1284 const AV1_COMMON *cm = &cpi->common;
1285 const INTRA_MODE_SPEED_FEATURES *const intra_sf = &cpi->sf.intra_sf;
1286 MACROBLOCKD *const xd = &x->e_mbd;
1287 MB_MODE_INFO *const mbmi = xd->mi[0];
1288 assert(mbmi->ref_frame[0] == INTRA_FRAME);
1289 const PREDICTION_MODE mode = mbmi->mode;
1290 const ModeCosts *mode_costs = &x->mode_costs;
1291 const int mode_cost =
1292 mode_costs->mbmode_cost[size_group_lookup[bsize]][mode] + ref_frame_cost;
1293 const int skip_ctx = av1_get_skip_txfm_context(xd);
1294
1295 int known_rate = mode_cost;
1296 const int intra_cost_penalty = av1_get_intra_cost_penalty(
1297 cm->quant_params.base_qindex, cm->quant_params.y_dc_delta_q,
1298 cm->seq_params->bit_depth);
1299
1300 if (mode != DC_PRED && mode != PAETH_PRED) known_rate += intra_cost_penalty;
1301 known_rate += AOMMIN(mode_costs->skip_txfm_cost[skip_ctx][0],
1302 mode_costs->skip_txfm_cost[skip_ctx][1]);
1303 const int64_t known_rd = RDCOST(x->rdmult, known_rate, 0);
1304 if (known_rd > best_rd) {
1305 intra_search_state->skip_intra_modes = 1;
1306 return 0;
1307 }
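// Editorial note on the early exit above: known_rate is a lower bound on the
// mode's total rate (mode signalling, the intra cost penalty for modes other
// than DC_PRED and PAETH_PRED, and the cheaper of the two skip_txfm
// signalling costs), so if even RDCOST(known_rate, 0) exceeds best_rd this
// mode cannot win, and skip_intra_modes is set to stop evaluating further
// luma intra modes.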
1308
1309 const int is_directional_mode = av1_is_directional_mode(mode);
1310 if (is_directional_mode && av1_use_angle_delta(bsize) &&
1311 cpi->oxcf.intra_mode_cfg.enable_angle_delta) {
1312 if (intra_sf->intra_pruning_with_hog &&
1313 !intra_search_state->dir_mode_skip_mask_ready) {
1314 const float thresh[4] = { -1.2f, 0.0f, 0.0f, 1.2f };
1315 const int is_chroma = 0;
1316 prune_intra_mode_with_hog(x, bsize, cm->seq_params->sb_size,
1317 thresh[intra_sf->intra_pruning_with_hog - 1],
1318 intra_search_state->directional_mode_skip_mask,
1319 is_chroma);
1320 intra_search_state->dir_mode_skip_mask_ready = 1;
1321 }
1322 if (intra_search_state->directional_mode_skip_mask[mode]) return 0;
1323 }
1324 const TX_SIZE tx_size = AOMMIN(TX_32X32, max_txsize_lookup[bsize]);
1325 const int64_t this_model_rd =
1326 intra_model_rd(&cpi->common, x, 0, bsize, tx_size, /*use_hadamard=*/1);

  const int model_rd_index_for_pruning =
      get_model_rd_index_for_pruning(x, intra_sf);

  if (prune_intra_y_mode(this_model_rd, best_model_rd, top_intra_model_rd,
                         intra_sf->top_intra_model_count_allowed,
                         model_rd_index_for_pruning))
    return 0;
  av1_init_rd_stats(rd_stats_y);
  av1_pick_uniform_tx_size_type_yrd(cpi, x, rd_stats_y, bsize, best_rd);

  // Pick filter intra modes.
  if (mode == DC_PRED && av1_filter_intra_allowed_bsize(cm, bsize)) {
    int try_filter_intra = 1;
    int64_t best_rd_so_far = INT64_MAX;
    if (rd_stats_y->rate != INT_MAX) {
      // best_rd_so_far is the rdcost of DC_PRED without using filter_intra.
      // Later, in filter intra search, best_rd_so_far is used for comparison.
      mbmi->filter_intra_mode_info.use_filter_intra = 0;
      const int tmp_rate =
          rd_stats_y->rate +
          intra_mode_info_cost_y(cpi, x, mbmi, bsize, mode_cost, 0);
      best_rd_so_far = RDCOST(x->rdmult, tmp_rate, rd_stats_y->dist);
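      // Try filter intra only if the rd cost of plain DC_PRED is at most twice
      // the best rd cost found so far.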
      try_filter_intra = (best_rd_so_far / 2) <= best_rd;
    } else if (intra_sf->skip_filter_intra_in_inter_frames >= 1) {
      // Since the rd cost of the luma intra DC mode exceeds best_rd (indicated
      // by rd_stats_y->rate == INT_MAX), skip the evaluation of filter intra
      // modes.
      try_filter_intra = 0;
    }

    if (try_filter_intra) {
      handle_filter_intra_mode(cpi, x, bsize, ctx, rd_stats_y, mode_cost,
                               best_rd, best_rd_so_far);
    }
  }

  if (rd_stats_y->rate == INT_MAX) return 0;

  *mode_cost_y = intra_mode_info_cost_y(cpi, x, mbmi, bsize, mode_cost, 0);
  const int rate_y = rd_stats_y->skip_txfm
                         ? mode_costs->skip_txfm_cost[skip_ctx][1]
                         : rd_stats_y->rate;
  *rd_y = RDCOST(x->rdmult, rate_y + *mode_cost_y, rd_stats_y->dist);
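  // If the luma-only rd cost already exceeds best_rd by more than 25%
  // (best_rd + best_rd / 4), skip the remaining intra modes as well.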
  if (best_rd < (INT64_MAX / 2) && *rd_y > (best_rd + (best_rd >> 2))) {
    intra_search_state->skip_intra_modes = 1;
    return 0;
  }

  return 1;
}

int av1_search_intra_uv_modes_in_interframe(
    IntraModeSearchState *intra_search_state, const AV1_COMP *cpi,
    MACROBLOCK *x, BLOCK_SIZE bsize, RD_STATS *rd_stats,
    const RD_STATS *rd_stats_y, RD_STATS *rd_stats_uv, int64_t best_rd) {
  const AV1_COMMON *cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = xd->mi[0];
  assert(mbmi->ref_frame[0] == INTRA_FRAME);

  // TODO(chiyotsai@google.com): Consolidate the chroma search code here with
  // the one in av1_search_palette_mode.
  PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
  const int try_palette =
      cpi->oxcf.tool_cfg.enable_palette &&
      av1_allow_palette(cm->features.allow_screen_content_tools, mbmi->bsize);

  assert(intra_search_state->rate_uv_intra == INT_MAX);
  if (intra_search_state->rate_uv_intra == INT_MAX) {
    // If no good uv-predictor has been found yet, search for it.
    const TX_SIZE uv_tx = av1_get_tx_size(AOM_PLANE_U, xd);
    av1_rd_pick_intra_sbuv_mode(cpi, x, &intra_search_state->rate_uv_intra,
                                &intra_search_state->rate_uv_tokenonly,
                                &intra_search_state->dist_uvs,
                                &intra_search_state->skip_uvs, bsize, uv_tx);
    intra_search_state->mode_uv = mbmi->uv_mode;
    if (try_palette) intra_search_state->pmi_uv = *pmi;
    intra_search_state->uv_angle_delta = mbmi->angle_delta[PLANE_TYPE_UV];

    const int uv_rate = intra_search_state->rate_uv_tokenonly;
    const int64_t uv_dist = intra_search_state->dist_uvs;
    const int64_t uv_rd = RDCOST(x->rdmult, uv_rate, uv_dist);
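    // uv_rd is a lower bound on the total rd cost of any intra mode (the luma
    // rate and distortion still have to be added), so if it already exceeds
    // best_rd, every intra mode can be skipped.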
    if (uv_rd > best_rd) {
      // If there is no good intra uv-mode available, we can skip all intra
      // modes.
      intra_search_state->skip_intra_modes = 1;
      return 0;
    }
  }

  // If we are here, then the encoder has found at least one good intra uv
  // predictor, so we can directly copy its statistics over.
  // TODO(any): the stats here are not right if the best uv mode is CFL but the
  // best y mode is palette.
  rd_stats_uv->rate = intra_search_state->rate_uv_tokenonly;
  rd_stats_uv->dist = intra_search_state->dist_uvs;
  rd_stats_uv->skip_txfm = intra_search_state->skip_uvs;
  rd_stats->skip_txfm = rd_stats_y->skip_txfm && rd_stats_uv->skip_txfm;
  mbmi->uv_mode = intra_search_state->mode_uv;
  if (try_palette) {
    pmi->palette_size[1] = intra_search_state->pmi_uv.palette_size[1];
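    // Copy the U and V palette colors, which are stored after the Y colors in
    // palette_colors.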
    memcpy(pmi->palette_colors + PALETTE_MAX_SIZE,
           intra_search_state->pmi_uv.palette_colors + PALETTE_MAX_SIZE,
           2 * PALETTE_MAX_SIZE * sizeof(pmi->palette_colors[0]));
  }
  mbmi->angle_delta[PLANE_TYPE_UV] = intra_search_state->uv_angle_delta;

  return 1;
}

// Checks if odd delta angles can be pruned based on rdcosts of even delta
// angles of the corresponding directional mode.
static AOM_INLINE int prune_luma_odd_delta_angles_using_rd_cost(
    const MB_MODE_INFO *const mbmi, const int64_t *const intra_modes_rd_cost,
    int64_t best_rd, int prune_luma_odd_delta_angles_in_intra) {
  const int luma_delta_angle = mbmi->angle_delta[PLANE_TYPE_Y];
  if (!prune_luma_odd_delta_angles_in_intra ||
      !av1_is_directional_mode(mbmi->mode) || !(abs(luma_delta_angle) & 1) ||
      best_rd == INT64_MAX)
    return 0;

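  // rd_thresh gives a 12.5% margin over the current best rd (best_rd * 9 / 8).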
  const int64_t rd_thresh = best_rd + (best_rd >> 3);

  // Neighbour rdcosts are considered for pruning of odd delta angles as
  // mentioned below:
  //   Delta angle     Delta angle rdcost
  //   to be pruned    to be considered
  //       -3                 -2
  //       -1               -2, 0
  //        1                0, 2
  //        3                  2
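  // The rd cost of delta angle d is stored at index d + MAX_ANGLE_DELTA + 1,
  // so the two entries checked below correspond to delta angles d - 1 and
  // d + 1. The entries for the nonexistent delta angles -4 and 4 stay at
  // INT64_MAX, which is why deltas -3 and 3 are effectively gated by a single
  // even neighbour.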
  return intra_modes_rd_cost[luma_delta_angle + MAX_ANGLE_DELTA] > rd_thresh &&
         intra_modes_rd_cost[luma_delta_angle + MAX_ANGLE_DELTA + 2] >
             rd_thresh;
}

// Finds the best non-intrabc mode on an intra frame.
int64_t av1_rd_pick_intra_sby_mode(const AV1_COMP *const cpi, MACROBLOCK *x,
                                   int *rate, int *rate_tokenonly,
                                   int64_t *distortion, uint8_t *skippable,
                                   BLOCK_SIZE bsize, int64_t best_rd,
                                   PICK_MODE_CONTEXT *ctx) {
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = xd->mi[0];
  assert(!is_inter_block(mbmi));
  int64_t best_model_rd = INT64_MAX;
  int is_directional_mode;
  uint8_t directional_mode_skip_mask[INTRA_MODES] = { 0 };
  // Flag indicating whether the rd cost of any intra mode evaluated here beats
  // the best_rd passed to this function.
  int beat_best_rd = 0;
  const int *bmode_costs;
  const IntraModeCfg *const intra_mode_cfg = &cpi->oxcf.intra_mode_cfg;
  PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
  const int try_palette =
      cpi->oxcf.tool_cfg.enable_palette &&
      av1_allow_palette(cpi->common.features.allow_screen_content_tools,
                        mbmi->bsize);
  uint8_t *best_palette_color_map =
      try_palette ? x->palette_buffer->best_palette_color_map : NULL;
  const MB_MODE_INFO *above_mi = xd->above_mbmi;
  const MB_MODE_INFO *left_mi = xd->left_mbmi;
  const PREDICTION_MODE A = av1_above_block_mode(above_mi);
  const PREDICTION_MODE L = av1_left_block_mode(left_mi);
  const int above_ctx = intra_mode_context[A];
  const int left_ctx = intra_mode_context[L];
  bmode_costs = x->mode_costs.y_mode_costs[above_ctx][left_ctx];

  mbmi->angle_delta[PLANE_TYPE_Y] = 0;
  const INTRA_MODE_SPEED_FEATURES *const intra_sf = &cpi->sf.intra_sf;
  if (intra_sf->intra_pruning_with_hog) {
    // Less aggressive thresholds are used here than those used in inter frame
    // encoding in av1_handle_intra_y_mode() because we want key frames/intra
    // frames to have higher quality.
    const float thresh[4] = { -1.2f, -1.2f, -0.6f, 0.4f };
    const int is_chroma = 0;
    prune_intra_mode_with_hog(x, bsize, cpi->common.seq_params->sb_size,
                              thresh[intra_sf->intra_pruning_with_hog - 1],
                              directional_mode_skip_mask, is_chroma);
  }
  mbmi->filter_intra_mode_info.use_filter_intra = 0;
  pmi->palette_size[0] = 0;

  // Set params for mode evaluation
  set_mode_eval_params(cpi, x, MODE_EVAL);

  MB_MODE_INFO best_mbmi = *mbmi;
  const int max_winner_mode_count =
      winner_mode_count_allowed[cpi->sf.winner_mode_sf.multi_winner_mode_type];
  zero_winner_mode_stats(bsize, max_winner_mode_count, x->winner_mode_stats);
  x->winner_mode_count = 0;

  // Searches the intra-modes except for intrabc, palette, and filter_intra.
  int64_t top_intra_model_rd[TOP_INTRA_MODEL_COUNT];
  for (int i = 0; i < TOP_INTRA_MODEL_COUNT; i++) {
    top_intra_model_rd[i] = INT64_MAX;
  }
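  // top_intra_model_rd[] keeps the lowest model rd values observed so far;
  // prune_intra_y_mode() uses them (together with best_model_rd) to skip modes
  // whose model-based estimate is clearly worse than the leading candidates.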

  // Initialize the rdcost corresponding to all the directional and
  // non-directional intra modes.
  // 1. For directional modes, it stores the rdcost values for delta angles -4,
  // -3, ..., 3, 4.
  // 2. The rdcost value for luma_delta_angle is stored at index
  // luma_delta_angle + MAX_ANGLE_DELTA + 1.
  // 3. The rdcost values for fictitious/nonexistent luma_delta_angle -4 and 4
  // (array indices 0 and 8) are always set to INT64_MAX (the initial value).
  int64_t intra_modes_rd_cost[INTRA_MODE_END]
                             [SIZE_OF_ANGLE_DELTA_RD_COST_ARRAY];
  for (int i = 0; i < INTRA_MODE_END; i++) {
    for (int j = 0; j < SIZE_OF_ANGLE_DELTA_RD_COST_ARRAY; j++) {
      intra_modes_rd_cost[i][j] = INT64_MAX;
    }
  }

  for (int mode_idx = INTRA_MODE_START; mode_idx < LUMA_MODE_COUNT;
       ++mode_idx) {
    set_y_mode_and_delta_angle(mode_idx, mbmi,
                               intra_sf->prune_luma_odd_delta_angles_in_intra);
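    // set_y_mode_and_delta_angle() maps mode_idx to the (prediction mode,
    // delta angle) combination evaluated in this iteration.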
    RD_STATS this_rd_stats;
    int this_rate, this_rate_tokenonly, s;
    int is_diagonal_mode;
    int64_t this_distortion, this_rd;
    const int luma_delta_angle = mbmi->angle_delta[PLANE_TYPE_Y];

    is_diagonal_mode = av1_is_diagonal_mode(mbmi->mode);
    if (is_diagonal_mode && !intra_mode_cfg->enable_diagonal_intra) continue;
    if (av1_is_directional_mode(mbmi->mode) &&
        !intra_mode_cfg->enable_directional_intra)
      continue;

    // The smooth prediction mode appears to be picked more frequently than the
    // horizontal / vertical smooth prediction modes. Hence treat them
    // differently in speed features.
    if ((!intra_mode_cfg->enable_smooth_intra ||
         intra_sf->disable_smooth_intra) &&
        (mbmi->mode == SMOOTH_H_PRED || mbmi->mode == SMOOTH_V_PRED))
      continue;
    if (!intra_mode_cfg->enable_smooth_intra && mbmi->mode == SMOOTH_PRED)
      continue;

    // The functionality of filter intra modes and smooth prediction overlaps.
    // Hence smooth prediction is pruned only if all the filter intra modes are
    // enabled.
    if (intra_sf->disable_smooth_intra &&
        intra_sf->prune_filter_intra_level == 0 && mbmi->mode == SMOOTH_PRED)
      continue;
    if (!intra_mode_cfg->enable_paeth_intra && mbmi->mode == PAETH_PRED)
      continue;

    // Skip the evaluation of modes that do not match with the winner mode in
    // x->mb_mode_cache.
    if (x->use_mb_mode_cache && mbmi->mode != x->mb_mode_cache->mode) continue;

    is_directional_mode = av1_is_directional_mode(mbmi->mode);
    if (is_directional_mode && directional_mode_skip_mask[mbmi->mode]) continue;
    if (is_directional_mode &&
        !(av1_use_angle_delta(bsize) && intra_mode_cfg->enable_angle_delta) &&
        luma_delta_angle != 0)
      continue;

    // Use intra_y_mode_mask speed feature to skip intra mode evaluation.
    if (!(intra_sf->intra_y_mode_mask[max_txsize_lookup[bsize]] &
          (1 << mbmi->mode)))
      continue;

    if (prune_luma_odd_delta_angles_using_rd_cost(
            mbmi, intra_modes_rd_cost[mbmi->mode], best_rd,
            intra_sf->prune_luma_odd_delta_angles_in_intra))
      continue;

    const TX_SIZE tx_size = AOMMIN(TX_32X32, max_txsize_lookup[bsize]);
    const int64_t this_model_rd =
        intra_model_rd(&cpi->common, x, 0, bsize, tx_size, /*use_hadamard=*/1);

    const int model_rd_index_for_pruning =
        get_model_rd_index_for_pruning(x, intra_sf);

    if (prune_intra_y_mode(this_model_rd, &best_model_rd, top_intra_model_rd,
                           intra_sf->top_intra_model_count_allowed,
                           model_rd_index_for_pruning))
      continue;

    // Builds the actual prediction. The model-based rd estimate above was only
    // an approximation that did not take the effect of the txfm pipeline into
    // account, so we need to redo the prediction and rd search for real here.
    av1_pick_uniform_tx_size_type_yrd(cpi, x, &this_rd_stats, bsize, best_rd);
    this_rate_tokenonly = this_rd_stats.rate;
    this_distortion = this_rd_stats.dist;
    s = this_rd_stats.skip_txfm;

    if (this_rate_tokenonly == INT_MAX) continue;

    if (!xd->lossless[mbmi->segment_id] && block_signals_txsize(mbmi->bsize)) {
      // av1_pick_uniform_tx_size_type_yrd above includes the cost of the
      // tx_size in the tokenonly rate, but for intra blocks, tx_size is always
      // coded (prediction granularity), so we account for it in the full rate,
      // not the tokenonly rate.
      this_rate_tokenonly -= tx_size_cost(x, bsize, mbmi->tx_size);
    }
    this_rate =
        this_rd_stats.rate +
        intra_mode_info_cost_y(cpi, x, mbmi, bsize, bmode_costs[mbmi->mode], 0);
    this_rd = RDCOST(x->rdmult, this_rate, this_distortion);

    // Visual quality adjustment based on recon vs source variance.
    if ((cpi->oxcf.mode == ALLINTRA) && (this_rd != INT64_MAX)) {
      this_rd = (int64_t)(this_rd * intra_rd_variance_factor(cpi, x, bsize));
    }

    intra_modes_rd_cost[mbmi->mode][luma_delta_angle + MAX_ANGLE_DELTA + 1] =
        this_rd;
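    // Record this rd cost so that odd delta angles of the same directional
    // mode can later be pruned against their even-angle neighbours in
    // prune_luma_odd_delta_angles_using_rd_cost().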

    // Collect mode stats for multiwinner mode processing
    const int txfm_search_done = 1;
    store_winner_mode_stats(
        &cpi->common, x, mbmi, NULL, NULL, NULL, 0, NULL, bsize, this_rd,
        cpi->sf.winner_mode_sf.multi_winner_mode_type, txfm_search_done);
    if (this_rd < best_rd) {
      best_mbmi = *mbmi;
      best_rd = this_rd;
      // Setting beat_best_rd flag because current mode rd is better than the
      // best_rd passed to this function
      beat_best_rd = 1;
      *rate = this_rate;
      *rate_tokenonly = this_rate_tokenonly;
      *distortion = this_distortion;
      *skippable = s;
      memcpy(ctx->blk_skip, x->txfm_search_info.blk_skip,
             sizeof(x->txfm_search_info.blk_skip[0]) * ctx->num_4x4_blk);
      av1_copy_array(ctx->tx_type_map, xd->tx_type_map, ctx->num_4x4_blk);
    }
  }

  // Searches palette
  if (try_palette) {
    av1_rd_pick_palette_intra_sby(
        cpi, x, bsize, bmode_costs[DC_PRED], &best_mbmi, best_palette_color_map,
        &best_rd, rate, rate_tokenonly, distortion, skippable, &beat_best_rd,
        ctx, ctx->blk_skip, ctx->tx_type_map);
  }

  // Searches filter_intra
  if (beat_best_rd && av1_filter_intra_allowed_bsize(&cpi->common, bsize)) {
    if (rd_pick_filter_intra_sby(cpi, x, rate, rate_tokenonly, distortion,
                                 skippable, bsize, bmode_costs[DC_PRED],
                                 best_mbmi.mode, &best_rd, &best_model_rd,
                                 ctx)) {
      best_mbmi = *mbmi;
    }
  }

  // No mode was found with a lower rd value than the best_rd passed to this
  // function. In that case winner mode processing is not necessary; return
  // INT64_MAX to indicate that no best mode was identified.
  if (!beat_best_rd) return INT64_MAX;

  // In multi-winner mode processing, perform tx search for the few best modes
  // identified during mode evaluation. Winner mode processing uses the best tx
  // configuration for tx search.
  if (cpi->sf.winner_mode_sf.multi_winner_mode_type) {
    int best_mode_idx = 0;
    int block_width, block_height;
    uint8_t *color_map_dst = xd->plane[PLANE_TYPE_Y].color_index_map;
    av1_get_block_dimensions(bsize, AOM_PLANE_Y, xd, &block_width,
                             &block_height, NULL, NULL);

    for (int mode_idx = 0; mode_idx < x->winner_mode_count; mode_idx++) {
      *mbmi = x->winner_mode_stats[mode_idx].mbmi;
      if (is_winner_mode_processing_enabled(cpi, x, mbmi, 0)) {
        // Restore color_map of palette mode before winner mode processing
        if (mbmi->palette_mode_info.palette_size[0] > 0) {
          uint8_t *color_map_src =
              x->winner_mode_stats[mode_idx].color_index_map;
          memcpy(color_map_dst, color_map_src,
                 block_width * block_height * sizeof(*color_map_src));
        }
        // Set params for winner mode evaluation
        set_mode_eval_params(cpi, x, WINNER_MODE_EVAL);

        // Winner mode processing
        // If previous searches used only the default tx type / no R-D
        // optimization of quantized coeffs, do an extra search for the best tx
        // type / better R-D optimization of quantized coeffs
        if (intra_block_yrd(cpi, x, bsize, bmode_costs, &best_rd, rate,
                            rate_tokenonly, distortion, skippable, &best_mbmi,
                            ctx))
          best_mode_idx = mode_idx;
      }
    }
    // Copy color_map of palette mode for final winner mode
    if (best_mbmi.palette_mode_info.palette_size[0] > 0) {
      uint8_t *color_map_src =
          x->winner_mode_stats[best_mode_idx].color_index_map;
      memcpy(color_map_dst, color_map_src,
             block_width * block_height * sizeof(*color_map_src));
    }
  } else {
    // If previous searches used only the default tx type / no R-D optimization
    // of quantized coeffs, do an extra search for the best tx type / better
    // R-D optimization of quantized coeffs
    if (is_winner_mode_processing_enabled(cpi, x, mbmi, 0)) {
      // Set params for winner mode evaluation
      set_mode_eval_params(cpi, x, WINNER_MODE_EVAL);
      *mbmi = best_mbmi;
      intra_block_yrd(cpi, x, bsize, bmode_costs, &best_rd, rate,
                      rate_tokenonly, distortion, skippable, &best_mbmi, ctx);
    }
  }
  *mbmi = best_mbmi;
  av1_copy_array(xd->tx_type_map, ctx->tx_type_map, ctx->num_4x4_blk);
  return best_rd;
}