
Searched refs: TX_32X32 (Results 1 – 17 of 17), sorted by relevance

/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/
vp9_common_data.c
107 TX_32X32, TX_32X32, TX_32X32, TX_32X32
114 TX_32X32, // ALLOW_32X32
115 TX_32X32, // TX_MODE_SELECT
vp9_pred_common.h
108 case TX_32X32: in get_tx_probs()
129 case TX_32X32: in get_tx_counts()
vp9_entropy.c
726 vp9_copy(cm->fc.coef_probs[TX_32X32], default_coef_probs_32x32); in vp9_default_coef_probs()
781 for (t = TX_4X4; t <= TX_32X32; t++) in vp9_adapt_coef_probs()
vp9_entropymode.c
283 tx_count_32x32p[TX_32X32]; in tx_counts_to_branch_counts_32x32()
286 tx_count_32x32p[TX_32X32]; in tx_counts_to_branch_counts_32x32()
288 ct_32x32p[2][1] = tx_count_32x32p[TX_32X32]; in tx_counts_to_branch_counts_32x32()
vp9_enums.h
64 TX_32X32 = 3, // 32x32 transform enumerator
vp9_loopfilter.c
764 lfm->left_y[TX_16X16] |= lfm->left_y[TX_32X32]; in vp9_setup_mask()
765 lfm->above_y[TX_16X16] |= lfm->above_y[TX_32X32]; in vp9_setup_mask()
766 lfm->left_uv[TX_16X16] |= lfm->left_uv[TX_32X32]; in vp9_setup_mask()
767 lfm->above_uv[TX_16X16] |= lfm->above_uv[TX_32X32]; in vp9_setup_mask()
790 for (i = 0; i < TX_32X32; i++) { in vp9_setup_mask()
824 for (i = 0; i < TX_32X32; i++) { in vp9_setup_mask()
846 for (i = 0; i < TX_32X32; i++) { in vp9_setup_mask()
953 if (tx_size == TX_32X32) { in filter_block_plane_non420()
vp9_entropy.h
165 case TX_32X32: in get_entropy_context()
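
For reference, a minimal sketch of the TX_SIZE enumeration that the vp9_enums.h hit above belongs to. Only TX_32X32 = 3 is taken from the hit itself; the remaining members and their ordering are inferred from loop bounds such as "for (t = TX_4X4; t <= TX_32X32; t++)" (vp9_entropy.c, line 781) and "for (i = 0; i < TX_32X32; i++)" (vp9_loopfilter.c), so treat the comments and the TX_SIZES terminator as assumptions rather than the exact libvpx text.

    /* Sketch of the TX_SIZE enum; ordering inferred, comments illustrative. */
    typedef enum {
      TX_4X4 = 0,   /* 4x4 transform */
      TX_8X8 = 1,   /* 8x8 transform */
      TX_16X16 = 2, /* 16x16 transform */
      TX_32X32 = 3, /* 32x32 transform, the symbol searched for here */
      TX_SIZES      /* count of transform sizes (assumed name) */
    } TX_SIZE;
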
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/test/
partial_idct_test.cc
67 case TX_32X32: in TEST_P()
124 TX_32X32, 34),
127 TX_32X32, 1),
149 TX_32X32, 1),
173 TX_32X32, 34),
176 TX_32X32, 1),
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/
vp9_speed_features.c
72 sf->intra_y_mode_mask[TX_32X32] = INTRA_DC_H_V; in set_good_speed_feature()
73 sf->intra_uv_mode_mask[TX_32X32] = INTRA_DC_H_V; in set_good_speed_feature()
172 sf->intra_y_mode_mask[TX_32X32] = INTRA_DC_H_V; in set_rt_speed_feature()
173 sf->intra_uv_mode_mask[TX_32X32] = INTRA_DC_H_V; in set_rt_speed_feature()
210 sf->intra_y_mode_mask[TX_32X32] = INTRA_DC_H_V; in set_rt_speed_feature()
212 sf->intra_uv_mode_mask[TX_32X32] = INTRA_DC_H_V; in set_rt_speed_feature()
246 sf->intra_y_mode_mask[TX_32X32] = INTRA_DC_ONLY; in set_rt_speed_feature()
vp9_encodemb.c
129 const int mul = 1 + (tx_size == TX_32X32); in optimize_b()
334 case TX_32X32: in vp9_xform_quant()
408 case TX_32X32: in encode_block()
503 case TX_32X32: in encode_block_intra()
504 scan_order = &vp9_default_scan_orders[TX_32X32]; in encode_block_intra()
506 vp9_predict_intra_block(xd, block >> 6, bwl, TX_32X32, mode, in encode_block_intra()
vp9_rdopt.c
179 for (t = TX_4X4; t <= TX_32X32; ++t) in fill_token_costs()
498 } else if (tx_size == TX_32X32) { in model_rd_for_sb_y_tx()
634 int shift = tx_size == TX_32X32 ? 0 : 2; in dist_block()
727 case TX_32X32: in vp9_get_entropy_contexts()
853 tx_cache[ALLOW_32X32] = rd[MIN(max_tx_size, TX_32X32)][0]; in choose_txfm_size_from_rd()
855 if (max_tx_size == TX_32X32 && best_tx == TX_32X32) { in choose_txfm_size_from_rd()
856 tx_cache[TX_MODE_SELECT] = rd[TX_32X32][1]; in choose_txfm_size_from_rd()
935 if (max_tx_size == TX_32X32 && best_tx == TX_32X32) { in choose_txfm_size_from_modelrd()
vp9_bitstream.c
92 if (tx_size != TX_8X8 && max_tx_size >= TX_32X32) in write_selected_tx_size()
676 for (tx_size = TX_4X4; tx_size <= TX_32X32; ++tx_size) in update_coef_probs()
vp9_onyx_if.c
2755 for (t = TX_4X4; t <= TX_32X32; t++)
vp9_encodeframe.c
3325 count32x32 += cm->counts.tx.p32x32[i][TX_32X32]; in vp9_encode_frame()
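
One pattern worth noting across these results: the vp9_encodemb.c hit at line 129 (mul = 1 + (tx_size == TX_32X32)) and the vp9_detokenize.c hit at line 101 in the decoder section below (dq_shift = (tx_size == TX_32X32)) both special-case the 32x32 size, because VP9's 32x32 transform path carries one extra bit of scaling and its dequantized coefficients are therefore shifted right by one. A minimal sketch of that dequant step, with an illustrative helper name not taken from libvpx:

    #include <stdint.h>

    /* Illustrative only: the 32x32 transform path carries an extra scaling
     * bit, so the dequantized value is halved for that size alone. */
    static int32_t dequant_coeff_sketch(int32_t qcoeff, int32_t dequant,
                                        int is_tx_32x32) {
      const int dq_shift = is_tx_32x32 ? 1 : 0; /* mirrors the dq_shift hit */
      return (qcoeff * dequant) >> dq_shift;
    }
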
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/decoder/
vp9_detokenize.c
101 const int dq_shift = (tx_size == TX_32X32); in decode_coefs()
vp9_decodemv.c
69 if (tx_size != TX_8X8 && max_tx_size >= TX_32X32) in read_selected_tx_size()
vp9_decodeframe.c
216 case TX_32X32: in inverse_transform_block()
229 else if (tx_size == TX_32X32 && eob <= 34) in inverse_transform_block()
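
Finally, the vp9_decodeframe.c hit at line 229 (tx_size == TX_32X32 && eob <= 34) and the (TX_32X32, 34) / (TX_32X32, 1) parameter pairs in partial_idct_test.cc point at the same shortcut: when the end-of-block position shows that only the DC coefficient, or only the first 34 coefficients in scan order, can be nonzero, a reduced 32x32 inverse transform is used instead of the full one. A dispatch sketch under that assumption; the function names below are illustrative stand-ins, not the libvpx API:

    #include <stdint.h>

    /* Illustrative prototypes standing in for the full and reduced 32x32
     * inverse transform kernels. */
    void idct32x32_full_add(const int16_t *input, uint8_t *dest, int stride);
    void idct32x32_34_add(const int16_t *input, uint8_t *dest, int stride);
    void idct32x32_dc_add(const int16_t *input, uint8_t *dest, int stride);

    /* Dispatch on eob, mirroring the thresholds seen in the hits above. */
    static void inverse_tx_32x32_sketch(const int16_t *input, uint8_t *dest,
                                        int stride, int eob) {
      if (eob == 1)
        idct32x32_dc_add(input, dest, stride);    /* only DC is nonzero */
      else if (eob <= 34)
        idct32x32_34_add(input, dest, stride);    /* low-frequency subset */
      else
        idct32x32_full_add(input, dest, stride);  /* general case */
    }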