/external/libaom/libaom/aom_scale/generic/ |
D | yv12extend.c |
    260  uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst);  in aom_yv12_copy_y_c() local
    262  memcpy(dst16, src16, src_ybc->y_width * sizeof(uint16_t));  in aom_yv12_copy_y_c()
    264  dst16 += dst_ybc->y_stride;  in aom_yv12_copy_y_c()
    284  uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst);  in aom_yv12_copy_u_c() local
    286  memcpy(dst16, src16, src_bc->uv_width * sizeof(uint16_t));  in aom_yv12_copy_u_c()
    288  dst16 += dst_bc->uv_stride;  in aom_yv12_copy_u_c()
    308  uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst);  in aom_yv12_copy_v_c() local
    310  memcpy(dst16, src16, src_bc->uv_width * sizeof(uint16_t));  in aom_yv12_copy_v_c()
    312  dst16 += dst_bc->uv_stride;  in aom_yv12_copy_v_c()
    335  uint16_t *dst16 =  in aom_yv12_partial_copy_y_c() local
    [all …]
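The hits above are the high-bit-depth rows of the plane-copy helpers: the destination pointer is reinterpreted as uint16_t * via CONVERT_TO_SHORTPTR, each row is copied with memcpy, and the pointer then advances by the plane stride. A minimal self-contained sketch of that per-row loop (the width/stride parameters are illustrative stand-ins for the YV12_BUFFER_CONFIG fields used in the real code):

    #include <stdint.h>
    #include <string.h>

    /* Row-by-row copy of a 16-bit plane; strides are in uint16_t units.
     * Sketch only -- the library reads the widths and strides from
     * YV12_BUFFER_CONFIG instead of taking them as parameters. */
    static void copy_plane16(const uint16_t *src16, int src_stride,
                             uint16_t *dst16, int dst_stride,
                             int width, int height) {
      for (int row = 0; row < height; ++row) {
        memcpy(dst16, src16, width * sizeof(uint16_t));
        src16 += src_stride;
        dst16 += dst_stride;
      }
    }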
|
/external/libaom/libaom/test/ |
D | transform_test_base.h |
    61   uint16_t *dst16 = reinterpret_cast<uint16_t *>(  in RunAccuracyCheck() local
    75   dst16[j] = rnd.Rand16() & mask_;  in RunAccuracyCheck()
    76   test_input_block[j] = src16[j] - dst16[j];  in RunAccuracyCheck()
    86   RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));  in RunAccuracyCheck()
    91   bit_depth_ == AOM_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];  in RunAccuracyCheck()
    112  aom_free(dst16);  in RunAccuracyCheck()
    270  uint16_t *dst16 = reinterpret_cast<uint16_t *>(  in RunInvAccuracyCheck() local
    284  dst16[j] = rnd.Rand16() & mask_;  in RunInvAccuracyCheck()
    285  in[j] = src16[j] - dst16[j];  in RunInvAccuracyCheck()
    295  RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), pitch_));  in RunInvAccuracyCheck()
    [all …]
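These transform tests (and the libvpx tests below) share one round-trip pattern: fill src16 and dst16 with random pixels masked to the bit depth, form the residual src16[j] - dst16[j], run the forward and inverse transforms, and accumulate the squared error between the reconstruction and the source. A hedged sketch of just the error-accumulation step, with the transform calls and test-fixture plumbing omitted:

    #include <stdint.h>

    /* After the inverse transform has added the reconstructed residual back
     * into dst16, measure how far the reconstruction drifted from the source.
     * Sketch only; names mirror the snippets above, not the actual fixture. */
    static void accumulate_error(const uint16_t *src16, const uint16_t *dst16,
                                 int num_coeffs, uint64_t *total_error,
                                 uint32_t *max_error) {
      for (int j = 0; j < num_coeffs; ++j) {
        const int diff = (int)dst16[j] - (int)src16[j];
        const uint32_t error = (uint32_t)(diff * diff);
        if (error > *max_error) *max_error = error;
        *total_error += error;
      }
    }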
|
/external/libvpx/libvpx/test/ |
D | fdct8x8_test.cc |
    223  DECLARE_ALIGNED(16, uint16_t, dst16[64]);  in RunRoundTripErrorCheck()
    237  dst16[j] = rnd.Rand16() & mask_;  in RunRoundTripErrorCheck()
    238  test_input_block[j] = src16[j] - dst16[j];  in RunRoundTripErrorCheck()
    261  RunInvTxfm(test_temp_block, CAST_TO_BYTEPTR(dst16), pitch_));  in RunRoundTripErrorCheck()
    268  bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];  in RunRoundTripErrorCheck()
    299  DECLARE_ALIGNED(16, uint16_t, dst16[64]);  in RunExtremalCheck()
    322  dst16[j] = 0;  in RunExtremalCheck()
    325  dst16[j] = mask_;  in RunExtremalCheck()
    328  dst16[j] = rnd.Rand8() % 2 ? mask_ : 0;  in RunExtremalCheck()
    330  test_input_block[j] = src16[j] - dst16[j];  in RunExtremalCheck()
    [all …]
|
D | dct32x32_test.cc |
    123  DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);  in TEST_P()
    137  dst16[j] = rnd.Rand16() & mask_;  in TEST_P()
    138  test_input_block[j] = src16[j] - dst16[j];  in TEST_P()
    149  inv_txfm_(test_temp_block, CAST_TO_BYTEPTR(dst16), 32));  in TEST_P()
    156  bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];  in TEST_P()
    270  DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);  in TEST_P()
    286  dst16[j] = rnd.Rand16() & mask_;  in TEST_P()
    287  in[j] = src16[j] - dst16[j];  in TEST_P()
    300  ASM_REGISTER_STATE_CHECK(inv_txfm_(coeff, CAST_TO_BYTEPTR(dst16), 32));  in TEST_P()
    306  bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];  in TEST_P()
|
D | dct16x16_test.cc |
    330  DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);  in RunAccuracyCheck()
    343  dst16[j] = rnd.Rand16() & mask_;  in RunAccuracyCheck()
    344  test_input_block[j] = src16[j] - dst16[j];  in RunAccuracyCheck()
    356  RunInvTxfm(test_temp_block, CAST_TO_BYTEPTR(dst16), pitch_));  in RunAccuracyCheck()
    363  bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];  in RunAccuracyCheck()
    442  DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);  in RunQuantCheck()
    464  memset(dst16, 0, kNumCoeffs * sizeof(uint16_t));  in RunQuantCheck()
    481  RunInvTxfm(output_ref_block, CAST_TO_BYTEPTR(dst16), pitch_));  in RunQuantCheck()
    488  for (int j = 0; j < kNumCoeffs; ++j) EXPECT_EQ(ref16[j], dst16[j]);  in RunQuantCheck()
    502  DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);  in RunInvAccuracyCheck()
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/SLPVectorizer/X86/ |
D | bswap.ll |
    13   @dst16 = common global [16 x i16] zeroinitializer, align 32
    150  ; CHECK-NEXT: store <8 x i16> [[TMP2]], <8 x i16>* bitcast ([16 x i16]* @dst16 to <8 x i16>*), a…
    169  …store i16 %bswap0, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 0), ali…
    170  …store i16 %bswap1, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 1), ali…
    171  …store i16 %bswap2, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 2), ali…
    172  …store i16 %bswap3, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 3), ali…
    173  …store i16 %bswap4, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 4), ali…
    174  …store i16 %bswap5, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 5), ali…
    175  …store i16 %bswap6, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 6), ali…
    176  …store i16 %bswap7, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 7), ali…
    [all …]
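The SLP vectorizer tests in this group all have the same shape: a run of independent scalar i16 operations stored element by element into @dst16, with CHECK lines expecting the stores to be merged into a single <8 x i16> vector store. A C analogue of the scalar bswap pattern being tested (illustrative only; the tests themselves are LLVM IR, and the src16/dst16 arrays here merely mirror the tests' globals):

    #include <stdint.h>

    uint16_t src16[16];
    uint16_t dst16[16];

    static inline uint16_t bswap16(uint16_t v) {
      return (uint16_t)((v << 8) | (v >> 8));
    }

    /* Straight-line scalar stores, like the unrolled IR in the test; an
     * SLP-style vectorizer can merge them into one <8 x i16> store. */
    void bswap_8x16(void) {
      dst16[0] = bswap16(src16[0]);
      dst16[1] = bswap16(src16[1]);
      dst16[2] = bswap16(src16[2]);
      dst16[3] = bswap16(src16[3]);
      dst16[4] = bswap16(src16[4]);
      dst16[5] = bswap16(src16[5]);
      dst16[6] = bswap16(src16[6]);
      dst16[7] = bswap16(src16[7]);
    }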
|
D | bitreverse.ll |
    15   @dst16 = common global [16 x i16] zeroinitializer, align 32
    152  ; CHECK-NEXT: store <8 x i16> [[TMP2]], <8 x i16>* bitcast ([16 x i16]* @dst16 to <8 x i16>*), a…
    171  …store i16 %bitreverse0, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 0)…
    172  …store i16 %bitreverse1, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 1)…
    173  …store i16 %bitreverse2, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 2)…
    174  …store i16 %bitreverse3, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 3)…
    175  …store i16 %bitreverse4, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 4)…
    176  …store i16 %bitreverse5, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 5)…
    177  …store i16 %bitreverse6, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 6)…
    178  …store i16 %bitreverse7, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 7)…
    [all …]
|
D | fptoui.ll |
    14   @dst16 = common global [32 x i16] zeroinitializer, align 64
    215  ; SSE-NEXT: store i16 [[CVT0]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @dst16, i32…
    216  ; SSE-NEXT: store i16 [[CVT1]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @dst16, i32…
    217  ; SSE-NEXT: store i16 [[CVT2]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @dst16, i32…
    218  ; SSE-NEXT: store i16 [[CVT3]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @dst16, i32…
    219  ; SSE-NEXT: store i16 [[CVT4]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @dst16, i32…
    220  ; SSE-NEXT: store i16 [[CVT5]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @dst16, i32…
    221  ; SSE-NEXT: store i16 [[CVT6]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @dst16, i32…
    222  ; SSE-NEXT: store i16 [[CVT7]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @dst16, i32…
    242  ; AVX256-NEXT: store i16 [[CVT0]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @dst16, …
    [all …]
|
D | cttz.ll |
    14   @dst16 = common global [16 x i16] zeroinitializer, align 32
    200  ; CHECK-NEXT: store <8 x i16> [[TMP2]], <8 x i16>* bitcast ([16 x i16]* @dst16 to <8 x i16>*), a…
    219  …store i16 %cttz0, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 0), alig…
    220  …store i16 %cttz1, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 1), alig…
    221  …store i16 %cttz2, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 2), alig…
    222  …store i16 %cttz3, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 3), alig…
    223  …store i16 %cttz4, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 4), alig…
    224  …store i16 %cttz5, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 5), alig…
    225  …store i16 %cttz6, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 6), alig…
    226  …store i16 %cttz7, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 7), alig…
    [all …]
|
D | ctlz.ll |
    14   @dst16 = common global [16 x i16] zeroinitializer, align 32
    200  ; CHECK-NEXT: store <8 x i16> [[TMP2]], <8 x i16>* bitcast ([16 x i16]* @dst16 to <8 x i16>*), a…
    219  …store i16 %ctlz0, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 0), alig…
    220  …store i16 %ctlz1, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 1), alig…
    221  …store i16 %ctlz2, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 2), alig…
    222  …store i16 %ctlz3, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 3), alig…
    223  …store i16 %ctlz4, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 4), alig…
    224  …store i16 %ctlz5, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 5), alig…
    225  …store i16 %ctlz6, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 6), alig…
    226  …store i16 %ctlz7, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 7), alig…
    [all …]
|
D | ctpop.ll |
    14   @dst16 = common global [16 x i16] zeroinitializer, align 32
    247  ; CHECK-NEXT: store <8 x i16> [[TMP2]], <8 x i16>* bitcast ([16 x i16]* @dst16 to <8 x i16>*), a…
    266  …store i16 %ctpop0, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 0), ali…
    267  …store i16 %ctpop1, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 1), ali…
    268  …store i16 %ctpop2, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 2), ali…
    269  …store i16 %ctpop3, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 3), ali…
    270  …store i16 %ctpop4, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 4), ali…
    271  …store i16 %ctpop5, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 5), ali…
    272  …store i16 %ctpop6, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 6), ali…
    273  …store i16 %ctpop7, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 7), ali…
    [all …]
|
D | fptosi.ll |
    14   @dst16 = common global [32 x i16] zeroinitializer, align 64
    170  ; SSE-NEXT: store i16 [[CVT0]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @dst16, i32…
    171  ; SSE-NEXT: store i16 [[CVT1]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @dst16, i32…
    172  ; SSE-NEXT: store i16 [[CVT2]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @dst16, i32…
    173  ; SSE-NEXT: store i16 [[CVT3]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @dst16, i32…
    174  ; SSE-NEXT: store i16 [[CVT4]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @dst16, i32…
    175  ; SSE-NEXT: store i16 [[CVT5]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @dst16, i32…
    176  ; SSE-NEXT: store i16 [[CVT6]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @dst16, i32…
    177  ; SSE-NEXT: store i16 [[CVT7]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @dst16, i32…
    183  ; AVX-NEXT: store <8 x i16> [[TMP2]], <8 x i16>* bitcast ([32 x i16]* @dst16 to <8 x i16>*), ali…
    [all …]
|
/external/llvm/test/Transforms/SLPVectorizer/X86/ |
D | bswap.ll |
    13   @dst16 = common global [16 x i16] zeroinitializer, align 32
    150  ; CHECK-NEXT: store <8 x i16> [[TMP2]], <8 x i16>* bitcast ([16 x i16]* @dst16 to <8 x i16>*), a…
    169  …store i16 %bswap0, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 0), ali…
    170  …store i16 %bswap1, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 1), ali…
    171  …store i16 %bswap2, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 2), ali…
    172  …store i16 %bswap3, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 3), ali…
    173  …store i16 %bswap4, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 4), ali…
    174  …store i16 %bswap5, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 5), ali…
    175  …store i16 %bswap6, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 6), ali…
    176  …store i16 %bswap7, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 7), ali…
    [all …]
|
D | ctlz.ll |
    13   @dst16 = common global [16 x i16] zeroinitializer, align 32
    180  ; CHECK-NEXT: store i16 [[CTLZ0]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, …
    181  ; CHECK-NEXT: store i16 [[CTLZ1]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, …
    182  ; CHECK-NEXT: store i16 [[CTLZ2]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, …
    183  ; CHECK-NEXT: store i16 [[CTLZ3]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, …
    184  ; CHECK-NEXT: store i16 [[CTLZ4]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, …
    185  ; CHECK-NEXT: store i16 [[CTLZ5]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, …
    186  ; CHECK-NEXT: store i16 [[CTLZ6]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, …
    187  ; CHECK-NEXT: store i16 [[CTLZ7]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, …
    206  …store i16 %ctlz0, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 0), alig…
    [all …]
|
D | cttz.ll |
    13   @dst16 = common global [16 x i16] zeroinitializer, align 32
    180  ; CHECK-NEXT: store i16 [[CTTZ0]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, …
    181  ; CHECK-NEXT: store i16 [[CTTZ1]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, …
    182  ; CHECK-NEXT: store i16 [[CTTZ2]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, …
    183  ; CHECK-NEXT: store i16 [[CTTZ3]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, …
    184  ; CHECK-NEXT: store i16 [[CTTZ4]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, …
    185  ; CHECK-NEXT: store i16 [[CTTZ5]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, …
    186  ; CHECK-NEXT: store i16 [[CTTZ6]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, …
    187  ; CHECK-NEXT: store i16 [[CTTZ7]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, …
    206  …store i16 %cttz0, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 0), alig…
    [all …]
|
D | ctpop.ll |
    13   @dst16 = common global [16 x i16] zeroinitializer, align 32
    138  ; CHECK-NEXT: store <8 x i16> [[TMP2]], <8 x i16>* bitcast ([16 x i16]* @dst16 to <8 x i16>*), a…
    157  …store i16 %ctpop0, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 0), ali…
    158  …store i16 %ctpop1, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 1), ali…
    159  …store i16 %ctpop2, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 2), ali…
    160  …store i16 %ctpop3, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 3), ali…
    161  …store i16 %ctpop4, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 4), ali…
    162  …store i16 %ctpop5, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 5), ali…
    163  …store i16 %ctpop6, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 6), ali…
    164  …store i16 %ctpop7, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 7), ali…
    [all …]
|
D | bitreverse.ll |
    15   @dst16 = common global [16 x i16] zeroinitializer, align 32
    244  …16 [[BITREVERSE0]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 0), al…
    245  …16 [[BITREVERSE1]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 1), al…
    246  …16 [[BITREVERSE2]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 2), al…
    247  …16 [[BITREVERSE3]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 3), al…
    248  …16 [[BITREVERSE4]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 4), al…
    249  …16 [[BITREVERSE5]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 5), al…
    250  …16 [[BITREVERSE6]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 6), al…
    251  …16 [[BITREVERSE7]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 7), al…
    257  ; AVX-NEXT: store <8 x i16> [[TMP2]], <8 x i16>* bitcast ([16 x i16]* @dst16 to <8 x i16>*), ali…
    [all …]
|
/external/libaom/libaom/av1/common/ |
D | cdef_block.h |
    43  typedef void (*cdef_filter_block_func)(uint8_t *dst8, uint16_t *dst16,
    52  void cdef_filter_fb(uint8_t *dst8, uint16_t *dst16, int dstride, uint16_t *in,
|
D | cdef_block.c |
    114  void cdef_filter_block_c(uint8_t *dst8, uint16_t *dst16, int dstride,  in cdef_filter_block_c() argument
    159  dst16[i * dstride + j] = (uint16_t)y;  in cdef_filter_block_c()
    176  void cdef_filter_fb(uint8_t *dst8, uint16_t *dst16, int dstride, uint16_t *in,  in cdef_filter_fb() argument
    207  dst16[(bi << (bsizex + bsizey)) + (iy << bsizex) + ix] =  in cdef_filter_fb()
    248  &dst16[dirinit ? bi << (bsizex + bsizey)  in cdef_filter_fb()
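Both the function-pointer type in cdef_block.h and the C implementations take a uint8_t *dst8 and a uint16_t *dst16, so one CDEF kernel can serve the 8-bit and high-bit-depth reconstruction paths. A hedged sketch of that dual-destination store idiom, assuming exactly one of the two pointers is in use (the actual CDEF filtering math is omitted):

    #include <stdint.h>
    #include <stddef.h>

    /* Write one filtered sample to whichever destination plane is active.
     * Sketch of the dst8/dst16 idiom only; 'y' stands for the filtered value. */
    static void cdef_store_sample(uint8_t *dst8, uint16_t *dst16, int dstride,
                                  int i, int j, int y) {
      if (dst16 != NULL)
        dst16[i * dstride + j] = (uint16_t)y;
      else
        dst8[i * dstride + j] = (uint8_t)y;
    }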
|
D | convolve.c |
    462  CONV_BUF_TYPE *dst16 = conv_params->dst;  in av1_convolve_2d_scale_c() local
    512  int32_t tmp = dst16[y * dst16_stride + x];  in av1_convolve_2d_scale_c()
    525  dst16[y * dst16_stride + x] = res;  in av1_convolve_2d_scale_c()
    751  const uint16_t *src, int src_stride, uint16_t *dst16, int dst16_stride,  in av1_highbd_dist_wtd_convolve_2d_c() argument
    808  dst16[y * dst16_stride + x] =  in av1_highbd_dist_wtd_convolve_2d_c()
    818  const uint16_t *src, int src_stride, uint16_t *dst16, int dst16_stride,  in av1_highbd_dist_wtd_convolve_x_c() argument
    857  dst16[y * dst16_stride + x] =  in av1_highbd_dist_wtd_convolve_x_c()
    867  const uint16_t *src, int src_stride, uint16_t *dst16, int dst16_stride,  in av1_highbd_dist_wtd_convolve_y_c() argument
    906  dst16[y * dst16_stride + x] =  in av1_highbd_dist_wtd_convolve_y_c()
    916  const uint16_t *src, int src_stride, uint16_t *dst16, int dst16_stride,  in av1_highbd_dist_wtd_convolve_2d_copy_c() argument
    [all …]
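In these compound-prediction paths dst16 is the intermediate CONV_BUF_TYPE buffer: the first reference writes its convolution result into it, and the second reference reads the stored value back (the int32_t tmp = dst16[...] lines) and blends it with its own result. A hedged sketch of that read-blend-write step, using a plain average in place of the library's distance-weighted rounding and offsets:

    #include <stdint.h>

    typedef uint16_t CONV_BUF_TYPE;

    /* Accumulate a convolution result into the compound prediction buffer.
     * Sketch only: the real code applies distance-weighted rounding, not a
     * simple average. */
    static void compound_accumulate(CONV_BUF_TYPE *dst16, int dst16_stride,
                                    int x, int y, int32_t res,
                                    int is_second_ref) {
      if (is_second_ref) {
        const int32_t tmp = dst16[y * dst16_stride + x];
        dst16[y * dst16_stride + x] = (CONV_BUF_TYPE)((tmp + res + 1) >> 1);
      } else {
        dst16[y * dst16_stride + x] = (CONV_BUF_TYPE)res;
      }
    }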
|
/external/libaom/libaom/av1/common/x86/ |
D | av1_convolve_scale_sse4.c |
    120  CONV_BUF_TYPE *dst16 = conv_params->dst;  in vfilter8() local
    171  CONV_BUF_TYPE *dst_16_x = dst16 + y * dst16_stride + x;  in vfilter8()
    209  int32_t tmp = dst16[y * dst16_stride + x];  in vfilter8()
    221  dst16[y * dst16_stride + x] = res;  in vfilter8()
    347  CONV_BUF_TYPE *dst16 = conv_params->dst;  in highbd_vfilter8() local
    403  CONV_BUF_TYPE *dst_16_x = dst16 + y * dst16_stride + x;  in highbd_vfilter8()
    445  int32_t tmp = dst16[y * dst16_stride + x];  in highbd_vfilter8()
    459  dst16[y * dst16_stride + x] = res;  in highbd_vfilter8()
|
/external/libvpx/libvpx/vp9/encoder/ |
D | vp9_encodemb.c |
    643  uint16_t *const dst16 = CONVERT_TO_SHORTPTR(dst);  in encode_block() local
    646  vp9_highbd_idct32x32_add(dqcoeff, dst16, pd->dst.stride, p->eobs[block],  in encode_block()
    650  vp9_highbd_idct16x16_add(dqcoeff, dst16, pd->dst.stride, p->eobs[block],  in encode_block()
    654  vp9_highbd_idct8x8_add(dqcoeff, dst16, pd->dst.stride, p->eobs[block],  in encode_block()
    662  x->highbd_inv_txfm_add(dqcoeff, dst16, pd->dst.stride, p->eobs[block],  in encode_block()
    810  uint16_t *const dst16 = CONVERT_TO_SHORTPTR(dst);  in vp9_encode_block_intra() local
    826  vp9_highbd_idct32x32_add(dqcoeff, dst16, dst_stride, *eob, xd->bd);  in vp9_encode_block_intra()
    846  vp9_highbd_iht16x16_add(tx_type, dqcoeff, dst16, dst_stride, *eob,  in vp9_encode_block_intra()
    867  vp9_highbd_iht8x8_add(tx_type, dqcoeff, dst16, dst_stride, *eob,  in vp9_encode_block_intra()
    893  x->highbd_inv_txfm_add(dqcoeff, dst16, dst_stride, *eob, xd->bd);  in vp9_encode_block_intra()
    [all …]
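Here the encoder's reconstruction step converts the 8-bit destination pointer with CONVERT_TO_SHORTPTR and then picks a vp9_highbd_idct*_add / vp9_highbd_iht*_add routine by transform size, each of which adds the dequantized residual into dst16 in place. A condensed sketch of that size dispatch, with function pointers and a stand-in coefficient type so the example stays self-contained (the real encode_block() calls the concrete routines directly):

    #include <stdint.h>

    typedef int32_t coeff_t;  /* stand-in for the library's tran_low_t */
    typedef void (*highbd_idct_add_fn)(const coeff_t *dqcoeff, uint16_t *dst16,
                                       int stride, int eob, int bd);

    enum sketch_tx_size { TX_4X4_S, TX_8X8_S, TX_16X16_S, TX_32X32_S };

    /* Dispatch on transform size and add the residual into the 16-bit
     * reconstruction buffer (sketch only). */
    static void inv_txfm_add_sketch(enum sketch_tx_size tx,
                                    const highbd_idct_add_fn fns[4],
                                    const coeff_t *dqcoeff, uint16_t *dst16,
                                    int stride, int eob, int bd) {
      fns[tx](dqcoeff, dst16, stride, eob, bd);
    }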
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/CodeGenPrepare/AArch64/ |
D | free-zext.ll |
    30  define i32 @test_free_zext2(i32* %ptr, i16* %dst16, i32* %dst32, i32 %c) {
    42  store i16 %trunc, i16* %dst16, align 2
|
/external/llvm/test/Transforms/CodeGenPrepare/AArch64/ |
D | free-zext.ll |
    30  define i32 @test_free_zext2(i32* %ptr, i16* %dst16, i32* %dst32, i32 %c) {
    42  store i16 %trunc, i16* %dst16, align 2
|
/external/libvpx/libvpx/vpx_scale/generic/ |
D | yv12extend.c |
    320  uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst);  in vpx_yv12_copy_y_c() local
    322  memcpy(dst16, src16, src_ybc->y_width * sizeof(uint16_t));  in vpx_yv12_copy_y_c()
    324  dst16 += dst_ybc->y_stride;  in vpx_yv12_copy_y_c()
|