/external/libaom/libaom/aom_util/
D | debug_util.c
    126  const uint16_t *src16 = highbd ? CONVERT_TO_SHORTPTR(src) : NULL;  in mismatch_record_block_pre() local
    131  src16 ? src16[r * src_stride + c] : src[r * src_stride + c];  in mismatch_record_block_pre()
    159  const uint16_t *src16 = highbd ? CONVERT_TO_SHORTPTR(src) : NULL;  in mismatch_record_block_tx() local
    164  src16 ? src16[r * src_stride + c] : src[r * src_stride + c];  in mismatch_record_block_tx()
    191  const uint16_t *src16 = highbd ? CONVERT_TO_SHORTPTR(src) : NULL;  in mismatch_check_block_pre() local
    197  (uint16_t)(src16 ? src16[r * src_stride + c]  in mismatch_check_block_pre()
    222  src16 ? src16[rr * src_stride + cc] : src[rr * src_stride + cc]);  in mismatch_check_block_pre()
    237  const uint16_t *src16 = highbd ? CONVERT_TO_SHORTPTR(src) : NULL;  in mismatch_check_block_tx() local
    243  (uint16_t)(src16 ? src16[r * src_stride + c]  in mismatch_check_block_tx()
    268  src16 ? src16[rr * src_stride + cc] : src[rr * src_stride + cc]);  in mismatch_check_block_tx()
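All of the debug_util.c matches follow libaom's high-bitdepth convention: the frame pointer is declared uint8_t *, and when highbd is set it actually refers to uint16_t samples that CONVERT_TO_SHORTPTR unwraps. A minimal sketch of that dual-path read, with the macro mocked as a plain cast for illustration (the real libaom macro does pointer arithmetic, not a cast):

#include <stdint.h>

/* Mocked for illustration only; not libaom's real definition. */
#define CONVERT_TO_SHORTPTR(p) ((const uint16_t *)(const void *)(p))

static void read_block(const uint8_t *src, int src_stride, int rows, int cols,
                       int highbd, uint16_t *out) {
  /* Resolve the 8-bit vs. 16-bit plane once per block... */
  const uint16_t *src16 = highbd ? CONVERT_TO_SHORTPTR(src) : NULL;
  for (int r = 0; r < rows; ++r) {
    for (int c = 0; c < cols; ++c) {
      /* ...then select the matching read path per pixel. */
      out[r * cols + c] =
          src16 ? src16[r * src_stride + c] : src[r * src_stride + c];
    }
  }
}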
/external/libaom/libaom/aom_scale/generic/
D | yv12extend.c
    259  const uint16_t *src16 = CONVERT_TO_SHORTPTR(src);  in aom_yv12_copy_y_c() local
    262  memcpy(dst16, src16, src_ybc->y_width * sizeof(uint16_t));  in aom_yv12_copy_y_c()
    263  src16 += src_ybc->y_stride;  in aom_yv12_copy_y_c()
    283  const uint16_t *src16 = CONVERT_TO_SHORTPTR(src);  in aom_yv12_copy_u_c() local
    286  memcpy(dst16, src16, src_bc->uv_width * sizeof(uint16_t));  in aom_yv12_copy_u_c()
    287  src16 += src_bc->uv_stride;  in aom_yv12_copy_u_c()
    307  const uint16_t *src16 = CONVERT_TO_SHORTPTR(src);  in aom_yv12_copy_v_c() local
    310  memcpy(dst16, src16, src_bc->uv_width * sizeof(uint16_t));  in aom_yv12_copy_v_c()
    311  src16 += src_bc->uv_stride;  in aom_yv12_copy_v_c()
    333  const uint16_t *src16 =  in aom_yv12_partial_copy_y_c() local
[all …]
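The yv12extend matches are the high-bitdepth plane copy: one memcpy per row of visible uint16_t samples, then both pointers advance by their buffer strides (strides are in samples and may be wider than the visible width). A condensed sketch of that loop:

#include <stdint.h>
#include <string.h>

static void copy_plane_16(const uint16_t *src16, int src_stride,
                          uint16_t *dst16, int dst_stride,
                          int width, int height) {
  for (int h = 0; h < height; ++h) {
    /* Copy only the visible width; padding stays untouched. */
    memcpy(dst16, src16, width * sizeof(uint16_t));
    src16 += src_stride;
    dst16 += dst_stride;
  }
}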
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/SLPVectorizer/X86/
D | bswap.ll
    12   @src16 = common global [16 x i16] zeroinitializer, align 32
    148  ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([16 x i16]* @src16 to <8 x i16>…
    153  %ld0 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 0), align…
    154  %ld1 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 1), align…
    155  %ld2 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 2), align…
    156  %ld3 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), align…
    157  %ld4 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 4), align…
    158  %ld5 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 5), align…
    159  %ld6 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 6), align…
    160  %ld7 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), align…
[all …]
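The SLPVectorizer tests in this directory (bswap.ll, bitreverse.ll, cttz.ll, ctlz.ll, ctpop.ll, uitofp.ll, sitofp.ll) all share the same shape: eight or more scalar i16 loads from @src16, one intrinsic call per element, and CHECK lines expecting the pass to fuse them into vector loads such as <8 x i16>. In C terms the bswap case corresponds roughly to the loop below; this is a sketch of the pattern, not the test's literal source (the .ll files hand-write the unrolled IR), and __builtin_bswap16 is the GCC/Clang builtin that lowers to llvm.bswap.i16.

#include <stdint.h>

/* Global arrays mirroring the IR globals @src16 / @dst16. */
uint16_t src16[16];
uint16_t dst16[16];

void bswap_16i16(void) {
  /* Element-wise byte swap; SLP vectorization can rewrite this as
     <8 x i16> loads, a vector bswap, and <8 x i16> stores. */
  for (int i = 0; i < 16; ++i)
    dst16[i] = __builtin_bswap16(src16[i]);
}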
D | bitreverse.ll
    14   @src16 = common global [16 x i16] zeroinitializer, align 32
    150  ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([16 x i16]* @src16 to <8 x i16>…
    155  %ld0 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 0), align…
    156  %ld1 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 1), align…
    157  %ld2 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 2), align…
    158  %ld3 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), align…
    159  %ld4 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 4), align…
    160  %ld5 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 5), align…
    161  %ld6 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 6), align…
    162  %ld7 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), align…
[all …]
D | cttz.ll
    13   @src16 = common global [16 x i16] zeroinitializer, align 32
    198  ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([16 x i16]* @src16 to <8 x i16>…
    203  %ld0 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 0), align…
    204  %ld1 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 1), align…
    205  %ld2 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 2), align…
    206  %ld3 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), align…
    207  %ld4 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 4), align…
    208  %ld5 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 5), align…
    209  %ld6 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 6), align…
    210  %ld7 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), align…
[all …]
D | ctlz.ll
    13   @src16 = common global [16 x i16] zeroinitializer, align 32
    198  ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([16 x i16]* @src16 to <8 x i16>…
    203  %ld0 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 0), align…
    204  %ld1 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 1), align…
    205  %ld2 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 2), align…
    206  %ld3 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), align…
    207  %ld4 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 4), align…
    208  %ld5 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 5), align…
    209  %ld6 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 6), align…
    210  %ld7 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), align…
[all …]
D | uitofp.ll
    12   @src16 = common global [32 x i16] zeroinitializer, align 64
    339  …0:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @src16, i32 0, i64 0), al…
    340  …1:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @src16, i32 0, i64 1), al…
    347  %ld0 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @src16, i32 0, i64 0), align…
    348  %ld1 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @src16, i32 0, i64 1), align…
    358  ; SSE-NEXT: [[LD0:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @src16,…
    359  ; SSE-NEXT: [[LD1:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @src16,…
    360  ; SSE-NEXT: [[LD2:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @src16,…
    361  ; SSE-NEXT: [[LD3:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @src16,…
    373  ; AVX-NEXT: [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* bitcast ([32 x i16]* @src16 to <4 x i16>*)…
[all …]
D | sitofp.ll
    12   @src16 = common global [32 x i16] zeroinitializer, align 64
    324  …0:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @src16, i32 0, i64 0), al…
    325  …1:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @src16, i32 0, i64 1), al…
    332  %ld0 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @src16, i32 0, i64 0), align…
    333  %ld1 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @src16, i32 0, i64 1), align…
    343  ; SSE-NEXT: [[LD0:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @src16,…
    344  ; SSE-NEXT: [[LD1:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @src16,…
    345  ; SSE-NEXT: [[LD2:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @src16,…
    346  ; SSE-NEXT: [[LD3:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @src16,…
    358  ; AVX-NEXT: [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* bitcast ([32 x i16]* @src16 to <4 x i16>*)…
[all …]
D | ctpop.ll
    13   @src16 = common global [16 x i16] zeroinitializer, align 32
    245  ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([16 x i16]* @src16 to <8 x i16>…
    250  %ld0 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 0), align…
    251  %ld1 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 1), align…
    252  %ld2 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 2), align…
    253  %ld3 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), align…
    254  %ld4 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 4), align…
    255  %ld5 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 5), align…
    256  %ld6 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 6), align…
    257  %ld7 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), align…
[all …]
/external/llvm/test/Transforms/SLPVectorizer/X86/
D | bswap.ll
    12   @src16 = common global [16 x i16] zeroinitializer, align 32
    148  ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([16 x i16]* @src16 to <8 x i16>…
    153  %ld0 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 0), align…
    154  %ld1 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 1), align…
    155  %ld2 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 2), align…
    156  %ld3 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), align…
    157  %ld4 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 4), align…
    158  %ld5 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 5), align…
    159  %ld6 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 6), align…
    160  %ld7 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), align…
[all …]
D | ctlz.ll
    12   @src16 = common global [16 x i16] zeroinitializer, align 32
    164  …0:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 0), al…
    165  …1:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 1), al…
    166  …2:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 2), al…
    167  …3:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), al…
    168  …4:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 4), al…
    169  …5:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 5), al…
    170  …6:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 6), al…
    171  …7:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), al…
    190  %ld0 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 0), align…
[all …]
D | cttz.ll
    12   @src16 = common global [16 x i16] zeroinitializer, align 32
    164  …0:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 0), al…
    165  …1:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 1), al…
    166  …2:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 2), al…
    167  …3:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), al…
    168  …4:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 4), al…
    169  …5:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 5), al…
    170  …6:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 6), al…
    171  …7:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), al…
    190  %ld0 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 0), align…
[all …]
D | ctpop.ll
    12   @src16 = common global [16 x i16] zeroinitializer, align 32
    136  ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([16 x i16]* @src16 to <8 x i16>…
    141  %ld0 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 0), align…
    142  %ld1 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 1), align…
    143  %ld2 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 2), align…
    144  %ld3 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), align…
    145  %ld4 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 4), align…
    146  %ld5 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 5), align…
    147  %ld6 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 6), align…
    148  %ld7 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), align…
[all …]
D | bitreverse.ll
    14   @src16 = common global [16 x i16] zeroinitializer, align 32
    228  ; SSE-NEXT: [[LD0:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16,…
    229  ; SSE-NEXT: [[LD1:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16,…
    230  ; SSE-NEXT: [[LD2:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16,…
    231  ; SSE-NEXT: [[LD3:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16,…
    232  ; SSE-NEXT: [[LD4:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16,…
    233  ; SSE-NEXT: [[LD5:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16,…
    234  ; SSE-NEXT: [[LD6:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16,…
    235  ; SSE-NEXT: [[LD7:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16,…
    255  ; AVX-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([16 x i16]* @src16 to <8 x i16>*)…
[all …]
/external/libvpx/libvpx/vpx_dsp/x86/
D | avg_intrin_avx2.c
    97   __m128i src16[8];  in vpx_highbd_hadamard_8x8_avx2() local
    100  src16[0] = _mm_loadu_si128((const __m128i *)src_diff);  in vpx_highbd_hadamard_8x8_avx2()
    101  src16[1] = _mm_loadu_si128((const __m128i *)(src_diff += src_stride));  in vpx_highbd_hadamard_8x8_avx2()
    102  src16[2] = _mm_loadu_si128((const __m128i *)(src_diff += src_stride));  in vpx_highbd_hadamard_8x8_avx2()
    103  src16[3] = _mm_loadu_si128((const __m128i *)(src_diff += src_stride));  in vpx_highbd_hadamard_8x8_avx2()
    104  src16[4] = _mm_loadu_si128((const __m128i *)(src_diff += src_stride));  in vpx_highbd_hadamard_8x8_avx2()
    105  src16[5] = _mm_loadu_si128((const __m128i *)(src_diff += src_stride));  in vpx_highbd_hadamard_8x8_avx2()
    106  src16[6] = _mm_loadu_si128((const __m128i *)(src_diff += src_stride));  in vpx_highbd_hadamard_8x8_avx2()
    107  src16[7] = _mm_loadu_si128((const __m128i *)(src_diff += src_stride));  in vpx_highbd_hadamard_8x8_avx2()
    109  src32[0] = _mm256_cvtepi16_epi32(src16[0]);  in vpx_highbd_hadamard_8x8_avx2()
[all …]
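These matches show the load-and-widen step of the high-bitdepth 8x8 Hadamard: eight rows of eight int16_t residuals are loaded with unaligned SSE loads, then each row is sign-extended to 32-bit lanes so the butterfly additions cannot overflow. A loop-form sketch of that step (compile with AVX2 enabled; helper name is illustrative, not the library's):

#include <immintrin.h>
#include <stdint.h>
#include <stddef.h>

static void load_rows_widen(const int16_t *src_diff, ptrdiff_t src_stride,
                            __m256i src32[8]) {
  __m128i src16[8];
  for (int i = 0; i < 8; ++i) {
    /* Eight int16_t per row; rows are src_stride samples apart. */
    src16[i] = _mm_loadu_si128((const __m128i *)(src_diff + i * src_stride));
  }
  for (int i = 0; i < 8; ++i) {
    /* Sign-extend int16 lanes to int32 lanes (AVX2). */
    src32[i] = _mm256_cvtepi16_epi32(src16[i]);
  }
}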
/external/libaom/libaom/test/
D | transform_test_base.h
    63   uint16_t *src16 = reinterpret_cast<uint16_t *>(  in RunAccuracyCheck() local
    74   src16[j] = rnd.Rand16() & mask_;  in RunAccuracyCheck()
    76   test_input_block[j] = src16[j] - dst16[j];  in RunAccuracyCheck()
    91   bit_depth_ == AOM_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];  in RunAccuracyCheck()
    113  aom_free(src16);  in RunAccuracyCheck()
    272  uint16_t *src16 = reinterpret_cast<uint16_t *>(  in RunInvAccuracyCheck() local
    283  src16[j] = rnd.Rand16() & mask_;  in RunInvAccuracyCheck()
    285  in[j] = src16[j] - dst16[j];  in RunInvAccuracyCheck()
    300  bit_depth_ == AOM_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];  in RunInvAccuracyCheck()
    310  aom_free(src16);  in RunInvAccuracyCheck()
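This harness (and the libvpx fdct/dct tests further down) repeats one round-trip pattern: fill src16 and dst16 with random pixels masked to the coded bit depth, form the residual src16[j] - dst16[j], run it through the forward/inverse transform pair onto dst, and accumulate the reconstruction error dst - src. A condensed, self-contained sketch, with an identity round_trip() standing in for the transform pair under test:

#include <stdint.h>
#include <stdlib.h>

/* Stand-in for fwd+inv transform; a real test calls the functions under test. */
static void round_trip(const int16_t *residual, uint16_t *dst, int n) {
  for (int j = 0; j < n; ++j) dst[j] = (uint16_t)(dst[j] + residual[j]);
}

static int64_t accuracy_check_sse(int n, uint16_t mask) {  /* n <= 64 here */
  int16_t residual[64];
  uint16_t src16[64], dst16[64];
  int64_t sse = 0;
  for (int j = 0; j < n; ++j) {
    src16[j] = (uint16_t)(rand() & mask);          /* rnd.Rand16() & mask_ */
    dst16[j] = (uint16_t)(rand() & mask);
    residual[j] = (int16_t)(src16[j] - dst16[j]);  /* fits for <=12-bit input */
  }
  round_trip(residual, dst16, n);
  for (int j = 0; j < n; ++j) {
    const int diff = dst16[j] - src16[j];          /* zero for the identity stub */
    sse += (int64_t)diff * diff;
  }
  return sse;
}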
D | variance_test.cc
    149  uint16_t *src16 = CONVERT_TO_SHORTPTR(src);  in subpel_variance_ref() local
    157  const int diff = r - src16[w * y + x];  in subpel_variance_ref()
    199  const uint16_t *src16 = CONVERT_TO_SHORTPTR(src);  in subpel_avg_variance_ref() local
    208  const int diff = ((r + sec16[w * y + x] + 1) >> 1) - src16[w * y + x];  in subpel_avg_variance_ref()
    252  const uint16_t *src16 = CONVERT_TO_SHORTPTR(src);  in dist_wtd_subpel_avg_variance_ref() local
    265  const int diff = avg - src16[w * y + x];  in dist_wtd_subpel_avg_variance_ref()
    486  uint16_t *const src16 = CONVERT_TO_SHORTPTR(src_);  in ZeroTest() local
    487  for (int k = 0; k < block_size(); ++k) src16[k] = i << byte_shift();  in ZeroTest()
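In these reference functions, r is the reference pixel bilinearly filtered at a subpel offset in 1/8-pel units, and variance is accumulated against the 16-bit source plane. A sketch of the idea, assuming the reference plane is padded to w+1 columns and h+1 rows for the extra bilinear tap; the library's helper does two separately rounded filter passes, whereas the single rounded pass here is a simplification:

#include <stdint.h>

static void subpel_variance_16(const uint16_t *ref, const uint16_t *src16,
                               int w, int h, int xoff, int yoff,
                               uint32_t *sse_out, int *sum_out) {
  int64_t sse = 0, sum = 0;
  for (int y = 0; y < h; ++y) {
    for (int x = 0; x < w; ++x) {
      const int a = ref[(w + 1) * y + x];
      const int b = ref[(w + 1) * y + x + 1];
      const int c = ref[(w + 1) * (y + 1) + x];
      const int d = ref[(w + 1) * (y + 1) + x + 1];
      /* Bilinear blend at (xoff, yoff), 1/8-pel weights, single rounding. */
      const int top = a * (8 - xoff) + b * xoff;
      const int bot = c * (8 - xoff) + d * xoff;
      const int r = (top * (8 - yoff) + bot * yoff + 32) >> 6;
      const int diff = r - src16[w * y + x];
      sse += (int64_t)diff * diff;
      sum += diff;
    }
  }
  *sse_out = (uint32_t)sse;
  *sum_out = (int)sum;
}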
/external/libvpx/libvpx/test/
D | fdct8x8_test.cc
    224  DECLARE_ALIGNED(16, uint16_t, src16[64]);  in RunRoundTripErrorCheck()
    236  src16[j] = rnd.Rand16() & mask_;  in RunRoundTripErrorCheck()
    238  test_input_block[j] = src16[j] - dst16[j];  in RunRoundTripErrorCheck()
    268  bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];  in RunRoundTripErrorCheck()
    300  DECLARE_ALIGNED(16, uint16_t, src16[64]);  in RunExtremalCheck()
    321  src16[j] = mask_;  in RunExtremalCheck()
    324  src16[j] = 0;  in RunExtremalCheck()
    327  src16[j] = rnd.Rand8() % 2 ? mask_ : 0;  in RunExtremalCheck()
    330  test_input_block[j] = src16[j] - dst16[j];  in RunExtremalCheck()
    351  bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];  in RunExtremalCheck()
[all …]
D | dct32x32_test.cc
    124  DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);  in TEST_P()
    136  src16[j] = rnd.Rand16() & mask_;  in TEST_P()
    138  test_input_block[j] = src16[j] - dst16[j];  in TEST_P()
    156  bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];  in TEST_P()
    271  DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);  in TEST_P()
    285  src16[j] = rnd.Rand16() & mask_;  in TEST_P()
    287  in[j] = src16[j] - dst16[j];  in TEST_P()
    306  bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];  in TEST_P()
D | dct16x16_test.cc
    331  DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);  in RunAccuracyCheck()
    342  src16[j] = rnd.Rand16() & mask_;  in RunAccuracyCheck()
    344  test_input_block[j] = src16[j] - dst16[j];  in RunAccuracyCheck()
    363  bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];  in RunAccuracyCheck()
    503  DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);  in RunInvAccuracyCheck()
    517  src16[j] = rnd.Rand16() & mask_;  in RunInvAccuracyCheck()
    519  in[j] = src16[j] - dst16[j];  in RunInvAccuracyCheck()
    540  bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];  in RunInvAccuracyCheck()
D | variance_test.cc
    132  uint16_t *src16 = CONVERT_TO_SHORTPTR(src);  in subpel_variance_ref() local
    140  const int diff = r - src16[w * y + x];  in subpel_variance_ref()
    185  const uint16_t *src16 = CONVERT_TO_SHORTPTR(src);  in subpel_avg_variance_ref() local
    194  const int diff = ((r + sec16[w * y + x] + 1) >> 1) - src16[w * y + x];  in subpel_avg_variance_ref()
    370  uint16_t *const src16 = CONVERT_TO_SHORTPTR(src_);  in ZeroTest() local
    371  for (int k = 0; k < block_size(); ++k) src16[k] = i << byte_shift();  in ZeroTest()
/external/icu/icu4c/source/test/cintltst/
D | custrtrn.c
    103  static const UChar src16[] = {  variable
    152  u_strToUTF32(u32Target, 0, &u32DestLen, src16, UPRV_LENGTHOF(src16),&err);  in Test_strToUTF32()
    161  u_strToUTF32(u32Target, UPRV_LENGTHOF(src32)+1, &u32DestLen, src16, UPRV_LENGTHOF(src16),&err);  in Test_strToUTF32()
    185  u_strToUTF32(NULL,0, &u32DestLen, src16, -1,&err);  in Test_strToUTF32()
    194  u_strToUTF32(u32Target, UPRV_LENGTHOF(src32), &u32DestLen, src16, -1,&err);  in Test_strToUTF32()
    332  if(err != U_BUFFER_OVERFLOW_ERROR || uDestLen != UPRV_LENGTHOF(src16)) {  in Test_strFromUTF32()
    335  (long)uDestLen, (long)UPRV_LENGTHOF(src16), u_errorName(err));  in Test_strFromUTF32()
    340  u_strFromUTF32(uTarget, UPRV_LENGTHOF(src16)+1,&uDestLen,src32,UPRV_LENGTHOF(src32),&err);  in Test_strFromUTF32()
    341  if(err != U_ZERO_ERROR || uDestLen != UPRV_LENGTHOF(src16)) {  in Test_strFromUTF32()
    344  (long)uDestLen, (long)UPRV_LENGTHOF(src16), u_errorName(err));  in Test_strFromUTF32()
[all …]
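The ICU matches exercise preflighting: u_strToUTF32 is called with no destination (line 185) to learn the required length, U_BUFFER_OVERFLOW_ERROR is expected and cleared, then the real conversion runs into a buffer of that size. A sketch of the pattern as an application would use it:

#include <stdlib.h>
#include "unicode/ustring.h"  /* u_strToUTF32 */

static UChar32 *to_utf32(const UChar *src16, int32_t len16, int32_t *outLen) {
  UErrorCode err = U_ZERO_ERROR;
  int32_t u32DestLen = 0;
  /* Preflight: NULL dest, zero capacity -> length via u32DestLen. */
  u_strToUTF32(NULL, 0, &u32DestLen, src16, len16, &err);
  if (err != U_BUFFER_OVERFLOW_ERROR) return NULL;
  err = U_ZERO_ERROR;  /* overflow is expected; clear it before converting */
  UChar32 *dest = (UChar32 *)malloc((u32DestLen + 1) * sizeof(UChar32));
  if (dest == NULL) return NULL;
  u_strToUTF32(dest, u32DestLen + 1, &u32DestLen, src16, len16, &err);
  if (U_FAILURE(err)) { free(dest); return NULL; }
  *outLen = u32DestLen;
  return dest;  /* NUL-terminated; caller frees */
}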
/external/skia/src/core/
D | SkConvertPixels.cpp
    99   auto src16 = (const uint16_t*) src;  in convert_to_alpha8() local
    102  dst[x] = SkPacked4444ToA32(src16[x]);  in convert_to_alpha8()
    105  src16 = SkTAddOffset<const uint16_t>(src16, srcRB);  in convert_to_alpha8()
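Here each ARGB_4444 pixel is a uint16_t; SkPacked4444ToA32 extracts the 4-bit alpha and widens it to 8 bits, and the row pointer advances by the byte stride srcRB (hence the byte-offset helper rather than pointer arithmetic in samples). A plain-C sketch of the same walk; the nibble position is an assumption for illustration, not a statement of Skia's exact packing:

#include <stdint.h>
#include <stddef.h>

static void alpha8_from_4444(uint8_t *dst, size_t dstRB,
                             const void *src, size_t srcRB,
                             int width, int height) {
  const uint16_t *src16 = (const uint16_t *)src;
  for (int y = 0; y < height; ++y) {
    for (int x = 0; x < width; ++x) {
      const unsigned a4 = (src16[x] >> 12) & 0xF;  /* assumed alpha nibble */
      dst[x] = (uint8_t)((a4 << 4) | a4);          /* replicate 4 -> 8 bits */
    }
    dst += dstRB;
    /* Advance by bytes, not samples: rows may be padded. */
    src16 = (const uint16_t *)((const uint8_t *)src16 + srcRB);
  }
}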
/external/skqp/src/core/
D | SkConvertPixels.cpp
    99   auto src16 = (const uint16_t*) src;  in convert_to_alpha8() local
    102  dst[x] = SkPacked4444ToA32(src16[x]);  in convert_to_alpha8()
    105  src16 = SkTAddOffset<const uint16_t>(src16, srcRB);  in convert_to_alpha8()
/external/libvpx/libvpx/vpx_scale/generic/
D | yv12extend.c
    319  const uint16_t *src16 = CONVERT_TO_SHORTPTR(src);  in vpx_yv12_copy_y_c() local
    322  memcpy(dst16, src16, src_ybc->y_width * sizeof(uint16_t));  in vpx_yv12_copy_y_c()
    323  src16 += src_ybc->y_stride;  in vpx_yv12_copy_y_c()