/external/libaom/libaom/av1/common/arm/

av1_inv_txfm_neon.c, in idct8_neon() (step2 is a local):
    392  int16x8_t step1[8], step2[8];
    403  btf_16_lane_0_1_neon(in[0], in[4], c1, &step2[0], &step2[1]);
    404  btf_16_lane_2_3_neon(in[2], in[6], c1, &step2[3], &step2[2]);
    405  step2[4] = vqaddq_s16(step1[4], step1[5]);
    406  step2[5] = vqsubq_s16(step1[4], step1[5]);
    407  step2[6] = vqsubq_s16(step1[7], step1[6]);
    408  step2[7] = vqaddq_s16(step1[7], step1[6]);
    411  step1[0] = vqaddq_s16(step2[0], step2[3]);
    412  step1[1] = vqaddq_s16(step2[1], step2[2]);
    413  step1[2] = vqsubq_s16(step2[1], step2[2]);
    [all …]

/external/libvpx/libvpx/vpx_dsp/arm/

idct16x16_add_neon.c, in vpx_idct16x16_256_add_half1d() (step2 is a local):
    75   int16x8_t in[16], step1[16], step2[16], out[16];
    171  step2[0] = step1[0];
    172  step2[1] = step1[1];
    173  step2[2] = step1[2];
    174  step2[3] = step1[3];
    175  step2[4] = step1[4];
    176  step2[5] = step1[5];
    177  step2[6] = step1[6];
    178  step2[7] = step1[7];
    179  idct_cospi_2_30(step1[8], step1[15], cospi_2_30_10_22, &step2[8], &step2[15]);
    [all …]

highbd_idct16x16_add_neon.c, in highbd_idct16x16_add_stage7_dual() (step2 is an argument):
    464  const int32x4x2_t *const step2, int32x4x2_t *const out) {
    465  out[0].val[0] = vaddq_s32(step2[0].val[0], step2[15].val[0]);
    466  out[0].val[1] = vaddq_s32(step2[0].val[1], step2[15].val[1]);
    467  out[1].val[0] = vaddq_s32(step2[1].val[0], step2[14].val[0]);
    468  out[1].val[1] = vaddq_s32(step2[1].val[1], step2[14].val[1]);
    469  out[2].val[0] = vaddq_s32(step2[2].val[0], step2[13].val[0]);
    470  out[2].val[1] = vaddq_s32(step2[2].val[1], step2[13].val[1]);
    471  out[3].val[0] = vaddq_s32(step2[3].val[0], step2[12].val[0]);
    472  out[3].val[1] = vaddq_s32(step2[3].val[1], step2[12].val[1]);
    473  out[4].val[0] = vaddq_s32(step2[4].val[0], step2[11].val[0]);
    [all …]

idct_neon.h, in idct8x8_12_pass1_bd8() (step2 is a local):
    323  int16x4_t step1[8], step2[8];
    335  step2[1] = vqrdmulh_lane_s16(io[0], cospisd0, 2);
    336  step2[2] = vqrdmulh_lane_s16(io[2], cospisd0, 3);
    337  step2[3] = vqrdmulh_lane_s16(io[2], cospisd0, 1);
    339  step2[4] = vadd_s16(step1[4], step1[5]);
    340  step2[5] = vsub_s16(step1[4], step1[5]);
    341  step2[6] = vsub_s16(step1[7], step1[6]);
    342  step2[7] = vadd_s16(step1[7], step1[6]);
    345  step1[0] = vadd_s16(step2[1], step2[3]);
    346  step1[1] = vadd_s16(step2[1], step2[2]);
    [all …]

highbd_idct8x8_add_neon.c, in idct8x8_12_half1d_bd10() (step2 is a local):
    76   int32x4_t step1[8], step2[8];
    91   step2[1] = vmulq_lane_s32(*io0, vget_high_s32(cospis0), 0);
    92   step2[2] = vmulq_lane_s32(*io2, vget_high_s32(cospis0), 1);
    93   step2[3] = vmulq_lane_s32(*io2, vget_low_s32(cospis0), 1);
    94   step2[1] = vrshrq_n_s32(step2[1], DCT_CONST_BITS);
    95   step2[2] = vrshrq_n_s32(step2[2], DCT_CONST_BITS);
    96   step2[3] = vrshrq_n_s32(step2[3], DCT_CONST_BITS);
    98   step2[4] = vaddq_s32(step1[4], step1[5]);
    99   step2[5] = vsubq_s32(step1[4], step1[5]);
    100  step2[6] = vsubq_s32(step1[7], step1[6]);
    [all …]

highbd_idct_neon.h, in idct8x8_64_half1d_bd10() (step2 is a local):
    171  int32x4_t step1[8], step2[8];
    192  step2[1] = vmulq_lane_s32(*io0, vget_high_s32(cospis0), 0);
    193  step2[2] = vmulq_lane_s32(*io2, vget_high_s32(cospis0), 1);
    194  step2[3] = vmulq_lane_s32(*io2, vget_low_s32(cospis0), 1);
    196  step2[0] = vmlaq_lane_s32(step2[1], *io4, vget_high_s32(cospis0), 0);
    197  step2[1] = vmlsq_lane_s32(step2[1], *io4, vget_high_s32(cospis0), 0);
    198  step2[2] = vmlsq_lane_s32(step2[2], *io6, vget_low_s32(cospis0), 1);
    199  step2[3] = vmlaq_lane_s32(step2[3], *io6, vget_high_s32(cospis0), 1);
    201  step2[0] = vrshrq_n_s32(step2[0], DCT_CONST_BITS);
    202  step2[1] = vrshrq_n_s32(step2[1], DCT_CONST_BITS);
    [all …]

/external/libvpx/libvpx/vpx_dsp/x86/

highbd_idct32x32_add_sse4.c, in highbd_idct32_4x32_quarter_2_stage_4_to_6() (step2 is a local):
    23  __m128i step2[32];
    26  step2[8] = step1[8];
    27  step2[15] = step1[15];
    29  &step2[9], &step2[14]);
    31  &step2[10], &step2[13]);
    32  step2[11] = step1[11];
    33  step2[12] = step1[12];
    36  step1[8] = _mm_add_epi32(step2[8], step2[11]);
    37  step1[9] = _mm_add_epi32(step2[9], step2[10]);
    38  step1[10] = _mm_sub_epi32(step2[9], step2[10]);
    [all …]

highbd_idct32x32_add_sse2.c, in highbd_idct32_4x32_quarter_2_stage_4_to_6() (step2 is a local):
    19  __m128i step2[32];
    22  step2[8] = step1[8];
    23  step2[15] = step1[15];
    24  highbd_butterfly_sse2(step1[14], step1[9], cospi_24_64, cospi_8_64, &step2[9],
    25  &step2[14]);
    27  &step2[13], &step2[10]);
    28  step2[11] = step1[11];
    29  step2[12] = step1[12];
    32  step1[8] = _mm_add_epi32(step2[8], step2[11]);
    33  step1[9] = _mm_add_epi32(step2[9], step2[10]);
    [all …]

inv_txfm_sse2.h, in idct8() (step2 is a local):
    252  __m128i step1[8], step2[8];
    259  butterfly(in[0], in[4], cospi_16_64, cospi_16_64, &step2[1], &step2[0]);
    260  butterfly(in[2], in[6], cospi_24_64, cospi_8_64, &step2[2], &step2[3]);
    262  step2[4] = _mm_add_epi16(step1[4], step1[5]);
    263  step2[5] = _mm_sub_epi16(step1[4], step1[5]);
    264  step2[6] = _mm_sub_epi16(step1[7], step1[6]);
    265  step2[7] = _mm_add_epi16(step1[7], step1[6]);
    268  step1[0] = _mm_add_epi16(step2[0], step2[3]);
    269  step1[1] = _mm_add_epi16(step2[1], step2[2]);
    270  step1[2] = _mm_sub_epi16(step2[1], step2[2]);
    [all …]

highbd_idct16x16_add_sse4.c, in vpx_highbd_idct16_4col_sse4_1() (step2 is a local):
    57  __m128i step1[16], step2[16];
    60  highbd_butterfly_sse4_1(io[1], io[15], cospi_30_64, cospi_2_64, &step2[8],
    61  &step2[15]);
    62  highbd_butterfly_sse4_1(io[9], io[7], cospi_14_64, cospi_18_64, &step2[9],
    63  &step2[14]);
    64  highbd_butterfly_sse4_1(io[5], io[11], cospi_22_64, cospi_10_64, &step2[10],
    65  &step2[13]);
    66  highbd_butterfly_sse4_1(io[13], io[3], cospi_6_64, cospi_26_64, &step2[11],
    67  &step2[12]);
    74  step1[8] = _mm_add_epi32(step2[8], step2[9]);
    [all …]

highbd_idct16x16_add_sse2.c, in highbd_idct16_4col() (step2 is a local):
    56  __m128i step1[16], step2[16];
    59  highbd_butterfly_sse2(io[1], io[15], cospi_30_64, cospi_2_64, &step2[8],
    60  &step2[15]);
    61  highbd_butterfly_sse2(io[9], io[7], cospi_14_64, cospi_18_64, &step2[9],
    62  &step2[14]);
    63  highbd_butterfly_sse2(io[5], io[11], cospi_22_64, cospi_10_64, &step2[10],
    64  &step2[13]);
    65  highbd_butterfly_sse2(io[13], io[3], cospi_6_64, cospi_26_64, &step2[11],
    66  &step2[12]);
    73  step1[8] = _mm_add_epi32(step2[8], step2[9]);
    [all …]

inv_txfm_ssse3.c, in idct32_34_8x32_quarter_1() (step2 is a local):
    57  __m128i step1[8], step2[8];
    63  step2[0] = partial_butterfly_cospi16_ssse3(in[0]);
    64  step2[4] = step1[4];
    65  step2[5] = step1[4];
    66  step2[6] = step1[7];
    67  step2[7] = step1[7];
    70  step1[0] = step2[0];
    71  step1[1] = step2[0];
    72  step1[2] = step2[0];
    73  step1[3] = step2[0];
    [all …]

inv_txfm_ssse3.h, in idct8x8_12_add_kernel_ssse3() (step2 is a local):
    34  __m128i step1[8], step2[8], tmp[4];
    51  step2[0] = _mm_mulhrs_epi16(tmp[0], cospi_16_64d); // step2 0&1
    52  step2[2] = _mm_mulhrs_epi16(tmp[2], cp_8d_24d); // step2 3&2
    53  step2[4] = _mm_add_epi16(step1[4], step1[5]); // step2 4&7
    54  step2[5] = _mm_sub_epi16(step1[4], step1[5]); // step2 5&6
    55  step2[6] = _mm_unpackhi_epi64(step2[5], step2[5]); // step2 6
    58  tmp[0] = _mm_unpacklo_epi16(step2[6], step2[5]);
    60  tmp[0] = _mm_add_epi16(step2[0], step2[2]); // step1 0&1
    61  tmp[1] = _mm_sub_epi16(step2[0], step2[2]); // step1 3&2
    66  tmp[0] = _mm_add_epi16(step1[3], step2[4]); // output 3&0
    [all …]

highbd_idct8x8_add_sse4.c, in vpx_highbd_idct8x8_half1d_sse4_1() (step2 is a local):
    21  __m128i step1[8], step2[8];
    36  highbd_butterfly_cospi16_sse4_1(step1[0], step1[2], &step2[0], &step2[1]);
    38  &step2[2], &step2[3]);
    39  step2[4] = _mm_add_epi32(step1[4], step1[5]);
    40  step2[5] = _mm_sub_epi32(step1[4], step1[5]);
    41  step2[6] = _mm_sub_epi32(step1[7], step1[6]);
    42  step2[7] = _mm_add_epi32(step1[7], step1[6]);
    45  step1[0] = _mm_add_epi32(step2[0], step2[3]);
    46  step1[1] = _mm_add_epi32(step2[1], step2[2]);
    47  step1[2] = _mm_sub_epi32(step2[1], step2[2]);
    [all …]

highbd_idct8x8_add_sse2.c, in highbd_idct8x8_half1d() (step2 is a local):
    19  __m128i step1[8], step2[8];
    34  highbd_butterfly_cospi16_sse2(step1[0], step1[2], &step2[0], &step2[1]);
    35  highbd_butterfly_sse2(step1[1], step1[3], cospi_24_64, cospi_8_64, &step2[2],
    36  &step2[3]);
    37  step2[4] = _mm_add_epi32(step1[4], step1[5]);
    38  step2[5] = _mm_sub_epi32(step1[4], step1[5]);
    39  step2[6] = _mm_sub_epi32(step1[7], step1[6]);
    40  step2[7] = _mm_add_epi32(step1[7], step1[6]);
    43  step1[0] = _mm_add_epi32(step2[0], step2[3]);
    44  step1[1] = _mm_add_epi32(step2[1], step2[2]);
    [all …]

fwd_dct32x32_impl_sse2.h, in FDCT32x32_2D() (step2 is a local):
    116  __m128i step2[32];
    366  step2[0] = ADD_EPI16(step1[0], step1[15]);
    367  step2[1] = ADD_EPI16(step1[1], step1[14]);
    368  step2[2] = ADD_EPI16(step1[2], step1[13]);
    369  step2[3] = ADD_EPI16(step1[3], step1[12]);
    370  step2[4] = ADD_EPI16(step1[4], step1[11]);
    371  step2[5] = ADD_EPI16(step1[5], step1[10]);
    372  step2[6] = ADD_EPI16(step1[6], step1[9]);
    373  step2[7] = ADD_EPI16(step1[7], step1[8]);
    374  step2[8] = SUB_EPI16(step1[7], step1[8]);
    [all …]

fwd_dct32x32_impl_avx2.h, in FDCT32x32_2D_AVX2() (step2 is a local):
    101  __m256i step2[32];
    339  step2[0] = _mm256_add_epi16(step1[0], step1[15]);
    340  step2[1] = _mm256_add_epi16(step1[1], step1[14]);
    341  step2[2] = _mm256_add_epi16(step1[2], step1[13]);
    342  step2[3] = _mm256_add_epi16(step1[3], step1[12]);
    343  step2[4] = _mm256_add_epi16(step1[4], step1[11]);
    344  step2[5] = _mm256_add_epi16(step1[5], step1[10]);
    345  step2[6] = _mm256_add_epi16(step1[6], step1[9]);
    346  step2[7] = _mm256_add_epi16(step1[7], step1[8]);
    347  step2[8] = _mm256_sub_epi16(step1[7], step1[8]);
    [all …]

inv_txfm_sse2.c, in idct32_34_8x32_quarter_1() (step2 is a local):
    893  __m128i step1[8], step2[8];
    899  step2[0] = butterfly_cospi16(in[0]);
    900  step2[4] = step1[4];
    901  step2[5] = step1[4];
    902  step2[6] = step1[7];
    903  step2[7] = step1[7];
    906  step1[0] = step2[0];
    907  step1[1] = step2[0];
    908  step1[2] = step2[0];
    909  step1[3] = step2[0];
    [all …]

/external/libvpx/libvpx/vpx_dsp/

inv_txfm.c, in idct8_c() (step2 is a local):
    272  int16_t step1[8], step2[8];
    292  step2[0] = WRAPLOW(dct_const_round_shift(temp1));
    293  step2[1] = WRAPLOW(dct_const_round_shift(temp2));
    296  step2[2] = WRAPLOW(dct_const_round_shift(temp1));
    297  step2[3] = WRAPLOW(dct_const_round_shift(temp2));
    298  step2[4] = WRAPLOW(step1[4] + step1[5]);
    299  step2[5] = WRAPLOW(step1[4] - step1[5]);
    300  step2[6] = WRAPLOW(-step1[6] + step1[7]);
    301  step2[7] = WRAPLOW(step1[6] + step1[7]);
    304  step1[0] = WRAPLOW(step2[0] + step2[3]);
    [all …]

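The scalar idct8_c above is the reference that the NEON and SSE variants listed earlier mirror: every step1/step2 stage is either an add/subtract pair or a rotation by a pair of fixed-point cosine constants followed by a rounding shift. A minimal standalone sketch of that rotation butterfly, assuming the 14-bit DCT_CONST_BITS fixed-point convention used by vpx_dsp (the helper names below are illustrative, not taken from the library):

    #include <stdint.h>

    #define DCT_CONST_BITS 14 /* assumed to match vpx_dsp's fixed-point scale */

    /* Round a fixed-point product back down by DCT_CONST_BITS. */
    static int32_t round_shift(int64_t x) {
      return (int32_t)((x + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS);
    }

    /* One rotation butterfly: (a, b) -> (a*cos - b*sin, a*sin + b*cos). */
    static void butterfly(int32_t a, int32_t b, int32_t cos_c, int32_t sin_c,
                          int32_t *out0, int32_t *out1) {
      *out0 = round_shift((int64_t)a * cos_c - (int64_t)b * sin_c);
      *out1 = round_shift((int64_t)a * sin_c + (int64_t)b * cos_c);
    }

The vrshrq_n_s32(..., DCT_CONST_BITS) calls and the butterfly/highbd_butterfly_* helpers in the SIMD listings above appear to vectorize this same two-output rotation.
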
fwd_txfm.c, in vpx_fdct16x16_c() (step2 is a local):
    198  tran_high_t step2[8]; // canbe16
    302  step2[2] = fdct_round_shift(temp1);
    303  step2[3] = fdct_round_shift(temp2);
    306  step2[4] = fdct_round_shift(temp1);
    307  step2[5] = fdct_round_shift(temp2);
    309  step3[0] = step1[0] + step2[3];
    310  step3[1] = step1[1] + step2[2];
    311  step3[2] = step1[1] - step2[2];
    312  step3[3] = step1[0] - step2[3];
    313  step3[4] = step1[7] - step2[4];
    [all …]

/external/libvpx/libvpx/vp9/encoder/

vp9_dct.c, in fdct16() (step2 is a local):
    96   tran_high_t step2[8]; // canbe16
    176  step2[2] = fdct_round_shift(temp1);
    177  step2[3] = fdct_round_shift(temp2);
    180  step2[4] = fdct_round_shift(temp1);
    181  step2[5] = fdct_round_shift(temp2);
    184  step3[0] = step1[0] + step2[3];
    185  step3[1] = step1[1] + step2[2];
    186  step3[2] = step1[1] - step2[2];
    187  step3[3] = step1[0] - step2[3];
    188  step3[4] = step1[7] - step2[4];
    [all …]

/external/autotest/client/tests/error_skip_step/

control:
    17  job.next_step('step2')
    28  def step2():
    29      print 'screw you step0, I am the coolest. -step2'

/external/llvm-project/mlir/test/Transforms/

parametric-tiling.mlir:
    35   // TILE_74-NEXT: %[[step2:.*]] = muli %c2, %[[size2]]
    39   // TILE_74:scf.for %[[j:.*]] = %c1 to %c44 step %[[step2]]
    46   // TILE_74: %[[stepped2:.*]] = addi %[[j]], %[[step2]]
    104  // TILE_74-NEXT: %[[step2:.*]] = muli %c2, %[[size2]]
    107  // TILE_74:scf.for %[[j:.*]] = %c1 to %[[i]] step %[[step2]]
    113  // TILE_74: %[[stepped2:.*]] = addi %[[j]], %[[step2]]

/external/dng_sdk/source/

dng_utils.cpp, in HistogramArea() (step2 is a local):
    443  int32 step2 = buffer.fColStep;
    452  step2);
    461  if (maxValue == 0x0FFFF && step2 == 1)
    492  s2 += step2;

/external/testng/src/test/java/test/dependent/

SampleDependentMethods5.java, step2() is a method in SampleDependentMethods5:
    17  public void step2() {
|