/external/libvpx/libvpx/vpx_dsp/mips/
idct32x32_msa.c:
  in idct32x8_row_even_process_store():
    43   v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;   (local)
    52   BUTTERFLY_4(reg1, reg7, reg3, reg5, vec1, vec3, vec2, vec0);
    56   loc0 = vec1;
    60   BUTTERFLY_4(reg4, reg0, reg2, reg6, vec1, vec3, vec2, vec0);
    61   BUTTERFLY_4(vec0, vec1, loc1, loc0, stp3, stp0, stp7, stp4);
    81   vec1 = reg2;
    84   reg4 = reg5 - vec1;
    85   reg5 = reg5 + vec1;
    92   vec1 = reg7 - reg1;
    95   DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1);
  [all …]
vpx_convolve8_avg_horiz_msa.c:
  in common_hz_8t_and_aver_dst_4x8_msa():
    58   v8i16 filt, vec0, vec1, vec2, vec3;   (local)
    76   filt0, filt1, filt2, filt3, vec0, vec1);
    81   SRARI_H4_SH(vec0, vec1, vec2, vec3, FILTER_BITS);
    82   SAT_SH4_SH(vec0, vec1, vec2, vec3, 7);
    83   PCKEV_B4_UB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3, res0, res1, res2,
  in common_hz_8t_and_aver_dst_16w_msa():
    155  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;   (local)
    176  VSHF_B4_SH(src1, src1, mask0, mask1, mask2, mask3, vec1, vec5, vec9, vec13);
    181  DOTP_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
    185  DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt1, filt1, filt1, filt1, vec0, vec1,
    189  ADDS_SH4_SH(vec0, vec8, vec1, vec9, vec2, vec10, vec3, vec11, out0, out1,
  [all …]
vpx_convolve8_horiz_msa.c:
  in common_hz_2t_4x4_msa():
    321  v16u8 filt0, vec0, vec1, res0, res1;   (local)
    331  VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1);
    332  DOTP_UB2_UH(vec0, vec1, filt0, filt0, vec2, vec3);
  in common_hz_2t_4x8_msa():
    341  v16u8 vec0, vec1, vec2, vec3, filt0;   (local)
    353  VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1);
    355  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec4, vec5,
  in common_hz_2t_8x4_msa():
    380  v8u16 vec0, vec1, vec2, vec3, filt;   (local)
    389  VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
    391  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
    393  SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
  [all …]
sub_pixel_variance_msa.c:
  in sub_pixel_sse_diff_4width_h_msa():
    408  v8u16 vec0, vec1, vec2, vec3;   (local)
    421  VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
    423  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
    424  vec0, vec1, vec2, vec3);
    425  SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
    426  PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3,
  in sub_pixel_sse_diff_8width_h_msa():
    451  v8u16 vec0, vec1, vec2, vec3;   (local)
    465  VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
    467  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
    468  vec0, vec1, vec2, vec3);
  [all …]
fwd_txfm_msa.c:
  in fdct8x16_1d_column():
    20   v8i16 vec0, vec1, vec2, vec3, vec4, vec5, cnst0, cnst1, cnst4, cnst5;   (local)
    72   ILVRL_H2_SH(in15, in8, vec1, vec0);
    76   in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
    81   in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
    84   ILVRL_H2_SH(in14, in9, vec1, vec0);
    88   in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst1);
    93   in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
    108  ILVRL_H2_SH(in13, in10, vec1, vec0);
    111  in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
    116  in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
  [all …]
vpx_convolve8_avg_msa.c:
  in common_hv_8ht_8vt_and_aver_dst_4w_msa():
    27   v8i16 hz_out7, hz_out8, hz_out9, res0, res1, vec0, vec1, vec2, vec3, vec4;   (local)
    58   ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
    71   res0 = FILT_8TAP_DPADD_S_H(vec0, vec1, vec2, vec3, filt_vt0, filt_vt1,
    78   res1 = FILT_8TAP_DPADD_S_H(vec1, vec2, vec3, vec4, filt_vt0, filt_vt1,
    92   vec1 = vec3;
  in common_hv_2ht_2vt_and_aver_dst_4x4_msa():
    252  v16u8 filt_hz, filt_vt, vec0, vec1;   (local)
    272  ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
    276  DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1);
  in common_hv_2ht_2vt_and_aver_dst_4x8_msa():
    290  v16u8 filt_hz, filt_vt, vec0, vec1, vec2, vec3, res0, res1, res2, res3;   (local)
    321  ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
  [all …]
vpx_convolve8_msa.c:
  in common_hv_8ht_8vt_8w_msa():
    105  v16u8 mask0, mask1, mask2, mask3, vec0, vec1;   (local)
    180  vec1 = PCKEV_XORI128_UB(tmp2, tmp3);
    181  ST8x4_UB(vec0, vec1, dst, dst_stride);
  in common_hv_2ht_2vt_4x4_msa():
    238  v16u8 filt_vt, filt_hz, vec0, vec1, res0, res1;   (local)
    257  ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
    258  DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1);
  in common_hv_2ht_2vt_4x8_msa():
    270  v16u8 filt_hz, filt_vt, vec0, vec1, vec2, vec3;   (local)
    296  ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
    298  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt_vt, filt_vt, filt_vt, filt_vt,
  in common_hv_2ht_2vt_8x4_msa():
    326  v16u8 filt_hz, filt_vt, vec0, vec1, vec2, vec3;   (local)
  [all …]
loopfilter_8_msa.c:
  in vpx_lpf_vertical_8_msa():
    172  v8i16 vec0, vec1, vec2, vec3, vec4;   (local)
    198  ILVR_B2_SH(p0_out, p1_out, q1_out, q0_out, vec0, vec1);
    199  ILVRL_H2_SH(vec1, vec0, vec2, vec3);
    227  ILVR_B2_SH(p1, p2, q0, p0, vec0, vec1);
    228  ILVRL_H2_SH(vec1, vec0, vec2, vec3);
  in vpx_lpf_vertical_8_dual_msa():
    257  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;   (local)
    291  ILVR_B2_SH(p0_out, p1_out, q1_out, q0_out, vec0, vec1);
    292  ILVRL_H2_SH(vec1, vec0, vec2, vec3);
    293  ILVL_B2_SH(p0_out, p1_out, q1_out, q0_out, vec0, vec1);
    294  ILVRL_H2_SH(vec1, vec0, vec4, vec5);
  [all …]
fwd_dct32x32_msa.c:
  in fdct8x32_1d_column_even_store():
    61   v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;   (local)
    68   vec0, vec1, vec2, vec3, in12, in13, in14, in15);
    75   ADD4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0, in1, in2, in3);
    87   SUB4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, vec7, vec6, vec5, vec4);
    89   ADD2(vec4, vec5, vec7, vec6, vec0, vec1);
    90   DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, temp1, temp0);
    103  ADD4(in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0, vec1, vec6, in2);
    104  DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
  in fdct8x32_1d_row_even_4x():
    292  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;   (local)
    304  vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7,
  [all …]
vpx_convolve8_avg_vert_msa.c:
  in common_vt_2t_and_aver_dst_8x4_msa():
    352  v16u8 dst0, dst1, dst2, dst3, vec0, vec1, vec2, vec3, filt0;   (local)
    362  ILVR_B2_UB(src1, src0, src2, src1, vec0, vec1);
    364  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1,
  in common_vt_2t_and_aver_dst_8x8mult_msa():
    380  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;   (local)
    396  ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, vec0, vec1,
    400  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1,
  in common_vt_2t_and_aver_dst_16w_msa():
    440  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;   (local)
    456  ILVL_B2_UB(src1, src0, src2, src1, vec1, vec3);
    457  DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
  in common_vt_2t_and_aver_dst_32w_msa():
    492  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;   (local)
  [all …]
vpx_convolve8_vert_msa.c:
  in common_vt_2t_8x4_msa():
    370  v16u8 src0, src1, src2, src3, src4, vec0, vec1, vec2, vec3, filt0;   (local)
    380  ILVR_B2_UB(src1, src0, src2, src1, vec0, vec1);
    382  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1,
  in common_vt_2t_8x8mult_msa():
    394  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;   (local)
    410  ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, vec0, vec1,
    414  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1,
  in common_vt_2t_16w_msa():
    447  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;   (local)
    463  ILVL_B2_UB(src1, src0, src2, src1, vec1, vec3);
    464  DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
  in common_vt_2t_32w_msa():
    495  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;   (local)
  [all …]
fwd_txfm_msa.h:
  291  #define FDCT_POSTPROC_2V_NEG_H(vec0, vec1) { \   (argument)
  296  tp1_m = __msa_clti_s_h(vec1, 0); \
  298  vec1 += 1; \
  302  vec1 += tp1_m; \
  304  vec1 >>= 2; \
  318  #define FDCT32_POSTPROC_2V_POS_H(vec0, vec1) { \   (argument)
  323  tp1_m = __msa_clei_s_h(vec1, 0); \
  327  vec1 += 1; \
  331  vec1 += tp1_m; \
  333  vec1 >>= 2; \
/external/jmonkeyengine/engine/src/bullet-native/
com_jme3_bullet_collision_shapes_SimplexCollisionShape.cpp:
  in Java_com_jme3_bullet_collision_shapes_SimplexCollisionShape_createShape__Lcom_jme3_math_Vector3f_2():
    51   btVector3 vec1 = btVector3();   (local)
    52   jmeBulletUtil::convert(env, vector1, &vec1);
    53   btBU_Simplex1to4* simplexShape = new btBU_Simplex1to4(vec1);
  in Java_com_jme3_bullet_collision_shapes_SimplexCollisionShape_createShape__Lcom_jme3_math_Vector3f_2Lcom_jme3_math_Vector3f_2():
    65   btVector3 vec1 = btVector3();   (local)
    66   jmeBulletUtil::convert(env, vector1, &vec1);
    69   btBU_Simplex1to4* simplexShape = new btBU_Simplex1to4(vec1, vec2);
  in Java_com_jme3_bullet_collision_shapes_SimplexCollisionShape_createShape__Lcom_jme3_math_Vector3f_2Lcom_jme3_math_Vector3f_2Lcom_jme3_math_Vector3f_2():
    80   btVector3 vec1 = btVector3();   (local)
    81   jmeBulletUtil::convert(env, vector1, &vec1);
    86   btBU_Simplex1to4* simplexShape = new btBU_Simplex1to4(vec1, vec2, vec3);
  in Java_com_jme3_bullet_collision_shapes_SimplexCollisionShape_createShape__Lcom_jme3_math_Vector3f_2Lcom_jme3_math_Vector3f_2Lcom_jme3_math_Vector3f_2Lcom_jme3_math_Vector3f_2():
    97   btVector3 vec1 = btVector3();   (local)
  [all …]
com_jme3_bullet_objects_PhysicsRigidBody.cpp:
  in Java_com_jme3_bullet_objects_PhysicsRigidBody_applyForce():
    585  btVector3 vec1 = btVector3();   (local)
    587  jmeBulletUtil::convert(env, force, &vec1);
    589  body->applyForce(vec1, vec2);
  in Java_com_jme3_bullet_objects_PhysicsRigidBody_applyCentralForce():
    605  btVector3 vec1 = btVector3();   (local)
    606  jmeBulletUtil::convert(env, force, &vec1);
    607  body->applyCentralForce(vec1);
  in Java_com_jme3_bullet_objects_PhysicsRigidBody_applyTorque():
    623  btVector3 vec1 = btVector3();   (local)
    624  jmeBulletUtil::convert(env, force, &vec1);
    625  body->applyTorque(vec1);
  in Java_com_jme3_bullet_objects_PhysicsRigidBody_applyImpulse():
    641  btVector3 vec1 = btVector3();   (local)
  [all …]
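Each of these bindings follows the same pattern: convert the incoming com.jme3.math.Vector3f into a btVector3, then forward it to the matching btRigidBody call. Stripped of the JNI glue, a minimal sketch of the underlying Bullet calls might look like the following (the push_body helper and the force/torque values are illustrative assumptions, not taken from the bindings):

    #include <btBulletDynamicsCommon.h>

    // Illustrative only: once the jobject -> btVector3 conversion is done,
    // the bindings above reduce to plain btRigidBody calls like these.
    void push_body(btRigidBody* body) {
      const btVector3 force(0.0f, 10.0f, 0.0f);   // world-space force
      const btVector3 torque(0.0f, 0.0f, 1.0f);   // torque about the z axis
      body->applyCentralForce(force);             // acts through the center of mass
      body->applyTorque(torque);
    }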
/external/libvpx/libvpx/vp8/common/mips/msa/
bilinear_filter_msa.c:
  in common_hz_2t_4x4_msa():
    42   v16u8 filt0, vec0, vec1, res0, res1;   (local)
    51   VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1);
    52   DOTP_UB2_UH(vec0, vec1, filt0, filt0, vec2, vec3);
  in common_hz_2t_4x8_msa():
    62   v16u8 vec0, vec1, vec2, vec3, filt0;   (local)
    73   VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1);
    75   DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
  in common_hz_2t_8x4_msa():
    105  v8u16 vec0, vec1, vec2, vec3, filt;   (local)
    113  VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
    115  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
    116  vec0, vec1, vec2, vec3);
  [all …]
/external/mesa3d/src/gallium/auxiliary/gallivm/
lp_bld_quad.c:
  in lp_build_packed_ddx_ddy_onecoord():
    96   LLVMValueRef vec1, vec2;   (local)
    113  vec1 = lp_build_swizzle_aos(bld, a, swizzle1);
    117  return LLVMBuildFSub(builder, vec2, vec1, "ddxddy");
    119  return LLVMBuildSub(builder, vec2, vec1, "ddxddy");
  in lp_build_packed_ddx_ddy_twocoord():
    131  LLVMValueRef vec1, vec2;   (local)
    149  vec1 = LLVMBuildShuffleVector(builder, a, b,
    154  return LLVMBuildFSub(builder, vec2, vec1, "ddxddyddxddy");
    156  return LLVMBuildSub(builder, vec2, vec1, "ddxddyddxddy");
/external/eigen/doc/snippets/
Tutorial_AdvancedInitialization_Join.cpp:
  1    RowVectorXd vec1(3);   (variable)
  2    vec1 << 1, 2, 3;
  3    std::cout << "vec1 = " << vec1 << std::endl;
  10   joined << vec1, vec2;
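Only lines 1-3 and 10 of this snippet match, so the join idiom it demonstrates is split up above. A self-contained illustration of the same comma-initializer join (the vec2 values and the size of joined are assumptions for the example, not the elided snippet lines):

    #include <iostream>
    #include <Eigen/Dense>

    int main() {
      Eigen::RowVectorXd vec1(3);
      vec1 << 1, 2, 3;                       // fill with the comma initializer

      Eigen::RowVectorXd vec2(4);            // assumed values, not from the snippet
      vec2 << 4, 5, 6, 7;

      Eigen::RowVectorXd joined(vec1.size() + vec2.size());
      joined << vec1, vec2;                  // concatenate the two row vectors
      std::cout << "joined = " << joined << std::endl;
      return 0;
    }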
/external/llvm/test/CodeGen/AArch64/
arm64-copy-tuple.ll:
  18   %vec1 = extractvalue { <8 x i8>, <8 x i8> } %vec, 1
  20   tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
  23   tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
  35   %vec1 = extractvalue { <8 x i8>, <8 x i8> } %vec, 1
  37   tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
  40   tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
  52   %vec1 = extractvalue { <8 x i8>, <8 x i8> } %vec, 1
  54   tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
  57   tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
  69   %vec1 = extractvalue { <8 x i8>, <8 x i8> } %vec, 1
  [all …]
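The llvm.aarch64.neon.st2 intrinsic these tests exercise is what the NEON vst2 C intrinsics typically lower to. A rough C++ counterpart of the store pattern, loosely mirroring the IR rather than reproducing the test (the store_tuple name is an assumption):

    #include <arm_neon.h>

    // Loose counterpart of the st2 calls above: interleave the two 8-byte
    // halves of the tuple and store 16 bytes at addr.
    void store_tuple(int8_t* addr, int8x8x2_t vec) {
      vst2_s8(addr, vec);   // vec.val[0] / vec.val[1] play the roles of %vec0 / %vec1
    }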
/external/mesa3d/src/mesa/drivers/dri/i965/
brw_clip_unfilled.c:
  in cull_direction():
    112  vec1(brw_null_reg()),
  in copy_bfc():
    149  vec1(brw_null_reg()),
  in compute_offset():
    204  vec1(brw_null_reg()),
    209  brw_SEL(p, vec1(off), brw_abs(get_element(off, 0)), brw_abs(get_element(off, 1)));
    212  brw_MUL(p, vec1(off), off, brw_imm_f(c->key.offset_factor));
    213  brw_ADD(p, vec1(off), off, brw_imm_f(c->key.offset_units));
  in merge_edgeflags():
    224  vec1(brw_null_reg()),
    235  brw_AND(p, vec1(brw_null_reg()), get_element_ud(c->reg.R0, 2), brw_imm_ud(1<<8));
    243  brw_AND(p, vec1(brw_null_reg()), get_element_ud(c->reg.R0, 2), brw_imm_ud(1<<9));
  in apply_one_offset():
    264  brw_ADD(p, z, z, vec1(c->reg.offset));
  [all …]
brw_clip_line.c:
  in clip_and_emit_line():
    135  struct brw_reg v1_null_ud = retype(vec1(brw_null_reg()), BRW_REGISTER_TYPE_UD);
    191  brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_LE, c->reg.dp0, brw_imm_f(0.0));
    203  brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_G, c->reg.t, c->reg.t1 );
    216  brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_L, c->reg.dp0, brw_imm_f(0.0));
    225  brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_G, c->reg.t, c->reg.t0 );
    250  brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_L, c->reg.t, brw_imm_f(1.0));
/external/eigen/doc/
QuickReference.dox:
  344  scalar = vec1.dot(vec2);
  355  scalar = vec1.norm(); scalar = vec1.squaredNorm()
  356  vec2 = vec1.normalized(); vec1.normalize(); // inplace \endcode
  362  vec3 = vec1.cross(vec2);\endcode</td></tr>
  480  <tr><td>\code vec1.head(n)\endcode</td><td>\code vec1.head<n>()\endcode</td><td>the first \c n coef…
  481  <tr><td>\code vec1.tail(n)\endcode</td><td>\code vec1.tail<n>()\endcode</td><td>the last \c n coeff…
  482  <tr><td>\code vec1.segment(pos,n)\endcode</td><td>\code vec1.segment<n>(pos)\endcode</td>
  547  mat1 = vec1.asDiagonal();\endcode
  556  vec1 = mat1.diagonal(); mat1.diagonal() = vec1; // main diagonal
  557  vec1 = mat1.diagonal(+n); mat1.diagonal(+n) = vec1; // n-th super diagonal
  [all …]
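These QuickReference rows cover Eigen's dot/norm, cross product, head/tail/segment, and diagonal APIs. A compact usage example of the calls listed above (the vector sizes and values are chosen only for illustration):

    #include <iostream>
    #include <Eigen/Dense>

    int main() {
      Eigen::Vector3d vec1(1.0, 2.0, 3.0), vec2(4.0, 5.0, 6.0);
      double scalar = vec1.dot(vec2);               // dot product
      double len    = vec1.norm();                  // Euclidean norm
      Eigen::Vector3d unit = vec1.normalized();     // normalized copy
      Eigen::Vector3d vec3 = vec1.cross(vec2);      // cross product

      Eigen::VectorXd v(6);
      v << 1, 2, 3, 4, 5, 6;
      Eigen::VectorXd first = v.head(2);            // first n coefficients
      Eigen::VectorXd last  = v.tail(2);            // last n coefficients
      Eigen::VectorXd part  = v.segment(2, 3);      // n coefficients from position pos

      Eigen::MatrixXd mat1 = vec1.asDiagonal();     // vector as a diagonal matrix
      Eigen::VectorXd diag = mat1.diagonal();       // main diagonal back out

      std::cout << scalar << " " << len << "\n" << unit.transpose() << "\n"
                << vec3.transpose() << "\n" << first.transpose() << " "
                << last.transpose() << " " << part.transpose() << "\n"
                << diag.transpose() << std::endl;
      return 0;
    }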
/external/eigen/test/
eigensolver_complex.cpp:
  in verify_is_approx_upto_permutation():
    19   void verify_is_approx_upto_permutation(const VectorType& vec1, const VectorType& vec2)   (argument)
    23   VERIFY(vec1.cols() == 1);
    25   VERIFY(vec1.rows() == vec2.rows());
    26   for (int k = 1; k <= vec1.rows(); ++k)
    28   VERIFY_IS_APPROX(vec1.array().pow(RealScalar(k)).sum(), vec2.array().pow(RealScalar(k)).sum());
/external/llvm/test/CodeGen/X86/
2011-04-19-sclr-bb.ll:
  12   %vec1 = phi <4 x i1> [ %vec1_or_2, %LOOP ], [ zeroinitializer, %ENTRY ]
  14   %vec1_or_2 = or <4 x i1> %vec1, %vec2
  15   %vec2_and_1 = and <4 x i1> %vec2, %vec1
scalar_sse_minmax.ll:
  24   %vec1 = insertelement <4 x float> undef, float %y, i32 0
  25   %retval = tail call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %vec0, <4 x float> %vec1)
  50   %vec1 = insertelement <4 x float> undef, float %y, i32 0
  51   %retval = tail call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %vec0, <4 x float> %vec1)
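The insertelement-plus-llvm.x86.sse.min.ss/max.ss pattern in this test is roughly what the SSE scalar min/max intrinsics produce. An illustrative C++ equivalent, not the test itself (the scalar_min/scalar_max names are assumptions):

    #include <xmmintrin.h>

    // Rough counterpart of the IR above: put x and y into lane 0 of an XMM
    // register, take the scalar min/max, and read lane 0 back out.
    float scalar_min(float x, float y) {
      return _mm_cvtss_f32(_mm_min_ss(_mm_set_ss(x), _mm_set_ss(y)));
    }
    float scalar_max(float x, float y) {
      return _mm_cvtss_f32(_mm_max_ss(_mm_set_ss(x), _mm_set_ss(y)));
    }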
/external/llvm/test/Bitcode/
miscInstructions.3.2.ll:
  67   define void @icmp(i32 %x1, i32 %x2, i32* %ptr1, i32* %ptr2, <2 x i32> %vec1, <2 x i32> %vec2){
  102  ; CHECK-NEXT: %res12 = icmp eq <2 x i32> %vec1, %vec2
  103  %res12 = icmp eq <2 x i32> %vec1, %vec2
  109  define void @fcmp(float %x1, float %x2, <2 x float> %vec1, <2 x float> %vec2){
  159  ; CHECK-NEXT: %res17 = fcmp oeq <2 x float> %vec1, %vec2
  160  %res17 = fcmp oeq <2 x float> %vec1, %vec2