Lines matching references to in3 (the leading number is the line number in the indexed source file; the trailing "argument" tag is the reference kind reported by the cross-reference tool)

245 #define SW4(in0, in1, in2, in3, pdst, stride)  \  argument
250 SW(in3, (pdst) + 3 * stride); \
260 #define SD4(in0, in1, in2, in3, pdst, stride) \ argument
265 SD(in3, (pdst) + 3 * stride); \
373 #define ST_V4(RTYPE, in0, in1, in2, in3, pdst, stride) \ argument
376 ST_V2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
383 #define ST_V6(RTYPE, in0, in1, in2, in3, in4, in5, pdst, stride) \ argument
385 ST_V4(RTYPE, in0, in1, in2, in3, (pdst), stride); \
390 #define ST_V8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \ argument
392 ST_V4(RTYPE, in0, in1, in2, in3, (pdst), stride); \
515 #define ST_D8(in0, in1, in2, in3, idx0, idx1, idx2, idx3, \ argument
519 ST_D4(in2, in3, idx4, idx5, idx6, idx7, pdst + 4 * stride, stride) \
531 #define ST12x8_UB(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \ argument
542 out3_m = __msa_copy_u_d((v2i64) in3, 0); \
551 out11_m = __msa_copy_u_w((v4i32) in3, 2); \
594 #define AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1) \ argument
597 out1 = (RTYPE) __msa_aver_u_b((v16u8) in2, (v16u8) in3); \
601 #define AVER_UB4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
604 AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1) \
660 #define VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \ argument
663 out1 = (RTYPE) __msa_vshf_b((v16i8) mask1, (v16i8) in3, (v16i8) in2); \
670 #define VSHF_B3(RTYPE, in0, in1, in2, in3, in4, in5, mask0, mask1, mask2, \ argument
673 VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1); \
696 #define VSHF_H2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \ argument
699 out1 = (RTYPE) __msa_vshf_h((v8i16) mask1, (v8i16) in3, (v8i16) in2); \
703 #define VSHF_H3(RTYPE, in0, in1, in2, in3, in4, in5, mask0, mask1, mask2, \ argument
706 VSHF_H2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1); \
720 #define VSHF_W2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \ argument
723 out1 = (RTYPE) __msa_vshf_w((v4i32) mask1, (v4i32) in3, (v4i32) in2); \
912 #define MIN_UH4(RTYPE, in0, in1, in2, in3, min_vec) \ argument
915 MIN_UH2(RTYPE, in2, in3, min_vec); \
951 #define CLIP_SH4_0_255(in0, in1, in2, in3) \ argument
954 CLIP_SH2_0_255(in2, in3); \
957 #define CLIP_SH8_0_255(in0, in1, in2, in3, \ argument
960 CLIP_SH4_0_255(in0, in1, in2, in3); \
982 #define CLIP_SW4_0_255(in0, in1, in2, in3) \ argument
985 CLIP_SW2_0_255(in2, in3); \
988 #define CLIP_SW8_0_255(in0, in1, in2, in3, \ argument
991 CLIP_SW4_0_255(in0, in1, in2, in3); \
1050 #define HADD_SB4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3) \ argument
1053 HADD_SB2(RTYPE, in2, in3, out2, out3); \
1080 #define HADD_UB4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3) \ argument
1083 HADD_UB2(RTYPE, in2, in3, out2, out3); \
1105 #define HSUB_UB4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3) \ argument
1108 HSUB_UB2(RTYPE, in2, in3, out2, out3); \
1160 #define INSERT_W4(RTYPE, in0, in1, in2, in3, out) \ argument
1165 out = (RTYPE) __msa_insert_w((v4i32) out, 3, in3); \
1197 #define ILVEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) \ argument
1200 out1 = (RTYPE) __msa_ilvev_b((v16i8) in3, (v16i8) in2); \
1216 #define ILVEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) \ argument
1219 out1 = (RTYPE) __msa_ilvev_h((v8i16) in3, (v8i16) in2); \
1234 #define ILVEV_W2(RTYPE, in0, in1, in2, in3, out0, out1) \ argument
1237 out1 = (RTYPE) __msa_ilvev_w((v4i32) in3, (v4i32) in2); \
1253 #define ILVEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) \ argument
1256 out1 = (RTYPE) __msa_ilvev_d((v2i64) in3, (v2i64) in2); \
1271 #define ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1) \ argument
1274 out1 = (RTYPE) __msa_ilvl_b((v16i8) in2, (v16i8) in3); \
1281 #define ILVL_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
1284 ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
1301 #define ILVL_H2(RTYPE, in0, in1, in2, in3, out0, out1) \ argument
1304 out1 = (RTYPE) __msa_ilvl_h((v8i16) in2, (v8i16) in3); \
1309 #define ILVL_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
1312 ILVL_H2(RTYPE, in0, in1, in2, in3, out0, out1); \
1327 #define ILVL_W2(RTYPE, in0, in1, in2, in3, out0, out1) \ argument
1330 out1 = (RTYPE) __msa_ilvl_w((v4i32) in2, (v4i32) in3); \
1346 #define ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1) \ argument
1349 out1 = (RTYPE) __msa_ilvr_b((v16i8) in2, (v16i8) in3); \
1357 #define ILVR_B3(RTYPE, in0, in1, in2, in3, in4, in5, out0, out1, out2) \ argument
1359 ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
1367 #define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
1370 ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
1379 #define ILVR_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
1383 ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
1401 #define ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1) \ argument
1404 out1 = (RTYPE) __msa_ilvr_h((v8i16) in2, (v8i16) in3); \
1409 #define ILVR_H3(RTYPE, in0, in1, in2, in3, in4, in5, out0, out1, out2) \ argument
1411 ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1); \
1416 #define ILVR_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
1419 ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1); \
1425 #define ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1) \ argument
1428 out1 = (RTYPE) __msa_ilvr_w((v4i32) in2, (v4i32) in3); \
1434 #define ILVR_W4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
1437 ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1); \
1452 #define ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1) \ argument
1455 out1 = (RTYPE) __msa_ilvr_d((v2i64) in2, (v2i64) in3); \
1461 #define ILVR_D3(RTYPE, in0, in1, in2, in3, in4, in5, out0, out1, out2) \ argument
1463 ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1); \
1468 #define ILVR_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
1471 ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1); \
1486 #define ILVL_D2(RTYPE, in0, in1, in2, in3, out0, out1) \ argument
1489 out1 = (RTYPE) __msa_ilvl_d((v2i64) in2, (v2i64) in3); \
1550 #define MAXI_SH4(RTYPE, in0, in1, in2, in3, max_val) \ argument
1553 MAXI_SH2(RTYPE, in2, in3, max_val); \
1558 #define MAXI_SH8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, max_val) \ argument
1560 MAXI_SH4(RTYPE, in0, in1, in2, in3, max_val); \
1584 #define SAT_UH4(RTYPE, in0, in1, in2, in3, sat_val) \ argument
1587 SAT_UH2(RTYPE, in2, in3, sat_val); \
1592 #define SAT_UH8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, sat_val) \ argument
1594 SAT_UH4(RTYPE, in0, in1, in2, in3, sat_val); \
1624 #define SAT_SH4(RTYPE, in0, in1, in2, in3, sat_val) \ argument
1627 SAT_SH2(RTYPE, in2, in3, sat_val); \
1648 #define SAT_SW4(RTYPE, in0, in1, in2, in3, sat_val) \ argument
1651 SAT_SW2(RTYPE, in2, in3, sat_val); \
1728 #define PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) \ argument
1731 out1 = (RTYPE) __msa_pckev_b((v16i8) in2, (v16i8) in3); \
1738 #define PCKEV_B3(RTYPE, in0, in1, in2, in3, in4, in5, out0, out1, out2) \ argument
1740 PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
1746 #define PCKEV_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
1749 PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
1768 #define PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) \ argument
1771 out1 = (RTYPE) __msa_pckev_h((v8i16) in2, (v8i16) in3); \
1776 #define PCKEV_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
1779 PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1); \
1796 #define PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) \ argument
1799 out1 = (RTYPE) __msa_pckev_d((v2i64) in2, (v2i64) in3); \
1805 #define PCKEV_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
1808 PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1); \
1822 #define PCKOD_D2(RTYPE, in0, in1, in2, in3, out0, out1) \ argument
1825 out1 = (RTYPE) __msa_pckod_d((v2i64) in2, (v2i64) in3); \
1859 #define XORI_B4_128(RTYPE, in0, in1, in2, in3) \ argument
1862 XORI_B2_128(RTYPE, in2, in3); \
1868 #define XORI_B5_128(RTYPE, in0, in1, in2, in3, in4) \ argument
1871 XORI_B2_128(RTYPE, in3, in4); \
1875 #define XORI_B6_128(RTYPE, in0, in1, in2, in3, in4, in5) \ argument
1877 XORI_B4_128(RTYPE, in0, in1, in2, in3); \
1882 #define XORI_B7_128(RTYPE, in0, in1, in2, in3, in4, in5, in6) \ argument
1884 XORI_B4_128(RTYPE, in0, in1, in2, in3); \
1889 #define XORI_B8_128(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7) \ argument
1891 XORI_B4_128(RTYPE, in0, in1, in2, in3); \
1906 #define ADDS_SH2(RTYPE, in0, in1, in2, in3, out0, out1) \ argument
1909 out1 = (RTYPE) __msa_adds_s_h((v8i16) in2, (v8i16) in3); \
1913 #define ADDS_SH4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
1916 ADDS_SH2(RTYPE, in0, in1, in2, in3, out0, out1); \
1935 #define SLLI_4V(in0, in1, in2, in3, shift) \ argument
1940 in3 = in3 << shift; \
1953 #define SRA_4V(in0, in1, in2, in3, shift) \ argument
1958 in3 = in3 >> shift; \
1971 #define SRL_H4(RTYPE, in0, in1, in2, in3, shift) \ argument
1976 in3 = (RTYPE) __msa_srl_h((v8i16) in3, (v8i16) shift); \
1980 #define SRLR_H4(RTYPE, in0, in1, in2, in3, shift) \ argument
1985 in3 = (RTYPE) __msa_srlr_h((v8i16) in3, (v8i16) shift); \
1990 #define SRLR_H8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, shift) \ argument
1992 SRLR_H4(RTYPE, in0, in1, in2, in3, shift); \
2024 #define SRAR_H4(RTYPE, in0, in1, in2, in3, shift) \ argument
2027 SRAR_H2(RTYPE, in2, in3, shift) \
2050 #define SRAR_W4(RTYPE, in0, in1, in2, in3, shift) \ argument
2053 SRAR_W2(RTYPE, in2, in3, shift) \
2075 #define SRARI_H4(RTYPE, in0, in1, in2, in3, shift) \ argument
2078 SRARI_H2(RTYPE, in2, in3, shift); \
2100 #define SRARI_W4(RTYPE, in0, in1, in2, in3, shift) \ argument
2103 SRARI_W2(RTYPE, in2, in3, shift); \
2115 #define MUL2(in0, in1, in2, in3, out0, out1) \ argument
2118 out1 = in2 * in3; \
2120 #define MUL4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \ argument
2122 MUL2(in0, in1, in2, in3, out0, out1); \
2132 #define ADD2(in0, in1, in2, in3, out0, out1) \ argument
2135 out1 = in2 + in3; \
2137 #define ADD4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \ argument
2139 ADD2(in0, in1, in2, in3, out0, out1); \
2149 #define SUB2(in0, in1, in2, in3, out0, out1) \ argument
2152 out1 = in2 - in3; \
2154 #define SUB4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \ argument
2157 out1 = in2 - in3; \
2286 #define BUTTERFLY_4(in0, in1, in2, in3, out0, out1, out2, out3) \ argument
2288 out0 = in0 + in3; \
2292 out3 = in0 - in3; \
2300 #define BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, \ argument
2306 out3 = in3 + in4; \
2308 out4 = in3 - in4; \
2319 #define BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, \ argument
2327 out3 = in3 + in12; \
2337 out12 = in3 - in12; \
2349 #define TRANSPOSE4x4_UB_UB(in0, in1, in2, in3, out0, out1, out2, out3) \ argument
2354 ILVR_D2_SB(in1, in0, in3, in2, s0_m, s1_m); \
2369 #define TRANSPOSE8x4_UB(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
2376 ILVEV_W2_SB(in2, in6, in3, in7, tmp0_m, tmp1_m); \
2396 #define TRANSPOSE8x8_UB(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
2403 ILVR_B4_SB(in2, in0, in3, in1, in6, in4, in7, in5, \
2422 #define TRANSPOSE16x4_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, \ argument
2437 ILVEV_W2_SD(in3, in7, in11, in15, tmp0_m, tmp1_m); \
2457 #define TRANSPOSE16x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, \ argument
2465 ILVEV_D2_UB(in2, in10, in3, in11, out5, out4); \
2503 #define TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, out0, out1, out2, out3) \ argument
2507 ILVR_H2_SH(in1, in0, in3, in2, s0_m, s1_m); \
2519 #define TRANSPOSE8x8_H(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
2530 ILVR_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \
2532 ILVL_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \
2550 #define TRANSPOSE4x4_SW_SW(in0, in1, in2, in3, out0, out1, out2, out3) \ argument
2555 ILVRL_W2_SW(in3, in2, s2_m, s3_m); \
2577 #define AVE_ST8x4_UB(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \ argument
2583 tmp1_m = __msa_ave_u_b((v16u8) in2, (v16u8) in3); \
2608 #define AVE_ST16x4_UB(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \ argument
2613 tmp1_m = __msa_ave_u_b((v16u8) in2, (v16u8) in3); \
2634 #define AVER_ST8x4_UB(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \ argument
2639 AVER_UB4_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
2663 #define AVER_ST16x4_UB(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \ argument
2667 AVER_UB4_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
2687 #define AVER_DST_ST8x4_UB(in0, in1, in2, in3, in4, in5, in6, in7, \ argument
2694 AVER_UB4_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
2715 #define AVER_DST_ST16x4_UB(in0, in1, in2, in3, in4, in5, in6, in7, \ argument
2722 AVER_UB4_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
2733 #define ADDBLK_ST4x4_UB(in0, in1, in2, in3, pdst, stride) \ argument
2742 ILVR_D2_SH(in1, in0, in3, in2, inp0_m, inp1_m) \
2800 #define CONVERT_UB_AVG_ST8x4_UB(in0, in1, in2, in3, \ argument
2807 tmp1_m = PCKEV_XORI128_UB(in2, in3); \
2817 #define PCKEV_ST4x4_UB(in0, in1, in2, in3, pdst, stride) \ argument
2822 PCKEV_B2_SB(in1, in0, in3, in2, tmp0_m, tmp1_m); \