/external/eigen/test/
D | array_replicate.cpp |
      32  VectorX vx1;  in replicate() local
      52  vx1.resize(3*rows,cols);  in replicate()
      53  vx1 << m2, m2, m2;  in replicate()
      54  VERIFY_IS_APPROX(vx1+vx1, vx1+(m2.template replicate<3,1>()));  in replicate()
      56  vx1=m2+(m2.colwise().replicate(1));  in replicate()
      66  vx1.resize(rows*f2);  in replicate()
      68  vx1.segment(j*rows,rows) = v1;  in replicate()
      69  VERIFY_IS_APPROX(vx1, v1.colwise().replicate(f2));  in replicate()
|
/external/linux-kselftest/tools/testing/selftests/net/forwarding/
D | vxlan_bridge_1d.sh |
     136  ip link add name vx1 type vxlan id 1000 \
     139  ip link set dev vx1 up
     141  ip link set dev vx1 master br1
     148  bridge fdb append dev vx1 00:00:00:00:00:00 dst 192.0.2.34 self
     149  bridge fdb append dev vx1 00:00:00:00:00:00 dst 192.0.2.50 self
     157  bridge fdb del dev vx1 00:00:00:00:00:00 dst 192.0.2.50 self
     158  bridge fdb del dev vx1 00:00:00:00:00:00 dst 192.0.2.34 self
     160  ip link set dev vx1 nomaster
     161  ip link set dev vx1 down
     162  ip link del dev vx1
     [all …]
|
/external/linux-kselftest/tools/testing/selftests/drivers/net/mlxsw/
D | extack.sh |
      38  ip link add name vx1 up type vxlan id 1000 \
      42  ip link set dev vx1 master br1
      75  ip link del dev vx1
      86  ip link add name vx1 up type vxlan id 1000 \
      90  ip link set dev vx1 master br1
      93  bridge vlan add dev vx1 vid 1
      99  bridge vlan add dev vx1 vid 1 pvid untagged 2>&1 >/dev/null \
     105  ip link del dev vx1
     116  ip link add name vx1 up type vxlan id 1000 \
     125  ip link set dev vx1 master br1
     [all …]
|
/external/XNNPACK/src/f32-vlrelu/gen/
D | vlrelu-scalar-x2.c |
      29  const float vx1 = x[1];  in xnn_f32_vlrelu_ukernel__scalar_x2() local
      33  float vacc1 = vx1 * vslope;  in xnn_f32_vlrelu_ukernel__scalar_x2()
      36  vacc1 = XNN_UNPREDICTABLE(vx1 < 0.0f) ? vacc1 : vx1;  in xnn_f32_vlrelu_ukernel__scalar_x2()
|
D | vlrelu-scalar-x4.c |
      29  const float vx1 = x[1];  in xnn_f32_vlrelu_ukernel__scalar_x4() local
      35  float vacc1 = vx1 * vslope;  in xnn_f32_vlrelu_ukernel__scalar_x4()
      40  vacc1 = XNN_UNPREDICTABLE(vx1 < 0.0f) ? vacc1 : vx1;  in xnn_f32_vlrelu_ukernel__scalar_x4()
|
D | vlrelu-wasm-x2.c |
      30  const float vx1 = x[1];  in xnn_f32_vlrelu_ukernel__wasm_x2() local
      34  const float vnegx1 = __builtin_wasm_min_f32(vx1, vzero);  in xnn_f32_vlrelu_ukernel__wasm_x2()
      39  const float vposx1 = __builtin_wasm_max_f32(vx1, vzero);  in xnn_f32_vlrelu_ukernel__wasm_x2()
|
D | vlrelu-wasm-x4.c |
      30  const float vx1 = x[1];  in xnn_f32_vlrelu_ukernel__wasm_x4() local
      36  const float vnegx1 = __builtin_wasm_min_f32(vx1, vzero);  in xnn_f32_vlrelu_ukernel__wasm_x4()
      43  const float vposx1 = __builtin_wasm_max_f32(vx1, vzero);  in xnn_f32_vlrelu_ukernel__wasm_x4()
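For reference, the f32-vlrelu kernels listed above implement LeakyReLU: the scalar variants scale each lane by the slope and keep the scaled value only for negative inputs, while the WASM variants split each lane into min(x, 0) and max(x, 0) halves. A minimal scalar sketch of the same computation, using a hypothetical leaky_relu_f32 helper rather than the XNNPACK entry points:

#include <stddef.h>

// Minimal scalar LeakyReLU sketch (hypothetical helper, not the XNNPACK API):
//   y[i] = x[i]          if x[i] >= 0
//   y[i] = x[i] * slope  if x[i] <  0
static void leaky_relu_f32(const float* x, float* y, size_t n, float slope) {
  for (size_t i = 0; i < n; i++) {
    const float vx = x[i];
    const float vacc = vx * slope;   // scaled value, only kept for negative inputs
    y[i] = (vx < 0.0f) ? vacc : vx;  // select, mirroring the XNN_UNPREDICTABLE ternary
  }
}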
|
/external/XNNPACK/src/x32-packx/
D | x4-wasmsimd.c |
      41  const v128_t vx1 = wasm_v128_load(x1);  in xnn_x32_packx_ukernel_4x__wasmsimd() local
      48  const v128_t vt0 = wasm_v32x4_shuffle(vx0, vx1, 0, 4, 1, 5);  in xnn_x32_packx_ukernel_4x__wasmsimd()
      49  const v128_t vt1 = wasm_v32x4_shuffle(vx0, vx1, 2, 6, 3, 7);  in xnn_x32_packx_ukernel_4x__wasmsimd()
      70  const float vx1 = *x1++;  in xnn_x32_packx_ukernel_4x__wasmsimd() local
      74  y[1] = vx1;  in xnn_x32_packx_ukernel_4x__wasmsimd()
|
D | x4-sse.c |
      42  const __m128 vx1 = _mm_loadu_ps(x1);  in xnn_x32_packx_ukernel_4x__sse() local
      49  const __m128 vt0 = _mm_unpacklo_ps(vx0, vx1);  in xnn_x32_packx_ukernel_4x__sse()
      50  const __m128 vt1 = _mm_unpackhi_ps(vx0, vx1);  in xnn_x32_packx_ukernel_4x__sse()
      72  const __m128 vx1 = _mm_load_ss(x1);  in xnn_x32_packx_ukernel_4x__sse() local
      79  const __m128 vx01 = _mm_unpacklo_ps(vx0, vx1);  in xnn_x32_packx_ukernel_4x__sse()
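For reference, the x32-packx kernels listed above interleave four rows of 32-bit elements so that each group of four consecutive outputs holds one element from every row (a 4x4 transpose built from unpack/shuffle pairs). A short SSE sketch of that interleave under the assumed layout, with a hypothetical pack_4rows_x4_sse helper:

#include <xmmintrin.h>

// Interleave one 4x4 tile: y receives x0[i], x1[i], x2[i], x3[i] for i = 0..3.
// (Sketch only; the real kernels also handle remainders and arbitrary widths.)
static void pack_4rows_x4_sse(const float* x0, const float* x1,
                              const float* x2, const float* x3, float* y) {
  const __m128 vx0 = _mm_loadu_ps(x0);
  const __m128 vx1 = _mm_loadu_ps(x1);
  const __m128 vx2 = _mm_loadu_ps(x2);
  const __m128 vx3 = _mm_loadu_ps(x3);

  // Pairwise interleave: vt0 = {x0[0], x1[0], x0[1], x1[1]}, etc.
  const __m128 vt0 = _mm_unpacklo_ps(vx0, vx1);
  const __m128 vt1 = _mm_unpackhi_ps(vx0, vx1);
  const __m128 vt2 = _mm_unpacklo_ps(vx2, vx3);
  const __m128 vt3 = _mm_unpackhi_ps(vx2, vx3);

  // Combine 64-bit halves to finish the 4x4 transpose.
  _mm_storeu_ps(y +  0, _mm_movelh_ps(vt0, vt2));  // x0[0] x1[0] x2[0] x3[0]
  _mm_storeu_ps(y +  4, _mm_movehl_ps(vt2, vt0));  // x0[1] x1[1] x2[1] x3[1]
  _mm_storeu_ps(y +  8, _mm_movelh_ps(vt1, vt3));  // x0[2] x1[2] x2[2] x3[2]
  _mm_storeu_ps(y + 12, _mm_movehl_ps(vt3, vt1));  // x0[3] x1[3] x2[3] x3[3]
}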
|
/external/XNNPACK/src/f32-hswish/gen/
D | hswish-wasm-x2.c |
      35  float vx1 = x[1];  in xnn_f32_hswish_ukernel__wasm_x2() local
      40  float vacc1 = vx1 + vthree;  in xnn_f32_hswish_ukernel__wasm_x2()
      41  vx1 *= vsixth;  in xnn_f32_hswish_ukernel__wasm_x2()
      50  vacc1 *= vx1;  in xnn_f32_hswish_ukernel__wasm_x2()
|
D | hswish-scalar-x2.c |
      35  float vx1 = x[1];  in xnn_f32_hswish_ukernel__scalar_x2() local
      40  float vacc1 = vx1 + vthree;  in xnn_f32_hswish_ukernel__scalar_x2()
      41  vx1 *= vsixth;  in xnn_f32_hswish_ukernel__scalar_x2()
      50  vacc1 *= vx1;  in xnn_f32_hswish_ukernel__scalar_x2()
|
D | hswish-wasm-x4.c |
      35  float vx1 = x[1];  in xnn_f32_hswish_ukernel__wasm_x4() local
      42  float vacc1 = vx1 + vthree;  in xnn_f32_hswish_ukernel__wasm_x4()
      43  vx1 *= vsixth;  in xnn_f32_hswish_ukernel__wasm_x4()
      60  vacc1 *= vx1;  in xnn_f32_hswish_ukernel__wasm_x4()
|
D | hswish-scalar-x4.c |
      35  float vx1 = x[1];  in xnn_f32_hswish_ukernel__scalar_x4() local
      42  float vacc1 = vx1 + vthree;  in xnn_f32_hswish_ukernel__scalar_x4()
      43  vx1 *= vsixth;  in xnn_f32_hswish_ukernel__scalar_x4()
      60  vacc1 *= vx1;  in xnn_f32_hswish_ukernel__scalar_x4()
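For reference, the f32-hswish kernels listed above compute hard-swish, y = x * min(max(x + 3, 0), 6) / 6, folding the 1/6 factor into x before the final multiply. A scalar sketch with a hypothetical hswish_f32 helper:

#include <stddef.h>

// Scalar hard-swish sketch (hypothetical helper): y = x * min(max(x + 3, 0), 6) / 6.
static void hswish_f32(const float* x, float* y, size_t n) {
  const float vsixth = 0x1.555556p-3f;  // ~1/6
  const float vthree = 3.0f;
  const float vsix = 6.0f;
  for (size_t i = 0; i < n; i++) {
    float vx = x[i];
    float vacc = vx + vthree;          // x + 3
    vx *= vsixth;                      // x / 6
    vacc = vacc < 0.0f ? 0.0f : vacc;  // max(x + 3, 0)
    vacc = vacc > vsix ? vsix : vacc;  // min(..., 6)
    y[i] = vacc * vx;                  // clamp(x + 3, 0, 6) * x / 6
  }
}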
|
/external/XNNPACK/src/f32-vunary/gen/
D | vsqr-scalar-x2.c |
      30  const float vx1 = x[1];  in xnn_f32_vsqr_ukernel__scalar_x2() local
      34  const float vy1 = vx1 * vx1;  in xnn_f32_vsqr_ukernel__scalar_x2()
|
D | vsqr-scalar-x4.c |
      30  const float vx1 = x[1];  in xnn_f32_vsqr_ukernel__scalar_x4() local
      36  const float vy1 = vx1 * vx1;  in xnn_f32_vsqr_ukernel__scalar_x4()
|
/external/XNNPACK/src/f32-raddstoreexpminusmax/gen/
D | scalar-lut64-p2-x2.c |
      51  const float vx1 = vi1 - vi_max;  in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x2() local
      61  float vn1 = vx1 * vlog2e_x64 + vmagic_bias;  in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x2()
      90  float vt1 = vn1 * vminus_ln2_o64_hi + vx1;  in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x2()
     115  if XNN_UNPREDICTABLE(vx1 < vdenorm_cutoff) {  in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x2()
|
D | scalar-p5-x2.c |
      50  const float vx1 = vi1 - vi_max;  in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x2() local
      58  float vn1 = vx1 * vlog2e + vmagic_bias;  in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x2()
      72  float vt1 = vn1 * vminus_ln2_hi + vx1;  in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x2()
     105  if XNN_UNPREDICTABLE(vx1 < vdenorm_cutoff) {  in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x2()
|
D | scalar-p5-x2-acc2.c |
      51  const float vx1 = vi1 - vi_max;  in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x2_acc2() local
      59  float vn1 = vx1 * vlog2e + vmagic_bias;  in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x2_acc2()
      73  float vt1 = vn1 * vminus_ln2_hi + vx1;  in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x2_acc2()
     106  if XNN_UNPREDICTABLE(vx1 < vdenorm_cutoff) {  in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x2_acc2()
|
D | scalar-lut64-p2-x2-acc2.c |
      52  const float vx1 = vi1 - vi_max;  in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x2_acc2() local
      62  float vn1 = vx1 * vlog2e_x64 + vmagic_bias;  in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x2_acc2()
      91  float vt1 = vn1 * vminus_ln2_o64_hi + vx1;  in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x2_acc2()
     116  if XNN_UNPREDICTABLE(vx1 < vdenorm_cutoff) {  in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x2_acc2()
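For reference, the f32-raddstoreexpminusmax kernels listed above compute exp(x - max) per element, store the results, and accumulate their sum (the numerator pass of a softmax); the p5 variants approximate the exponential with a degree-5 polynomial, the lut64-p2 variants with a 64-entry table plus a degree-2 polynomial, and both flush outputs to zero once x - max falls below a denormal cutoff. A plain-libm sketch of the overall pattern, using expf in place of those approximations and a hypothetical raddstoreexpminusmax_ref helper:

#include <math.h>
#include <stddef.h>

// Reference "reduce-add, store, exp(x - max)" pass (hypothetical helper).
static float raddstoreexpminusmax_ref(const float* input, float* output,
                                      size_t n, float max) {
  float sum = 0.0f;
  for (size_t i = 0; i < n; i++) {
    const float vx = input[i] - max;  // shift so the largest element maps to exp(0) = 1
    const float vf = expf(vx);        // stands in for the range-reduced approximation
    output[i] = vf;
    sum += vf;                        // running sum for the later softmax division
  }
  return sum;
}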
|
/external/XNNPACK/src/f32-velu/gen/
D | velu-avx2-rr1-p6-x16.c |
      44  __m256 vx1 = _mm256_loadu_ps(x + 8);  in xnn_f32_velu_ukernel__avx2_rr1_p6_x16() local
      48  const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));  in xnn_f32_velu_ukernel__avx2_rr1_p6_x16()
      86  vx1 = _mm256_mul_ps(vx1, vbeta);  in xnn_f32_velu_ukernel__avx2_rr1_p6_x16()
      89  const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);  in xnn_f32_velu_ukernel__avx2_rr1_p6_x16()
|
D | velu-avx2-rr1-lut16-p3-gather-x16.c |
      44  __m256 vx1 = _mm256_loadu_ps(x + 8);  in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x16() local
      48  const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));  in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x16()
      84  vx1 = _mm256_mul_ps(vx1, vbeta);  in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x16()
      87  const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);  in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x16()
|
D | velu-avx2-rr1-lut8-p4-perm-x16.c |
      44  __m256 vx1 = _mm256_loadu_ps(x + 8);  in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x16() local
      48  const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));  in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x16()
      84  vx1 = _mm256_mul_ps(vx1, vbeta);  in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x16()
      87  const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);  in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x16()
|
D | velu-scalar-rr2-lut16-p3-x2.c |
      45  float vx1 = x[1];  in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x2() local
      49  const float vz1 = vx1 * vprescale;  in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x2()
      94  float vy1 = vx1 * vbeta;  in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x2()
      99  if XNN_UNPREDICTABLE(vx1 < 0.0f) {  in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x2()
|
D | velu-scalar-rr2-p6-x2.c |
      45  float vx1 = x[1];  in xnn_f32_velu_ukernel__scalar_rr2_p6_x2() local
      49  const float vz1 = vx1 * vprescale;  in xnn_f32_velu_ukernel__scalar_rr2_p6_x2()
     100  float vy1 = vx1 * vbeta;  in xnn_f32_velu_ukernel__scalar_rr2_p6_x2()
     105  if XNN_UNPREDICTABLE(vx1 < 0.0f) {  in xnn_f32_velu_ukernel__scalar_rr2_p6_x2()
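For reference, the f32-velu kernels listed above compute an extended ELU: non-negative inputs are scaled by beta, negative inputs follow alpha * (exp(prescale * x) - 1), with the exponential approximated by a polynomial or lookup table on a saturated argument. A scalar sketch using libm's expm1f and a hypothetical elu_f32 helper:

#include <math.h>
#include <stddef.h>

// Scalar ELU sketch (hypothetical helper):
//   y = beta * x                          for x >= 0
//   y = alpha * (exp(prescale * x) - 1)   for x <  0
static void elu_f32(const float* x, float* y, size_t n,
                    float prescale, float alpha, float beta) {
  for (size_t i = 0; i < n; i++) {
    const float vx = x[i];
    const float vz = vx * prescale;       // pre-scaled argument for the negative branch
    const float ve = alpha * expm1f(vz);  // alpha * (exp(z) - 1)
    y[i] = (vx < 0.0f) ? ve : vx * beta;  // select branch by sign, as in the kernels
  }
}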
|
/external/XNNPACK/src/f32-vsqrt/gen/
D | avx512f-nr1fma1adj-x32.c |
      31  const __m512 vx1 = _mm512_loadu_ps(x + 16);  in xnn_f32_vsqrt_ukernel__avx512f_nr1fma1adj_x32() local
      35  const __m512 vrsqrtx1 = _mm512_rsqrt14_ps(vx1);  in xnn_f32_vsqrt_ukernel__avx512f_nr1fma1adj_x32()
      39  __m512 vsqrtx1 = _mm512_mul_ps(vrsqrtx1, vx1);  in xnn_f32_vsqrt_ukernel__avx512f_nr1fma1adj_x32()
      51  const __m512 vadjustment1 = _mm512_fnmadd_ps(vsqrtx1, vsqrtx1, vx1);  in xnn_f32_vsqrt_ukernel__avx512f_nr1fma1adj_x32()
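For reference, the f32-vsqrt kernel listed above starts from the hardware reciprocal-square-root estimate (_mm512_rsqrt14_ps), multiplies it by x to get an initial square root, applies one Newton-Raphson step, and finishes with a residual adjustment computed via fnmadd. A scalar sketch of that refinement with a hypothetical sqrt_nr1_adj helper (libm supplies the starting estimate here just to keep the sketch self-contained):

#include <math.h>

// sqrt(x) via rsqrt estimate + one Newton-Raphson step + final residual adjustment.
static float sqrt_nr1_adj(float x) {
  const float r = 1.0f / sqrtf(x);  // stand-in for the ~14-bit rsqrt14 estimate
  float s = r * x;                  // s ~= sqrt(x)
  float h = r * 0.5f;               // h ~= 1 / (2 * sqrt(x))

  // One Newton-Raphson step applied jointly to s and h.
  const float residual = 0.5f - s * h;  // fnmadd(s, h, 0.5) in the kernel
  s = s * residual + s;
  h = h * residual + h;

  // Final adjustment: correct s by its remaining squared error against x.
  const float adjustment = x - s * s;   // fnmadd(s, s, x) in the kernel
  return h * adjustment + s;
}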
|