/external/XNNPACK/src/f32-sigmoid/gen/ |
D | avx2-rr1-p5-nr2fma-x72.c | in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x72():
    180  __m256 vr8 = _mm256_rcp_ps(vd8);  (local)
    190  vr8 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr8, vd8, vone), vr8, vr8);
    200  vr8 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr8, vd8, vone), vr8, vr8);
    210  __m256 vf8 = _mm256_mul_ps(ve8, vr8);
|
D | avx2-rr1-p5-nr2fma-x80.c | in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x80():
    193  __m256 vr8 = _mm256_rcp_ps(vd8);  (local)
    204  vr8 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr8, vd8, vone), vr8, vr8);
    215  vr8 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr8, vd8, vone), vr8, vr8);
    226  __m256 vf8 = _mm256_mul_ps(ve8, vr8);
|
D | avx-rr2-p5-nr2-x72.c | in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x72():
    210  __m256 vr8 = _mm256_rcp_ps(vd8);  (local)
    228  vr8 = _mm256_mul_ps(vr8, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr8, vd8)));
    229  vr8 = _mm256_mul_ps(vr8, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr8, vd8)));
    239  __m256 vf8 = _mm256_mul_ps(ve8, vr8);
|
D | avx2-rr1-p5-nr1fma-x72.c | in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x72():
    180  __m256 vr8 = _mm256_rcp_ps(vd8);  (local)
    190  vr8 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr8, vd8, vone), vr8, vr8);
    201  __m256 vf8 = _mm256_mul_ps(ve8, vr8);
|
D | avx2-rr1-p5-nr1fma-x80.c | in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x80():
    193  __m256 vr8 = _mm256_rcp_ps(vd8);  (local)
    204  vr8 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr8, vd8, vone), vr8, vr8);
    216  __m256 vf8 = _mm256_mul_ps(ve8, vr8);
|
D | avx-rr2-p5-nr2-x80.c | in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80():
    226  __m256 vr8 = _mm256_rcp_ps(vd8);  (local)
    245  vr8 = _mm256_mul_ps(vr8, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr8, vd8)));
    246  vr8 = _mm256_mul_ps(vr8, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr8, vd8)));
    258  __m256 vf8 = _mm256_mul_ps(ve8, vr8);
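Note: every XNNPACK hit above is the same idiom. _mm256_rcp_ps yields a ~12-bit estimate of 1/vd8 (the sigmoid denominator), which is then sharpened by Newton-Raphson steps: the FMA form r' = r + r*(1 - r*d) in the avx2 nr1fma/nr2fma kernels, and the multiply-only form r' = r*(2 - r*d) in the avx nr2 kernels. A minimal self-contained sketch of both step shapes follows; the helper names (nr_step_fma, nr_step_mul, sigmoid_tail) are illustrative, not XNNPACK's.

    #include <immintrin.h>

    /* FMA Newton-Raphson step for a reciprocal estimate r of 1/d:
       r' = r + r*(1 - r*d) == fmadd(fnmadd(r, d, 1), r, r).            */
    static inline __m256 nr_step_fma(__m256 vr, __m256 vd, __m256 vone) {
      return _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
    }

    /* Multiply-only step for AVX targets without FMA:
       r' = r * (2 - r*d).                                              */
    static inline __m256 nr_step_mul(__m256 vr, __m256 vd, __m256 vtwo) {
      return _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
    }

    /* Sketch of the kernel tail: given ve = exp(-|x|) and vd = 1 + ve,
       approximate vf = ve / vd without a hardware divide.              */
    static __m256 sigmoid_tail(__m256 ve, __m256 vd) {
      const __m256 vone = _mm256_set1_ps(1.0f);
      __m256 vr = _mm256_rcp_ps(vd);   /* ~12-bit estimate of 1/vd      */
      vr = nr_step_fma(vr, vd, vone);  /* step 1: roughly doubles bits  */
      vr = nr_step_fma(vr, vd, vone);  /* step 2 (the nr2fma variants)  */
      return _mm256_mul_ps(ve, vr);
    }

Each step roughly doubles the number of correct bits, which is why the nr2 variants reach full single precision from the 12-bit estimate, while the nr1fma variants trade a little accuracy for one fewer step.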
|
/external/llvm-project/lldb/source/Plugins/Process/Utility/ |
D | RegisterInfos_ppc64.h |
    149  DEFINE_VMX_PPC64(vr8, LLDB_INVALID_REGNUM), \
    286  uint32_t vr8[4];  (member)
|
D | RegisterInfos_ppc64le.h |
    160  DEFINE_VMX(vr8, LLDB_INVALID_REGNUM), \
    364  uint32_t vr8[4];  (member)
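Both headers show the same pair of matches: the DEFINE_VMX_PPC64/DEFINE_VMX table row that registers vr8 (with LLDB_INVALID_REGNUM presumably filling a register-numbering slot that vr8 has no entry for), and a uint32_t vr8[4] struct member that models the 128-bit Altivec register as four 32-bit words.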
|
/external/llvm-project/llvm/lib/Target/CSKY/ |
D | CSKYRegisterInfo.td | 107 def F8_32 : CSKYFReg32<8, "fr8", ["vr8"]>, DwarfRegNum<[40]>;
|
/external/elfutils/tests/ |
D | run-allregs.sh |
    1152  1132: vr8 (vr8), unsigned 128 bits
    2175  1132: vr8 (vr8), unsigned 128 bits
|
D | run-addrcfi.sh |
    1294  vector reg1132 (vr8): undefined
    2316  vector reg1132 (vr8): undefined
    3344  vector reg1132 (vr8): undefined
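In both elfutils test expectations, 1132 is the DWARF register number assigned to vr8 on ppc64; the Altivec registers vrN sit at 1124 + N in that numbering, consistent with the "unsigned 128 bits" width reported above.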
|
/external/hyphenation-patterns/nn/ |
D | hyph-nn.pat.txt | 25750 2vr8
|
/external/hyphenation-patterns/nb/ |
D | hyph-nb.pat.txt | 25750 2vr8
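The identical hit in the Nynorsk (nn) and Bokmål (nb) files is a Liang hyphenation pattern, not code: in 2vr8, the inter-letter digits weight hyphenation points, and the even values forbid a break before the v (weight 2) and after the r (weight 8).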
|