/external/libvterm/src/
parser.c:
   13  static void do_control(VTerm *vt, unsigned char control)
   15    if(vt->parser.callbacks && vt->parser.callbacks->control)
   16      if((*vt->parser.callbacks->control)(control, vt->parser.cbdata))
   22  static void do_csi(VTerm *vt, char command)
   26    printf(" leader: %s\n", vt->parser.csi_leader);
   27    for(int argi = 0; argi < vt->parser.csi_argi; argi++) {
   28      printf(" %lu", CSI_ARG(vt->parser.csi_args[argi]));
   29      if(!CSI_ARG_HAS_MORE(vt->parser.csi_args[argi]))
   31    printf(" intermed: %s\n", vt->parser.intermed);
   35    if(vt->parser.callbacks && vt->parser.callbacks->csi)
  [all …]
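Both matches show the same dispatch pattern: an optional callback table whose entries may individually be NULL, where a nonzero return from the callback means the event was consumed. A minimal standalone sketch of that pattern (the struct and names here are illustrative, not libvterm's public API):

    #include <stdio.h>

    /* Callback table in the style of libvterm's parser: every entry is
     * optional, and a nonzero return means "handled". */
    typedef struct {
      int (*control)(unsigned char control, void *user);
    } Callbacks;

    typedef struct {
      const Callbacks *callbacks;
      void *cbdata;
    } Parser;

    static void do_control(Parser *p, unsigned char control) {
      /* Guard both the table and the individual pointer before calling. */
      if (p->callbacks && p->callbacks->control)
        if ((*p->callbacks->control)(control, p->cbdata))
          return;                       /* callback consumed the byte */
      fprintf(stderr, "unhandled control 0x%02x\n", control);
    }

    static int on_control(unsigned char c, void *user) {
      (void)user;
      return c == 0x07;                 /* pretend we only handle BEL */
    }

    int main(void) {
      Callbacks cbs = { on_control };
      Parser p = { &cbs, NULL };
      do_control(&p, 0x07);             /* consumed silently */
      do_control(&p, 0x0e);             /* falls through to the warning */
      return 0;
    }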
vterm.c (matches in vterm_new_with_allocator):
   38  VTerm *vt = (*funcs->malloc)(sizeof(VTerm), allocdata);
   40  vt->allocator = funcs;
   41  vt->allocdata = allocdata;
   43  vt->rows = rows;
   44  vt->cols = cols;
   46  vt->parser.state = NORMAL;
   48  vt->parser.callbacks = NULL;
   49  vt->parser.cbdata = NULL;
   51  vt->parser.strbuffer_len = 64;
   52  vt->parser.strbuffer_cur = 0;
  [all …]
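The constructor takes an allocator vtable so embedders can route libvterm's allocations; the matches show it stashing funcs and allocdata before initializing parser state. A sketch of supplying such an allocator, assuming only the shape implied above, a malloc/free pair that receives the user's allocdata pointer (struct and field names are assumptions):

    #include <stdlib.h>

    typedef struct {
      void *(*malloc)(size_t size, void *allocdata);
      void  (*free)(void *ptr, void *allocdata);
    } AllocatorFuncs;

    /* Example hooks: count every byte handed out via allocdata. */
    static void *counting_malloc(size_t size, void *allocdata) {
      *(size_t *)allocdata += size;
      return malloc(size);
    }

    static void counting_free(void *ptr, void *allocdata) {
      (void)allocdata;
      free(ptr);
    }

    static const AllocatorFuncs counting_allocator = {
      counting_malloc,
      counting_free,
    };

    /* Usage would then look like
     *   size_t total = 0;
     *   vterm_new_with_allocator(rows, cols, &counting_allocator, &total);
     * so every internal allocation flows through the hooks above. */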
keyboard.c:
    7  void vterm_keyboard_unichar(VTerm *vt, uint32_t c, VTermModifier mod)
   19    vterm_push_output_bytes(vt, str, seqlen);
   44    vterm_push_output_sprintf_ctrl(vt, C1_CSI, "%d;%du", c, mod+1);
   51    vterm_push_output_sprintf(vt, "%s%c", mod & VTERM_MOD_ALT ? ESC_S : "", c);
  128  void vterm_keyboard_key(VTerm *vt, VTermKey key, VTermModifier mod)
  157    vterm_push_output_sprintf_ctrl(vt, C1_CSI, "Z");
  159    vterm_push_output_sprintf_ctrl(vt, C1_CSI, "1;%dZ", mod+1);
  166    if(vt->state->mode.newline)
  167      vterm_push_output_sprintf(vt, "\r\n");
  174    vterm_push_output_sprintf_ctrl(vt, C1_CSI, "%d;%du", k.literal, mod+1);
  [all …]
input.c:
    7  void vterm_input_push_char(VTerm *vt, VTermModifier mod, uint32_t c)
   19    vterm_push_output_bytes(vt, str, seqlen);
   40    vterm_push_output_sprintf_ctrl(vt, C1_CSI, "%d;%du", c, mod+1);
   47    vterm_push_output_sprintf(vt, "%s%c", mod & VTERM_MOD_ALT ? "\e" : "", c);
  124  void vterm_input_push_key(VTerm *vt, VTermModifier mod, VTermKey key)
  153    vterm_push_output_sprintf_ctrl(vt, C1_CSI, "Z");
  155    vterm_push_output_sprintf_ctrl(vt, C1_CSI, "1;%dZ", mod+1);
  162    if(vt->state->mode.newline)
  163      vterm_push_output_sprintf(vt, "\r\n");
  170    vterm_push_output_sprintf_ctrl(vt, C1_CSI, "%d;%du", k.literal, mod+1);
  [all …]
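keyboard.c and input.c implement the same encoding (input.c reads as an older copy of the keyboard.c logic): a modified key goes out as a CSI sequence whose final parameter is the modifier bitmask plus one, as in the "1;%dZ" backtab case above. A sketch of just that encoding, using the xterm-convention modifier values (the helper itself is illustrative, not libvterm API):

    #include <stdio.h>

    enum { MOD_SHIFT = 1, MOD_ALT = 2, MOD_CTRL = 4 };

    /* Emit backtab (CSI Z), adding the xterm "modifier + 1" parameter
     * only when a modifier is held, as in vterm_keyboard_key above. */
    static void emit_backtab(int mod) {
      if (mod == 0)
        printf("\033[Z");                 /* unmodified: plain CSI Z */
      else
        printf("\033[1;%dZ", mod + 1);    /* modified: CSI 1;<mod+1> Z */
    }

    int main(void) {
      emit_backtab(0);                    /* ESC [ Z */
      emit_backtab(MOD_CTRL | MOD_SHIFT); /* ESC [ 1;6 Z */
      return 0;
    }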
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/ |
tailcall-ri64.ll:
   13  %vt = type { i32 (...)** }
   15  define %vt* @_ZN4llvm9UnsetInit20convertInitializerToEPNS_5RecTyE(%class*
   16      %this, %vt* %Ty) align 2 {
   18    %0 = bitcast %vt* %Ty to %vt* (%vt*, %class*)***
   19    %vtable = load %vt* (%vt*, %class*)**, %vt* (%vt*, %class*)*** %0, align 8
   20    %vfn = getelementptr inbounds %vt* (%vt*, %class*)*, %vt* (%vt*, %class*)** %vtable, i64 4
   21    %1 = load %vt* (%vt*, %class*)*, %vt* (%vt*, %class*)** %vfn, align 8
   22    %call = tail call %vt* %1(%vt* %Ty, %class* %this)
   23    ret %vt* %call
/external/llvm/test/CodeGen/X86/ |
tailcall-ri64.ll:
   13  %vt = type { i32 (...)** }
   15  define %vt* @_ZN4llvm9UnsetInit20convertInitializerToEPNS_5RecTyE(%class*
   16      %this, %vt* %Ty) align 2 {
   18    %0 = bitcast %vt* %Ty to %vt* (%vt*, %class*)***
   19    %vtable = load %vt* (%vt*, %class*)**, %vt* (%vt*, %class*)*** %0, align 8
   20    %vfn = getelementptr inbounds %vt* (%vt*, %class*)*, %vt* (%vt*, %class*)** %vtable, i64 4
   21    %1 = load %vt* (%vt*, %class*)*, %vt* (%vt*, %class*)** %vfn, align 8
   22    %call = tail call %vt* %1(%vt* %Ty, %class* %this)
   23    ret %vt* %call
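Both trees carry an identical copy of this test. The IR is the standard lowering of a C++ virtual call: load the vtable pointer from the object, index slot 4, and tail-call through the loaded function pointer with the object as the first argument. A C sketch of the same shape using an explicit function-pointer table (all names hypothetical):

    typedef struct vt vt;     /* the callee's class, per %vt above */
    typedef struct cls cls;   /* the caller's class, per %class above */

    typedef vt *(*method_fn)(vt *self, cls *arg);

    /* First word of the object is the method table, mirroring the
     * bitcast and vtable load in the IR. */
    struct vt {
      method_fn *vtable;
    };

    vt *convert(cls *this_, vt *ty) {
      method_fn fn = ty->vtable[4];  /* %vfn = getelementptr ... i64 4 */
      return fn(ty, this_);          /* the tail call through the slot */
    }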
/external/lzma/CPP/Windows/ |
PropVariant.cpp:
   31  p->vt = VT_ERROR;   // in PropVarEm_Alloc_Bstr()
   35  p->vt = VT_BSTR;    // in PropVarEm_Alloc_Bstr()
   44  p->vt = VT_BSTR;    // in PropVarEm_Set_Str()
   47  p->vt = VT_ERROR;   // in PropVarEm_Set_Str()
   54  vt = VT_EMPTY;      // in CPropVariant()
   60  vt = VT_EMPTY;      // in CPropVariant()
   66  vt = VT_EMPTY;      // in CPropVariant()
   72  vt = VT_EMPTY;      // in CPropVariant()
   99  vt = VT_BSTR;       // in operator=()
  114  vt = VT_BSTR;       // in operator=()
  [all …]
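Here vt is the discriminant of the Windows PROPVARIANT tagged union, and the matches show the discipline around it: constructors start at VT_EMPTY, and the string setters publish VT_BSTR on success or VT_ERROR when allocation fails. A minimal sketch of that tag discipline with a stand-in struct (not the real PROPVARIANT layout; the VT_* values are the usual VARENUM constants):

    #include <stdlib.h>
    #include <string.h>

    enum { VT_EMPTY = 0, VT_BSTR = 8, VT_ERROR = 10 };

    typedef struct {
      unsigned short vt;  /* tag: set before the payload is trusted */
      char *bstrVal;      /* stand-in for the BSTR arm of the union */
    } PropVar;

    static void prop_init(PropVar *p) {
      p->vt = VT_EMPTY;   /* constructors start empty, as above */
    }

    static void prop_set_str(PropVar *p, const char *s) {
      p->bstrVal = strdup(s);
      /* Record in the tag whether the payload is valid, as in
       * PropVarEm_Set_Str above. */
      p->vt = p->bstrVal ? VT_BSTR : VT_ERROR;
    }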
/external/XNNPACK/src/f32-sigmoid/gen/ |
psimd-p5-div-x4.c (matches in xnn_f32_sigmoid_ukernel__psimd_p5_div_x4):
   72  psimd_f32 vt = psimd_qfma_f32(vz, vn, vln2_hi);
   73  vt = psimd_qfma_f32(vt, vn, vln2_lo);
   77  psimd_f32 vp = psimd_qfma_f32(vc4, vt, vc5);
   78  vp = psimd_qfma_f32(vc3, vt, vp);
   79  vp = psimd_qfma_f32(vc2, vt, vp);
   80  vp = psimd_qfma_f32(vc1, vt, vp);
   86  vt = psimd_mul_f32(vt, vs);
   87  const psimd_f32 ve = psimd_qfma_f32(vs, vt, vp);
  131  psimd_f32 vt = psimd_qfma_f32(vz, vn, vln2_hi);
  132  vt = psimd_qfma_f32(vt, vn, vln2_lo);
  [all …]
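All the f32-sigmoid kernels in this directory (the entry above and those below) evaluate the same scheme and differ only in instruction set and in how the final reciprocal is computed (hardware div vs. Newton-Raphson, the nr2recps variants): reduce z against ln2 (split into hi/lo parts in the rr2 variants), evaluate a degree-5 polynomial in the remainder t, reconstruct e ≈ 2^n · (1 + t·p), then form e/(e + 1). A scalar sketch of that pipeline; the range reduction, the underflow guard, and the Taylor stand-ins for XNNPACK's minimax coefficients are assumptions, the exact constants live in the kernel sources:

    #include <math.h>

    float sigmoid_p5(float x) {
      /* Taylor coefficients as stand-ins for the minimax vc1..vc5. */
      const float c1 = 1.0f, c2 = 1.0f/2, c3 = 1.0f/6, c4 = 1.0f/24, c5 = 1.0f/120;
      const float log2e  = 0x1.715476p+0f;
      const float ln2_hi = 0x1.62E400p-1f;  /* ln2 split for extra precision */
      const float ln2_lo = 0x1.7F7D1Cp-20f;

      const float z = -fabsf(x);            /* evaluate exp(z) on the negative half */
      if (z < -87.0f)                       /* crude underflow guard for the sketch */
        return x > 0.0f ? 1.0f : 0.0f;

      const float n = nearbyintf(z * log2e);
      const float s = ldexpf(1.0f, (int)n); /* s = 2**n */

      float t = z - n * ln2_hi;             /* the vt / vln2_hi lines above */
      t -= n * ln2_lo;

      float p = c5 * t + c4;                /* the vp Horner chain above */
      p = p * t + c3;
      p = p * t + c2;
      p = p * t + c1;

      t *= s;                               /* vt = vt * vs */
      const float e = t * p + s;            /* ve = s*(1 + t*p) ~= exp(z) */
      const float f = e / (e + 1.0f);       /* the "div" in the kernel names */
      return x > 0.0f ? 1.0f - f : f;       /* mirror back for positive inputs */
    }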
sse41-p5-div-x4.c (matches in xnn_f32_sigmoid_ukernel__sse41_p5_div_x4):
   72  __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
   73  vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
   76  __m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
   77  vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
   78  vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
   79  vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
   85  vt = _mm_mul_ps(vt, vs);
   86  __m128 ve = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
  135  __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
  136  vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
  [all …]
sse2-p5-div-x4.c (matches in xnn_f32_sigmoid_ukernel__sse2_p5_div_x4):
   72  __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
   73  vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
   76  __m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
   77  vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
   78  vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
   79  vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
   85  vt = _mm_mul_ps(vt, vs);
   86  __m128 ve = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
  136  __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
  137  vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
  [all …]
neon-rr2-p5-nr2recps-x4.c (matches in xnn_f32_sigmoid_ukernel__neon_rr2_p5_nr2recps_x4):
   71  float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
   72  vt = vmlaq_f32(vt, vn, vln2_lo);
   75  float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
   76  vp = vmlaq_f32(vc3, vp, vt);
   77  vp = vmlaq_f32(vc2, vp, vt);
   78  vp = vmlaq_f32(vc1, vp, vt);
   84  vt = vmulq_f32(vt, vs);
   85  float32x4_t ve = vmlaq_f32(vs, vp, vt);
  141  float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
  142  vt = vmlaq_f32(vt, vn, vln2_lo);
  [all …]
neonfma-rr1-p5-div-x4.c (matches in xnn_f32_sigmoid_ukernel__neonfma_rr1_p5_div_x4):
   68  float32x4_t vt = vfmaq_f32(vz, vn, vln2);
   71  float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
   72  vp = vfmaq_f32(vc3, vp, vt);
   73  vp = vfmaq_f32(vc2, vp, vt);
   74  vp = vfmaq_f32(vc1, vp, vt);
   80  vt = vmulq_f32(vt, vs);
   81  float32x4_t ve = vfmaq_f32(vs, vp, vt);
  127  float32x4_t vt = vfmaq_f32(vz, vn, vln2);
  130  float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
  131  vp = vfmaq_f32(vc3, vp, vt);
  [all …]
avx2-rr1-p5-div-x8.c (matches in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x8):
   72  __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
   75  __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
   76  vp = _mm256_fmadd_ps(vp, vt, vc3);
   77  vp = _mm256_fmadd_ps(vp, vt, vc2);
   78  vp = _mm256_fmadd_ps(vp, vt, vc1);
   84  vt = _mm256_mul_ps(vt, vs);
   85  const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
  135  __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
  138  __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
  139  vp = _mm256_fmadd_ps(vp, vt, vc3);
  [all …]
neonfma-rr1-p5-nr2recps-x4.c (matches in xnn_f32_sigmoid_ukernel__neonfma_rr1_p5_nr2recps_x4):
   68  float32x4_t vt = vfmaq_f32(vz, vn, vln2);
   71  float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
   72  vp = vfmaq_f32(vc3, vp, vt);
   73  vp = vfmaq_f32(vc2, vp, vt);
   74  vp = vfmaq_f32(vc1, vp, vt);
   80  vt = vmulq_f32(vt, vs);
   81  float32x4_t ve = vfmaq_f32(vs, vp, vt);
  136  float32x4_t vt = vfmaq_f32(vz, vn, vln2);
  139  float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
  140  vp = vfmaq_f32(vc3, vp, vt);
  [all …]
psimd-p5-div-x8.c (matches in xnn_f32_sigmoid_ukernel__psimd_p5_div_x8):
  154  psimd_f32 vt = psimd_qfma_f32(vz, vn, vln2_hi);
  155  vt = psimd_qfma_f32(vt, vn, vln2_lo);
  159  psimd_f32 vp = psimd_qfma_f32(vc4, vt, vc5);
  160  vp = psimd_qfma_f32(vc3, vt, vp);
  161  vp = psimd_qfma_f32(vc2, vt, vp);
  162  vp = psimd_qfma_f32(vc1, vt, vp);
  168  vt = psimd_mul_f32(vt, vs);
  169  const psimd_f32 ve = psimd_qfma_f32(vs, vt, vp);
  213  psimd_f32 vt = psimd_qfma_f32(vz, vn, vln2_hi);
  214  vt = psimd_qfma_f32(vt, vn, vln2_lo);
  [all …]
/external/XNNPACK/src/f32-raddstoreexpminusmax/gen/ |
neonfma-p5-x4.c (matches in xnn_f32_raddstoreexpminusmax_ukernel__neonfma_p5_x4):
   67  float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2_hi);
   68  vt = vfmaq_f32(vt, vn, vminus_ln2_lo);
   71  float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
   72  vp = vfmaq_f32(vc3, vp, vt);
   73  vp = vfmaq_f32(vc2, vp, vt);
   74  vp = vfmaq_f32(vc1, vp, vt);
   80  vt = vmulq_f32(vt, vs);
   81  float32x4_t vf = vfmaq_f32(vs, vp, vt);
  124  float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2_hi);
  125  vt = vfmaq_f32(vt, vn, vminus_ln2_lo);
  [all …]
neon-p5-x4.c (matches in xnn_f32_raddstoreexpminusmax_ukernel__neon_p5_x4):
   68  float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
   69  vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
   72  float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
   73  vp = vmlaq_f32(vc3, vp, vt);
   74  vp = vmlaq_f32(vc2, vp, vt);
   75  vp = vmlaq_f32(vc1, vp, vt);
   81  vt = vmulq_f32(vt, vs);
   82  float32x4_t vf = vmlaq_f32(vs, vp, vt);
  125  float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
  126  vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
  [all …]
psimd-p5-x4.c (matches in xnn_f32_raddstoreexpminusmax_ukernel__psimd_p5_x4):
  118  psimd_f32 vt = psimd_qfma_f32(vx, vn, vminus_ln2_hi);
  119  vt = psimd_qfma_f32(vt, vn, vminus_ln2_lo);
  122  psimd_f32 vp = psimd_qfma_f32(vc4, vc5, vt);
  123  vp = psimd_qfma_f32(vc3, vp, vt);
  124  vp = psimd_qfma_f32(vc2, vp, vt);
  125  vp = psimd_qfma_f32(vc1, vp, vt);
  131  vt = psimd_mul_f32(vt, vs);
  132  psimd_f32 vf = psimd_qfma_f32(vs, vt, vp);
  166  psimd_f32 vt = psimd_qfma_f32(vx, vn, vminus_ln2_hi);
  167  vt = psimd_qfma_f32(vt, vn, vminus_ln2_lo);
  [all …]
sse2-p5-x4.c (matches in xnn_f32_raddstoreexpminusmax_ukernel__sse2_p5_x4):
  118  __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vx);
  119  vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
  122  __m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
  123  vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
  124  vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
  125  vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
  131  vt = _mm_mul_ps(vt, vs);
  132  __m128 vf = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
  166  __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vx);
  167  vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
  [all …]
psimd-p5-x8.c (matches in xnn_f32_raddstoreexpminusmax_ukernel__psimd_p5_x8):
  134  psimd_f32 vt = psimd_qfma_f32(vx, vn, vminus_ln2_hi);
  135  vt = psimd_qfma_f32(vt, vn, vminus_ln2_lo);
  138  psimd_f32 vp = psimd_qfma_f32(vc4, vc5, vt);
  139  vp = psimd_qfma_f32(vc3, vp, vt);
  140  vp = psimd_qfma_f32(vc2, vp, vt);
  141  vp = psimd_qfma_f32(vc1, vp, vt);
  147  vt = psimd_mul_f32(vt, vs);
  148  psimd_f32 vf = psimd_qfma_f32(vs, vt, vp);
  182  psimd_f32 vt = psimd_qfma_f32(vx, vn, vminus_ln2_hi);
  183  vt = psimd_qfma_f32(vt, vn, vminus_ln2_lo);
  [all …]
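The raddstoreexpminusmax kernels reuse the exp evaluation above as the first pass of a numerically stable softmax: each element becomes exp(x[i] - max), is stored, and is added into a running sum. A scalar sketch of that contract (the name and signature here are illustrative, not XNNPACK's actual ukernel prototype):

    #include <math.h>
    #include <stddef.h>

    /* Store exp(x - max) elementwise and return the sum of the stores. */
    float raddstoreexpminusmax(size_t n, const float *x, float max, float *out) {
      float sum = 0.0f;
      for (size_t i = 0; i < n; i++) {
        const float f = expf(x[i] - max);  /* the kernels use the p5 scheme */
        out[i] = f;
        sum += f;
      }
      return sum;
    }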
/external/XNNPACK/src/f32-vscaleexpminusmax/gen/ |
avx2-p5-x8.c (matches in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x8):
  117  __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
  118  vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
  121  __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
  122  vp = _mm256_fmadd_ps(vp, vt, vc3);
  123  vp = _mm256_fmadd_ps(vp, vt, vc2);
  124  vp = _mm256_fmadd_ps(vp, vt, vc1);
  130  vt = _mm256_mul_ps(vt, vs);
  131  __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
  167  __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
  168  vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
  [all …]
avx512f-p5-scalef-x16.c (matches in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x16):
   95  __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
   96  vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
   99  __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
  100  vp = _mm512_fmadd_ps(vp, vt, vc3);
  101  vp = _mm512_fmadd_ps(vp, vt, vc2);
  102  vp = _mm512_fmadd_ps(vp, vt, vc1);
  103  vp = _mm512_fmadd_ps(vp, vt, vc0);
  133  __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
  134  vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
  137  __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
  [all …]
avx2-p5-x16.c (matches in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x16):
  133  __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
  134  vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
  137  __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
  138  vp = _mm256_fmadd_ps(vp, vt, vc3);
  139  vp = _mm256_fmadd_ps(vp, vt, vc2);
  140  vp = _mm256_fmadd_ps(vp, vt, vc1);
  146  vt = _mm256_mul_ps(vt, vs);
  147  __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
  183  __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
  184  vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
  [all …]
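vscaleexpminusmax is the same exp(x - max) evaluation applied elementwise with one extra multiplication by a caller-supplied scale, e.g. 1/sum for the second pass of a softmax. A one-loop scalar sketch (signature illustrative):

    #include <math.h>
    #include <stddef.h>

    void vscaleexpminusmax(size_t n, const float *x, float *out,
                           float scale, float max) {
      for (size_t i = 0; i < n; i++)
        out[i] = scale * expf(x[i] - max);  /* kernels use the p5 scheme */
    }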
/external/XNNPACK/src/f32-vscaleextexp/gen/ |
avx512f-p5-scalef-x16.c (matches in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x16):
   96  __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
   97  vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
  100  __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
  101  vp = _mm512_fmadd_ps(vp, vt, vc3);
  102  vp = _mm512_fmadd_ps(vp, vt, vc2);
  103  vp = _mm512_fmadd_ps(vp, vt, vc1);
  104  vp = _mm512_fmadd_ps(vp, vt, vc0);
  130  __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
  131  vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
  134  __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
  [all …]
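The scalef kernels treat the 2^n scaling differently from the rest: the matches show the polynomial extended down to a c0 term (vp = fmadd(vp, vt, vc0)), so p approximates exp(t) directly, and the scaling by 2^n is presumably applied afterwards with _mm512_scalef_ps, given the kernel name, which preserves the full exponent range instead of materializing s = 2^n up front. A scalar analogue using ldexpf (names illustrative):

    #include <math.h>

    /* Evaluate p ~= exp(t) with the full c0..c5 polynomial, then scale
     * by 2**n, the scalar analogue of the scalef step. */
    float exp_scalef(float t, int n, const float c[6]) {
      float p = c[5] * t + c[4];
      p = p * t + c[3];
      p = p * t + c[2];
      p = p * t + c[1];
      p = p * t + c[0];   /* the vc0 fmadd visible in the matches */
      return ldexpf(p, n);
    }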
/external/libaom/libaom/av1/encoder/ |
var_based_part.c (matches in tree_to_node):
   47  VP128x128 *vt = (VP128x128 *)data;
   48  node->part_variances = &vt->part_variances;
   50  node->split[i] = &vt->split[i].part_variances.none;
   54  VP64x64 *vt = (VP64x64 *)data;
   55  node->part_variances = &vt->part_variances;
   57  node->split[i] = &vt->split[i].part_variances.none;
   61  VP32x32 *vt = (VP32x32 *)data;
   62  node->part_variances = &vt->part_variances;
   64  node->split[i] = &vt->split[i].part_variances.none;
   68  VP16x16 *vt = (VP16x16 *)data;
  [all …]
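tree_to_node repeats the same three statements per block size because VP128x128, VP64x64, VP32x32, and VP16x16 are distinct struct types that share a layout prefix, a part_variances record plus a split[4] of the next size down, so a void* plus a per-size cast stands in for polymorphism. A compressed two-level sketch of the pattern (types heavily simplified):

    typedef struct { int none; } PartVariances;  /* stand-in payload */

    typedef struct { PartVariances part_variances; } VP16;
    typedef struct { PartVariances part_variances; VP16 split[4]; } VP32;
    typedef struct { PartVariances part_variances; VP32 split[4]; } VP64;

    typedef struct {
      PartVariances *part_variances;
      int *split[4];
    } Node;

    /* Pick the cast by block size, then point the node at this level's
     * variance record and at those of its four children. */
    void tree_to_node(void *data, int bsize, Node *node) {
      if (bsize == 64) {
        VP64 *vt = (VP64 *)data;
        node->part_variances = &vt->part_variances;
        for (int i = 0; i < 4; i++)
          node->split[i] = &vt->split[i].part_variances.none;
      } else {  /* bsize == 32 */
        VP32 *vt = (VP32 *)data;
        node->part_variances = &vt->part_variances;
        for (int i = 0; i < 4; i++)
          node->split[i] = &vt->split[i].part_variances.none;
      }
    }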