/external/clang/test/CodeGen/ |
D | arm64-abi-vector.c |
    86   __char5 c5 = va_arg(ap, __char5);  in varargs_vec_5c() local
    87   sum = sum + c5.x + c5.y;  in varargs_vec_5c()
    170  __short5 c5 = va_arg(ap, __short5);  in varargs_vec_5s() local
    171  sum = sum + c5.x + c5.y;  in varargs_vec_5s()
    213  __int5 c5 = va_arg(ap, __int5);  in varargs_vec_5i() local
    214  sum = sum + c5.x + c5.y;  in varargs_vec_5i()
    255  __char5 c5 = va_arg(ap, __char5);  in varargs_vec() local
    258  sum = sum + c5.x + c5.y;  in varargs_vec()
    300  double test(__char3 *c3, __char5 *c5, __char9 *c9, __char19 *c19,  in test() argument
    303  double ret = varargs_vec(3, *c3, *c5, *c9, *c19, *s3, *s5, *i3, *i5, *d3);  in test()
    [all …]
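These hits all exercise passing odd-sized vectors through va_arg, which is what this ABI test checks. A minimal standalone sketch of the same pattern, assuming Clang (for ext_vector_type and the .x/.y lane accessors, as in the test); the main() driver is an illustration, not part of the test:

    #include <stdarg.h>
    #include <stdio.h>

    /* 5-element char vector, declared as in the test (Clang-specific extension). */
    typedef __attribute__((ext_vector_type(5))) char __char5;

    /* Sum the .x and .y lanes of n __char5 varargs, mirroring varargs_vec_5c(). */
    static double varargs_vec_5c(int n, ...) {
      va_list ap;
      va_start(ap, n);
      double sum = 0.0;
      for (int i = 0; i < n; i++) {
        __char5 c5 = va_arg(ap, __char5);  /* the ABI path under test: vector via va_arg */
        sum = sum + c5.x + c5.y;
      }
      va_end(ap);
      return sum;
    }

    int main(void) {
      __char5 v = {1, 2, 3, 4, 5};
      printf("%f\n", varargs_vec_5c(1, v));  /* 1 + 2 -> 3.000000 */
      return 0;
    }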
|
/external/python/cpython3/Lib/test/ |
D | test_normalization.py |
    66  c1,c2,c3,c4,c5 = [unistr(x) for x in line.split(';')[:-1]]
    81  self.assertTrue(c4 == NFC(c4) == NFC(c5), line)
    83  self.assertTrue(c5 == NFD(c4) == NFD(c5), line)
    85  NFKC(c3) == NFKC(c4) == NFKC(c5),
    87  self.assertTrue(c5 == NFKD(c1) == NFKD(c2) == \
    88  NFKD(c3) == NFKD(c4) == NFKD(c5),
    95  self.assertTrue(is_normalized("NFD", c5))
    98  self.assertTrue(is_normalized("NFKD", c5))
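Both copies of test_normalization.py (this Python 3 one and the Python 2 copy further down) drive the Unicode conformance file NormalizationTest.txt; the assertions above encode the invariants stated in that file's header, restated here over each test line c1;c2;c3;c4;c5:

    NFC:   c2 == NFC(c1) == NFC(c2) == NFC(c3)   and   c4 == NFC(c4) == NFC(c5)
    NFD:   c3 == NFD(c1) == NFD(c2) == NFD(c3)   and   c5 == NFD(c4) == NFD(c5)
    NFKC:  c4 == NFKC(c1) == NFKC(c2) == NFKC(c3) == NFKC(c4) == NFKC(c5)
    NFKD:  c5 == NFKD(c1) == NFKD(c2) == NFKD(c3) == NFKD(c4) == NFKD(c5)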
|
/external/cpuinfo/test/dmesg/ |
D | galaxy-s5-global.log |
    131  <4>[ 0.351226] [c5] CPU5: Booted secondary processor
    132  <6>[ 0.352099] [c5] CPU5: thread -1, cpu 1, socket 0, mpidr 80000001
    405  <6>[ 0.538663] [c5] bio: create slab <bio-0> at 0
    546  <6>[ 0.633818] [c5] IOVMM: Created debugfs entry at debugfs/iovmm
    547  <6>[ 0.633852] [c5] IOMMU: Created debugfs entry at debugfs/iommu
    574  <6>[ 0.820093] [c5] FIPS: self-tests for aes-asm (aes) passed
    575  <6>[ 0.820349] [c5] FIPS: self-tests for sha1-asm (sha1) passed
    589  <6>[ 0.847046] [c5] FIPS: self-tests for non-FIPS cipher_null-generic (cipher_null) passed
    590  <6>[ 0.847203] [c5] FIPS: self-tests for non-FIPS ecb-cipher_null (ecb(cipher_null)) passed
    591  <6>[ 0.847357] [c5] FIPS: self-tests for non-FIPS compress_null-generic (compress_null) passed
    [all …]
|
/external/python/cpython2/Lib/test/ |
D | test_normalization.py |
    57  c1,c2,c3,c4,c5 = [unistr(x) for x in line.split(';')[:-1]]
    72  self.assertTrue(c4 == NFC(c4) == NFC(c5), line)
    74  self.assertTrue(c5 == NFD(c4) == NFD(c5), line)
    76  NFKC(c3) == NFKC(c4) == NFKC(c5),
    78  self.assertTrue(c5 == NFKD(c1) == NFKD(c2) == \
    79  NFKD(c3) == NFKD(c4) == NFKD(c5),
|
/external/XNNPACK/src/f32-ppmm/gen/ |
D | 8x8-neon.c |
    52   float* c5 = (float*) ((uintptr_t) c4 + cm_stride);  in xnn_f32_ppmm_ukernel_8x8__neon() local
    54   c5 = c4;  in xnn_f32_ppmm_ukernel_8x8__neon()
    56   float* c6 = (float*) ((uintptr_t) c5 + cm_stride);  in xnn_f32_ppmm_ukernel_8x8__neon()
    58   c6 = c5;  in xnn_f32_ppmm_ukernel_8x8__neon()
    154  vst1q_f32(c5, vacc5x0123);  in xnn_f32_ppmm_ukernel_8x8__neon()
    155  vst1q_f32(c5 + 4, vacc5x4567);  in xnn_f32_ppmm_ukernel_8x8__neon()
    156  c5 = (float*) ((uintptr_t) c5 + cn_stride);  in xnn_f32_ppmm_ukernel_8x8__neon()
    180  vst1q_f32(c5, vacc5x0123); c5 += 4;  in xnn_f32_ppmm_ukernel_8x8__neon()
    207  vst1_f32(c5, vacc5x01); c5 += 2;  in xnn_f32_ppmm_ukernel_8x8__neon()
    226  vst1_lane_f32(c5, vacc5x01, 0);  in xnn_f32_ppmm_ukernel_8x8__neon()
|
D | 8x8-neonfma.c |
    52   float* c5 = (float*) ((uintptr_t) c4 + cm_stride);  in xnn_f32_ppmm_ukernel_8x8__neonfma() local
    54   c5 = c4;  in xnn_f32_ppmm_ukernel_8x8__neonfma()
    56   float* c6 = (float*) ((uintptr_t) c5 + cm_stride);  in xnn_f32_ppmm_ukernel_8x8__neonfma()
    58   c6 = c5;  in xnn_f32_ppmm_ukernel_8x8__neonfma()
    182  vst1q_f32(c5, vacc5x0123);  in xnn_f32_ppmm_ukernel_8x8__neonfma()
    183  vst1q_f32(c5 + 4, vacc5x4567);  in xnn_f32_ppmm_ukernel_8x8__neonfma()
    184  c5 = (float*) ((uintptr_t) c5 + cn_stride);  in xnn_f32_ppmm_ukernel_8x8__neonfma()
    208  vst1q_f32(c5, vacc5x0123); c5 += 4;  in xnn_f32_ppmm_ukernel_8x8__neonfma()
    235  vst1_f32(c5, vacc5x01); c5 += 2;  in xnn_f32_ppmm_ukernel_8x8__neonfma()
    254  vst1_lane_f32(c5, vacc5x01, 0);  in xnn_f32_ppmm_ukernel_8x8__neonfma()
|
/external/XNNPACK/src/f32-igemm/gen/ |
D | 7x8-fma3-broadcast.c |
    60   float* c5 = (float*) ((uintptr_t) c4 + cm_stride);  in xnn_f32_igemm_ukernel_7x8__fma3_broadcast() local
    62   c5 = c4;  in xnn_f32_igemm_ukernel_7x8__fma3_broadcast()
    64   float* c6 = (float*) ((uintptr_t) c5 + cm_stride);  in xnn_f32_igemm_ukernel_7x8__fma3_broadcast()
    66   c6 = c5;  in xnn_f32_igemm_ukernel_7x8__fma3_broadcast()
    171  _mm256_storeu_ps(c5, vacc5x01234567);  in xnn_f32_igemm_ukernel_7x8__fma3_broadcast()
    172  c5 = (float*) ((uintptr_t) c5 + cn_stride);  in xnn_f32_igemm_ukernel_7x8__fma3_broadcast()
    196  _mm_storeu_ps(c5, vacc5x0123);  in xnn_f32_igemm_ukernel_7x8__fma3_broadcast()
    212  c5 += 4;  in xnn_f32_igemm_ukernel_7x8__fma3_broadcast()
    221  _mm_storel_pi((__m64*) c5, vacc5x0123);  in xnn_f32_igemm_ukernel_7x8__fma3_broadcast()
    237  c5 += 2;  in xnn_f32_igemm_ukernel_7x8__fma3_broadcast()
    [all …]
|
D | 7x8-avx-broadcast.c |
    60   float* c5 = (float*) ((uintptr_t) c4 + cm_stride);  in xnn_f32_igemm_ukernel_7x8__avx_broadcast() local
    62   c5 = c4;  in xnn_f32_igemm_ukernel_7x8__avx_broadcast()
    64   float* c6 = (float*) ((uintptr_t) c5 + cm_stride);  in xnn_f32_igemm_ukernel_7x8__avx_broadcast()
    66   c6 = c5;  in xnn_f32_igemm_ukernel_7x8__avx_broadcast()
    171  _mm256_storeu_ps(c5, vacc5x01234567);  in xnn_f32_igemm_ukernel_7x8__avx_broadcast()
    172  c5 = (float*) ((uintptr_t) c5 + cn_stride);  in xnn_f32_igemm_ukernel_7x8__avx_broadcast()
    196  _mm_storeu_ps(c5, vacc5x0123);  in xnn_f32_igemm_ukernel_7x8__avx_broadcast()
    212  c5 += 4;  in xnn_f32_igemm_ukernel_7x8__avx_broadcast()
    221  _mm_storel_pi((__m64*) c5, vacc5x0123);  in xnn_f32_igemm_ukernel_7x8__avx_broadcast()
    237  c5 += 2;  in xnn_f32_igemm_ukernel_7x8__avx_broadcast()
    [all …]
|
D | 8x8-fma3-broadcast.c |
    60   float* c5 = (float*) ((uintptr_t) c4 + cm_stride);  in xnn_f32_igemm_ukernel_8x8__fma3_broadcast() local
    62   c5 = c4;  in xnn_f32_igemm_ukernel_8x8__fma3_broadcast()
    64   float* c6 = (float*) ((uintptr_t) c5 + cm_stride);  in xnn_f32_igemm_ukernel_8x8__fma3_broadcast()
    66   c6 = c5;  in xnn_f32_igemm_ukernel_8x8__fma3_broadcast()
    188  _mm256_storeu_ps(c5, vacc5x01234567);  in xnn_f32_igemm_ukernel_8x8__fma3_broadcast()
    189  c5 = (float*) ((uintptr_t) c5 + cn_stride);  in xnn_f32_igemm_ukernel_8x8__fma3_broadcast()
    215  _mm_storeu_ps(c5, vacc5x0123);  in xnn_f32_igemm_ukernel_8x8__fma3_broadcast()
    233  c5 += 4;  in xnn_f32_igemm_ukernel_8x8__fma3_broadcast()
    243  _mm_storel_pi((__m64*) c5, vacc5x0123);  in xnn_f32_igemm_ukernel_8x8__fma3_broadcast()
    261  c5 += 2;  in xnn_f32_igemm_ukernel_8x8__fma3_broadcast()
    [all …]
|
D | 6x8-psimd-loadsplat.c |
    60   float* c5 = (float*) ((uintptr_t) c4 + cm_stride);  in xnn_f32_igemm_ukernel_6x8__psimd_loadsplat() local
    62   c5 = c4;  in xnn_f32_igemm_ukernel_6x8__psimd_loadsplat()
    179  psimd_store_f32(c5, vacc5x0123);  in xnn_f32_igemm_ukernel_6x8__psimd_loadsplat()
    180  psimd_store_f32(c5 + 4, vacc5x4567);  in xnn_f32_igemm_ukernel_6x8__psimd_loadsplat()
    181  c5 = (float*) ((uintptr_t) c5 + cn_stride);  in xnn_f32_igemm_ukernel_6x8__psimd_loadsplat()
    202  psimd_store_f32(c5, vacc5x0123);  in xnn_f32_igemm_ukernel_6x8__psimd_loadsplat()
    216  c5 += 4;  in xnn_f32_igemm_ukernel_6x8__psimd_loadsplat()
    224  psimd_store2_f32(c5, vacc5x0123);  in xnn_f32_igemm_ukernel_6x8__psimd_loadsplat()
    238  c5 += 2;  in xnn_f32_igemm_ukernel_6x8__psimd_loadsplat()
    246  psimd_store1_f32(c5, vacc5x0123);  in xnn_f32_igemm_ukernel_6x8__psimd_loadsplat()
|
D | 6x8-fma3-broadcast.c |
    60   float* c5 = (float*) ((uintptr_t) c4 + cm_stride);  in xnn_f32_igemm_ukernel_6x8__fma3_broadcast() local
    62   c5 = c4;  in xnn_f32_igemm_ukernel_6x8__fma3_broadcast()
    154  _mm256_storeu_ps(c5, vacc5x01234567);  in xnn_f32_igemm_ukernel_6x8__fma3_broadcast()
    155  c5 = (float*) ((uintptr_t) c5 + cn_stride);  in xnn_f32_igemm_ukernel_6x8__fma3_broadcast()
    177  _mm_storeu_ps(c5, vacc5x0123);  in xnn_f32_igemm_ukernel_6x8__fma3_broadcast()
    191  c5 += 4;  in xnn_f32_igemm_ukernel_6x8__fma3_broadcast()
    199  _mm_storel_pi((__m64*) c5, vacc5x0123);  in xnn_f32_igemm_ukernel_6x8__fma3_broadcast()
    213  c5 += 2;  in xnn_f32_igemm_ukernel_6x8__fma3_broadcast()
    221  _mm_store_ss(c5, vacc5x0123);  in xnn_f32_igemm_ukernel_6x8__fma3_broadcast()
|
D | 6x8-avx-broadcast.c |
    60   float* c5 = (float*) ((uintptr_t) c4 + cm_stride);  in xnn_f32_igemm_ukernel_6x8__avx_broadcast() local
    62   c5 = c4;  in xnn_f32_igemm_ukernel_6x8__avx_broadcast()
    154  _mm256_storeu_ps(c5, vacc5x01234567);  in xnn_f32_igemm_ukernel_6x8__avx_broadcast()
    155  c5 = (float*) ((uintptr_t) c5 + cn_stride);  in xnn_f32_igemm_ukernel_6x8__avx_broadcast()
    177  _mm_storeu_ps(c5, vacc5x0123);  in xnn_f32_igemm_ukernel_6x8__avx_broadcast()
    191  c5 += 4;  in xnn_f32_igemm_ukernel_6x8__avx_broadcast()
    199  _mm_storel_pi((__m64*) c5, vacc5x0123);  in xnn_f32_igemm_ukernel_6x8__avx_broadcast()
    213  c5 += 2;  in xnn_f32_igemm_ukernel_6x8__avx_broadcast()
    221  _mm_store_ss(c5, vacc5x0123);  in xnn_f32_igemm_ukernel_6x8__avx_broadcast()
|
/external/XNNPACK/src/f32-gemm/gen/ |
D | 7x8-fma3-broadcast.c |
    65   float* c5 = (float*) ((uintptr_t) c4 + cm_stride);  in xnn_f32_gemm_ukernel_7x8__fma3_broadcast() local
    68   c5 = c4;  in xnn_f32_gemm_ukernel_7x8__fma3_broadcast()
    71   float* c6 = (float*) ((uintptr_t) c5 + cm_stride);  in xnn_f32_gemm_ukernel_7x8__fma3_broadcast()
    74   c6 = c5;  in xnn_f32_gemm_ukernel_7x8__fma3_broadcast()
    139  _mm256_storeu_ps(c5, vacc5x01234567);  in xnn_f32_gemm_ukernel_7x8__fma3_broadcast()
    140  c5 = (float*) ((uintptr_t) c5 + cn_stride);  in xnn_f32_gemm_ukernel_7x8__fma3_broadcast()
    171  _mm_storeu_ps(c5, vacc5x0123);  in xnn_f32_gemm_ukernel_7x8__fma3_broadcast()
    187  c5 += 4;  in xnn_f32_gemm_ukernel_7x8__fma3_broadcast()
    196  _mm_storel_pi((__m64*) c5, vacc5x0123);  in xnn_f32_gemm_ukernel_7x8__fma3_broadcast()
    212  c5 += 2;  in xnn_f32_gemm_ukernel_7x8__fma3_broadcast()
    [all …]
|
D | 7x8-avx-broadcast.c |
    65   float* c5 = (float*) ((uintptr_t) c4 + cm_stride);  in xnn_f32_gemm_ukernel_7x8__avx_broadcast() local
    68   c5 = c4;  in xnn_f32_gemm_ukernel_7x8__avx_broadcast()
    71   float* c6 = (float*) ((uintptr_t) c5 + cm_stride);  in xnn_f32_gemm_ukernel_7x8__avx_broadcast()
    74   c6 = c5;  in xnn_f32_gemm_ukernel_7x8__avx_broadcast()
    139  _mm256_storeu_ps(c5, vacc5x01234567);  in xnn_f32_gemm_ukernel_7x8__avx_broadcast()
    140  c5 = (float*) ((uintptr_t) c5 + cn_stride);  in xnn_f32_gemm_ukernel_7x8__avx_broadcast()
    171  _mm_storeu_ps(c5, vacc5x0123);  in xnn_f32_gemm_ukernel_7x8__avx_broadcast()
    187  c5 += 4;  in xnn_f32_gemm_ukernel_7x8__avx_broadcast()
    196  _mm_storel_pi((__m64*) c5, vacc5x0123);  in xnn_f32_gemm_ukernel_7x8__avx_broadcast()
    212  c5 += 2;  in xnn_f32_gemm_ukernel_7x8__avx_broadcast()
    [all …]
|
D | 8x8-fma3-broadcast.c |
    65   float* c5 = (float*) ((uintptr_t) c4 + cm_stride);  in xnn_f32_gemm_ukernel_8x8__fma3_broadcast() local
    68   c5 = c4;  in xnn_f32_gemm_ukernel_8x8__fma3_broadcast()
    71   float* c6 = (float*) ((uintptr_t) c5 + cm_stride);  in xnn_f32_gemm_ukernel_8x8__fma3_broadcast()
    74   c6 = c5;  in xnn_f32_gemm_ukernel_8x8__fma3_broadcast()
    153  _mm256_storeu_ps(c5, vacc5x01234567);  in xnn_f32_gemm_ukernel_8x8__fma3_broadcast()
    154  c5 = (float*) ((uintptr_t) c5 + cn_stride);  in xnn_f32_gemm_ukernel_8x8__fma3_broadcast()
    188  _mm_storeu_ps(c5, vacc5x0123);  in xnn_f32_gemm_ukernel_8x8__fma3_broadcast()
    206  c5 += 4;  in xnn_f32_gemm_ukernel_8x8__fma3_broadcast()
    216  _mm_storel_pi((__m64*) c5, vacc5x0123);  in xnn_f32_gemm_ukernel_8x8__fma3_broadcast()
    234  c5 += 2;  in xnn_f32_gemm_ukernel_8x8__fma3_broadcast()
    [all …]
|
D | 6x8-psimd-loadsplat.c |
    65   float* c5 = (float*) ((uintptr_t) c4 + cm_stride);  in xnn_f32_gemm_ukernel_6x8__psimd_loadsplat() local
    68   c5 = c4;  in xnn_f32_gemm_ukernel_6x8__psimd_loadsplat()
    150  psimd_store_f32(c5, vacc5x0123);  in xnn_f32_gemm_ukernel_6x8__psimd_loadsplat()
    151  psimd_store_f32(c5 + 4, vacc5x4567);  in xnn_f32_gemm_ukernel_6x8__psimd_loadsplat()
    152  c5 = (float*) ((uintptr_t) c5 + cn_stride);  in xnn_f32_gemm_ukernel_6x8__psimd_loadsplat()
    179  psimd_store_f32(c5, vacc5x0123);  in xnn_f32_gemm_ukernel_6x8__psimd_loadsplat()
    193  c5 += 4;  in xnn_f32_gemm_ukernel_6x8__psimd_loadsplat()
    201  psimd_store2_f32(c5, vacc5x0123);  in xnn_f32_gemm_ukernel_6x8__psimd_loadsplat()
    215  c5 += 2;  in xnn_f32_gemm_ukernel_6x8__psimd_loadsplat()
    223  psimd_store1_f32(c5, vacc5x0123);  in xnn_f32_gemm_ukernel_6x8__psimd_loadsplat()
|
D | 6x8-avx-broadcast.c |
    65   float* c5 = (float*) ((uintptr_t) c4 + cm_stride);  in xnn_f32_gemm_ukernel_6x8__avx_broadcast() local
    68   c5 = c4;  in xnn_f32_gemm_ukernel_6x8__avx_broadcast()
    125  _mm256_storeu_ps(c5, vacc5x01234567);  in xnn_f32_gemm_ukernel_6x8__avx_broadcast()
    126  c5 = (float*) ((uintptr_t) c5 + cn_stride);  in xnn_f32_gemm_ukernel_6x8__avx_broadcast()
    154  _mm_storeu_ps(c5, vacc5x0123);  in xnn_f32_gemm_ukernel_6x8__avx_broadcast()
    168  c5 += 4;  in xnn_f32_gemm_ukernel_6x8__avx_broadcast()
    176  _mm_storel_pi((__m64*) c5, vacc5x0123);  in xnn_f32_gemm_ukernel_6x8__avx_broadcast()
    190  c5 += 2;  in xnn_f32_gemm_ukernel_6x8__avx_broadcast()
    198  _mm_store_ss(c5, vacc5x0123);  in xnn_f32_gemm_ukernel_6x8__avx_broadcast()
|
D | 6x8-fma3-broadcast.c |
    65   float* c5 = (float*) ((uintptr_t) c4 + cm_stride);  in xnn_f32_gemm_ukernel_6x8__fma3_broadcast() local
    68   c5 = c4;  in xnn_f32_gemm_ukernel_6x8__fma3_broadcast()
    125  _mm256_storeu_ps(c5, vacc5x01234567);  in xnn_f32_gemm_ukernel_6x8__fma3_broadcast()
    126  c5 = (float*) ((uintptr_t) c5 + cn_stride);  in xnn_f32_gemm_ukernel_6x8__fma3_broadcast()
    154  _mm_storeu_ps(c5, vacc5x0123);  in xnn_f32_gemm_ukernel_6x8__fma3_broadcast()
    168  c5 += 4;  in xnn_f32_gemm_ukernel_6x8__fma3_broadcast()
    176  _mm_storel_pi((__m64*) c5, vacc5x0123);  in xnn_f32_gemm_ukernel_6x8__fma3_broadcast()
    190  c5 += 2;  in xnn_f32_gemm_ukernel_6x8__fma3_broadcast()
    198  _mm_store_ss(c5, vacc5x0123);  in xnn_f32_gemm_ukernel_6x8__fma3_broadcast()
|
/external/XNNPACK/src/f32-gemm/gen-inc/ |
D | 7x8-avx-broadcast.c |
    67   float* c5 = (float*) ((uintptr_t) c4 + cm_stride);  in xnn_f32_gemminc_ukernel_7x8__avx_broadcast() local
    70   c5 = c4;  in xnn_f32_gemminc_ukernel_7x8__avx_broadcast()
    73   float* c6 = (float*) ((uintptr_t) c5 + cm_stride);  in xnn_f32_gemminc_ukernel_7x8__avx_broadcast()
    76   c6 = c5;  in xnn_f32_gemminc_ukernel_7x8__avx_broadcast()
    141  _mm256_storeu_ps(c5, vacc5x01234567);  in xnn_f32_gemminc_ukernel_7x8__avx_broadcast()
    142  c5 = (float*) ((uintptr_t) c5 + cn_stride);  in xnn_f32_gemminc_ukernel_7x8__avx_broadcast()
    173  _mm_storeu_ps(c5, vacc5x0123);  in xnn_f32_gemminc_ukernel_7x8__avx_broadcast()
    189  c5 += 4;  in xnn_f32_gemminc_ukernel_7x8__avx_broadcast()
    198  _mm_storel_pi((__m64*) c5, vacc5x0123);  in xnn_f32_gemminc_ukernel_7x8__avx_broadcast()
    214  c5 += 2;  in xnn_f32_gemminc_ukernel_7x8__avx_broadcast()
    [all …]
|
D | 7x8-fma3-broadcast.c |
    67   float* c5 = (float*) ((uintptr_t) c4 + cm_stride);  in xnn_f32_gemminc_ukernel_7x8__fma3_broadcast() local
    70   c5 = c4;  in xnn_f32_gemminc_ukernel_7x8__fma3_broadcast()
    73   float* c6 = (float*) ((uintptr_t) c5 + cm_stride);  in xnn_f32_gemminc_ukernel_7x8__fma3_broadcast()
    76   c6 = c5;  in xnn_f32_gemminc_ukernel_7x8__fma3_broadcast()
    141  _mm256_storeu_ps(c5, vacc5x01234567);  in xnn_f32_gemminc_ukernel_7x8__fma3_broadcast()
    142  c5 = (float*) ((uintptr_t) c5 + cn_stride);  in xnn_f32_gemminc_ukernel_7x8__fma3_broadcast()
    173  _mm_storeu_ps(c5, vacc5x0123);  in xnn_f32_gemminc_ukernel_7x8__fma3_broadcast()
    189  c5 += 4;  in xnn_f32_gemminc_ukernel_7x8__fma3_broadcast()
    198  _mm_storel_pi((__m64*) c5, vacc5x0123);  in xnn_f32_gemminc_ukernel_7x8__fma3_broadcast()
    214  c5 += 2;  in xnn_f32_gemminc_ukernel_7x8__fma3_broadcast()
    [all …]
|
D | 8x8-fma3-broadcast.c |
    67   float* c5 = (float*) ((uintptr_t) c4 + cm_stride);  in xnn_f32_gemminc_ukernel_8x8__fma3_broadcast() local
    70   c5 = c4;  in xnn_f32_gemminc_ukernel_8x8__fma3_broadcast()
    73   float* c6 = (float*) ((uintptr_t) c5 + cm_stride);  in xnn_f32_gemminc_ukernel_8x8__fma3_broadcast()
    76   c6 = c5;  in xnn_f32_gemminc_ukernel_8x8__fma3_broadcast()
    155  _mm256_storeu_ps(c5, vacc5x01234567);  in xnn_f32_gemminc_ukernel_8x8__fma3_broadcast()
    156  c5 = (float*) ((uintptr_t) c5 + cn_stride);  in xnn_f32_gemminc_ukernel_8x8__fma3_broadcast()
    190  _mm_storeu_ps(c5, vacc5x0123);  in xnn_f32_gemminc_ukernel_8x8__fma3_broadcast()
    208  c5 += 4;  in xnn_f32_gemminc_ukernel_8x8__fma3_broadcast()
    218  _mm_storel_pi((__m64*) c5, vacc5x0123);  in xnn_f32_gemminc_ukernel_8x8__fma3_broadcast()
    236  c5 += 2;  in xnn_f32_gemminc_ukernel_8x8__fma3_broadcast()
    [all …]
|
D | 6x8-psimd-loadsplat.c |
    67   float* c5 = (float*) ((uintptr_t) c4 + cm_stride);  in xnn_f32_gemminc_ukernel_6x8__psimd_loadsplat() local
    70   c5 = c4;  in xnn_f32_gemminc_ukernel_6x8__psimd_loadsplat()
    152  psimd_store_f32(c5, vacc5x0123);  in xnn_f32_gemminc_ukernel_6x8__psimd_loadsplat()
    153  psimd_store_f32(c5 + 4, vacc5x4567);  in xnn_f32_gemminc_ukernel_6x8__psimd_loadsplat()
    154  c5 = (float*) ((uintptr_t) c5 + cn_stride);  in xnn_f32_gemminc_ukernel_6x8__psimd_loadsplat()
    181  psimd_store_f32(c5, vacc5x0123);  in xnn_f32_gemminc_ukernel_6x8__psimd_loadsplat()
    195  c5 += 4;  in xnn_f32_gemminc_ukernel_6x8__psimd_loadsplat()
    203  psimd_store2_f32(c5, vacc5x0123);  in xnn_f32_gemminc_ukernel_6x8__psimd_loadsplat()
    217  c5 += 2;  in xnn_f32_gemminc_ukernel_6x8__psimd_loadsplat()
    225  psimd_store1_f32(c5, vacc5x0123);  in xnn_f32_gemminc_ukernel_6x8__psimd_loadsplat()
|
D | 6x8-fma3-broadcast.c |
    67   float* c5 = (float*) ((uintptr_t) c4 + cm_stride);  in xnn_f32_gemminc_ukernel_6x8__fma3_broadcast() local
    70   c5 = c4;  in xnn_f32_gemminc_ukernel_6x8__fma3_broadcast()
    127  _mm256_storeu_ps(c5, vacc5x01234567);  in xnn_f32_gemminc_ukernel_6x8__fma3_broadcast()
    128  c5 = (float*) ((uintptr_t) c5 + cn_stride);  in xnn_f32_gemminc_ukernel_6x8__fma3_broadcast()
    156  _mm_storeu_ps(c5, vacc5x0123);  in xnn_f32_gemminc_ukernel_6x8__fma3_broadcast()
    170  c5 += 4;  in xnn_f32_gemminc_ukernel_6x8__fma3_broadcast()
    178  _mm_storel_pi((__m64*) c5, vacc5x0123);  in xnn_f32_gemminc_ukernel_6x8__fma3_broadcast()
    192  c5 += 2;  in xnn_f32_gemminc_ukernel_6x8__fma3_broadcast()
    200  _mm_store_ss(c5, vacc5x0123);  in xnn_f32_gemminc_ukernel_6x8__fma3_broadcast()
|
D | 6x8-avx-broadcast.c |
    67   float* c5 = (float*) ((uintptr_t) c4 + cm_stride);  in xnn_f32_gemminc_ukernel_6x8__avx_broadcast() local
    70   c5 = c4;  in xnn_f32_gemminc_ukernel_6x8__avx_broadcast()
    127  _mm256_storeu_ps(c5, vacc5x01234567);  in xnn_f32_gemminc_ukernel_6x8__avx_broadcast()
    128  c5 = (float*) ((uintptr_t) c5 + cn_stride);  in xnn_f32_gemminc_ukernel_6x8__avx_broadcast()
    156  _mm_storeu_ps(c5, vacc5x0123);  in xnn_f32_gemminc_ukernel_6x8__avx_broadcast()
    170  c5 += 4;  in xnn_f32_gemminc_ukernel_6x8__avx_broadcast()
    178  _mm_storel_pi((__m64*) c5, vacc5x0123);  in xnn_f32_gemminc_ukernel_6x8__avx_broadcast()
    192  c5 += 2;  in xnn_f32_gemminc_ukernel_6x8__avx_broadcast()
    200  _mm_store_ss(c5, vacc5x0123);  in xnn_f32_gemminc_ukernel_6x8__avx_broadcast()
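Every XNNPACK microkernel listed above sets up c5 the same way: advance c4 by cm_stride bytes, then clamp c5 back to c4 when the tile has fewer than six rows (c6, where present, is derived from c5 likewise), so stores for absent rows harmlessly alias the last valid row. A minimal standalone C sketch of that pattern: the names c5, cm_stride, and mr come from the snippets, while setup_row_pointers() and the driver are hypothetical, not XNNPACK API, and the condition guarding the clamp (elided from the snippets) is assumed here to be a plain mr check:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Derive up to 6 output-row pointers from c0, cm_stride bytes apart,
       clamping each absent row (i >= mr) to the previous row's pointer. */
    static void setup_row_pointers(float* c0, size_t cm_stride, size_t mr,
                                   float* c[6]) {
      c[0] = c0;
      for (size_t i = 1; i < 6; i++) {
        c[i] = (float*) ((uintptr_t) c[i - 1] + cm_stride);
        if (mr <= i) {
          c[i] = c[i - 1];  /* e.g. c5 = c4 when mr < 6 */
        }
      }
    }

    int main(void) {
      float out[6 * 8];
      float* c[6];
      setup_row_pointers(out, 8 * sizeof(float), 5, c);
      printf("c5 aliases c4: %d\n", c[5] == c[4]);  /* prints 1 */
      return 0;
    }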
|
/external/mesa3d/src/intel/tools/tests/gen6/ |
D | send.expected |
    12  31 01 60 02 c5 0f cf 21 44 00 6e 00 40 70 10 04
    28  31 01 60 02 c5 0f ef 21 44 00 6e 00 40 70 10 02
    30  31 01 60 02 c5 0f 4f 20 44 00 6e 00 00 a0 10 02
    31  31 01 60 02 c5 0f 6f 20 44 00 6e 00 01 a1 10 02
    32  31 01 60 02 c5 0f af 20 44 00 6e 00 02 a2 10 02
    33  31 01 60 02 c5 0f ef 20 44 00 6e 00 03 a3 10 02
    34  31 01 60 02 c5 0f 2f 21 44 00 6e 00 04 a4 10 02
    35  31 01 60 02 c5 0f 6f 21 44 00 6e 00 05 a5 10 02
    36  31 01 60 02 c5 0f af 21 44 00 6e 00 06 a6 10 02
    41  31 01 60 02 c5 0f 6f 21 44 00 6e 00 01 80 18 04
    [all …]
|