
Searched refs:x8 (Results 1 – 25 of 510) sorted by relevance


/third_party/ffmpeg/tests/ref/fate/
vp9-encparams
8 block 3 64:16 16x8 0
9 block 4 64:24 16x8 0
23 block 18 144:32 16x8 16
24 block 19 144:40 16x8 16
25 block 20 128:48 8x8 -12
26 block 21 136:48 8x8 -12
27 block 22 128:56 8x8 -12
28 block 23 136:56 8x8 -12
30 block 25 160:32 8x8 33
31 block 26 168:32 8x8 33
[all …]
pixelutils
3 [OK] [UU] SAD [random] 8x8=5178 ref=5178
8 [OK] [AU] SAD [random] 8x8=5821 ref=5821
13 [OK] [AA] SAD [random] 8x8=5130 ref=5130
18 [OK] [UU] SAD [max] 8x8=16320 ref=16320
23 [OK] [AU] SAD [max] 8x8=16320 ref=16320
28 [OK] [AA] SAD [max] 8x8=16320 ref=16320
33 [OK] [UU] SAD [min] 8x8=0 ref=0
38 [OK] [AU] SAD [min] 8x8=0 ref=0
43 [OK] [AA] SAD [min] 8x8=0 ref=0
52 [OK] [UU] SAD [small] 8x8=6510 ref=6510
[all …]
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/
Matmul12X16Fp16.S
44 ldr x8, [sp]
94 mul x8, x8, x21 // output_stride
718 add x11, x11, x8
722 add x11, x11, x8
726 add x11, x11, x8
730 add x11, x11, x8
734 add x11, x11, x8
738 add x11, x11, x8
742 add x11, x11, x8
746 add x11, x11, x8
[all …]
MatmulBaseFp16Neon.S
41 ldr x8, [sp]
43 add x8, x8, x8 // stride * sizeof(float16_t)
230 st1 {v16.8h}, [x11], x8
231 st1 {v17.8h}, [x11], x8
232 st1 {v18.8h}, [x11], x8
233 st1 {v19.8h}, [x11], x8
234 st1 {v20.8h}, [x11], x8
235 st1 {v21.8h}, [x11], x8
236 st1 {v22.8h}, [x11], x8
237 st1 {v23.8h}, [x11], x8
[all …]
MatmulFp16Opt.S
41 ldr x8, [sp]
54 mul x15, x7, x8
57 mul x16, x8, x21 // kernel_size * 8 * sizeof(float16_t)
60 mul x8, x8, x21
630 add x11, x11, x8
634 add x11, x11, x8
638 add x11, x11, x8
642 add x11, x11, x8
646 add x11, x11, x8
650 add x11, x11, x8
[all …]
ConvDwFp16Row.S
42 mov x8, x4
45 cmp x8, #32
47 sub x8, x8, #32
53 cmp x8, #32
74 sub x8, x8, #32
75 cmp x8, #32
93 cmp x8, #8
102 sub x8, x8, #8
103 cmp x8, #8
107 cmp x8, #0
[all …]
WinogradTransRightFp16.S
27 mov x8, #8 // 4 * sizeof(float16)
28 mul x8, x6, x8
29 mul x9, x5, x8 // step for S
45 sub x2, x2, x8
61 add x14, x17, x8
62 add x16, x14, x8
63 add x19, x16, x8
80 sub x2, x2, x8
98 add x14, x17, x8
99 add x16, x14, x8
[all …]
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/
MatmulFp32OptRow8.S
42 ldr x8, [sp]
57 mul x8, x8, x22 // stride * sizeof(float), in c4 stride == row
58 mul x8, x8, x22 // col stride
72 mul x15, x7, x8
75 mul x16, x8, x21 // kernel_size * 8 * sizeof(float)
78 mul x8, x8, x21
322 add x11, x11, x8
326 add x11, x11, x8
330 add x11, x11, x8
334 add x11, x11, x8
[all …]
MatmulFp32OptRow12.S
42 ldr x8, [sp]
57 mul x8, x8, x22 // stride * sizeof(float), in c4 stride == row
58 mul x8, x8, x22 // col stride
72 mul x15, x7, x8
75 mul x16, x8, x21 // kernel_size * 8 * sizeof(float)
78 mul x8, x8, x21
380 add x11, x11, x8
384 add x11, x11, x8
388 add x11, x11, x8
392 add x11, x11, x8
[all …]
MatmulFp32OptRow4.S
42 ldr x8, [sp]
58 mul x8, x8, x22 // stride * sizeof(float), in c4 stride == row
59 mul x8, x8, x22 // col stride
73 mul x15, x7, x8
76 mul x16, x8, x21 // kernel_size * 8 * sizeof(float)
79 mul x8, x8, x21
263 add x11, x11, x8
267 add x11, x11, x8
271 add x11, x11, x8
273 add x11, x11, x8
[all …]
ConvDwFp32Row.S
42 mov x8, x4
45 cmp x8, #16
47 sub x8, x8, #16
53 cmp x8, #16
74 sub x8, x8, #16
75 cmp x8, #16
93 cmp x8, #4
102 sub x8, x8, #4
103 cmp x8, #4
107 cmp x8, #0
[all …]
MatmulFp32Opt.S
42 ldr x8, [sp]
57 mul x8, x8, x22 // stride * sizeof(float), in c4 stride == row
58 mul x8, x8, x22 // col stride
73 mul x15, x7, x8
76 mul x16, x8, x21 // kernel_size * 8 * sizeof(float)
79 mul x8, x8, x21
748 add x11, x11, x8
752 add x11, x11, x8
756 add x11, x11, x8
760 add x11, x11, x8
[all …]
AdderFp32.S
41 ldr x8, [sp]
47 mul x8, x8, x20
426 add x11, x11, x8
430 add x11, x11, x8
434 add x11, x11, x8
438 add x11, x11, x8
442 add x11, x11, x8
446 add x11, x11, x8
450 add x11, x11, x8
454 add x11, x11, x8
[all …]
WinogradTransRight.S
35 mov x8, #16 // 4 * sizeof(float)
36 mul x8, x6, x8
37 mul x9, x5, x8 // step for S
53 sub x2, x2, x8
69 add x14, x17, x8
70 add x16, x14, x8
71 add x19, x16, x8
88 sub x2, x2, x8
106 add x14, x17, x8
107 add x16, x14, x8
[all …]
WinogradTransLeft.S
35 mov x8, #16 // 4 * sizeof(float)
36 mul x8, x6, x8
37 mul x9, x3, x8
38 sub x9, x9, x8
39 add x7, x9, x8 // step for S
56 sub x2, x2, x8
89 sub x2, x2, x8
120 sub x2, x2, x8
141 sub x2, x2, x8
147 add x13, x13, x8
[all …]
/third_party/openh264/codec/common/arm64/
expand_picture_aarch64_neon.S
41 mov x8, x3
55 sub x8, x8, #1
56 cbnz x8, _expand_picture_luma_loop2
68 mov x8, #32
72 sub x8, x8, #1
73 cbnz x8, _expand_picture_luma_loop1
87 mov x8, x3
101 sub x8, x8, #1
102 cbnz x8, _expand_picture_chroma_loop2
121 mov x8, #16
[all …]
/third_party/ffmpeg/libswresample/aarch64/
audio_convert_neon.S
147 mov x8, x0
170 st1 {v16.d}[0], [x8], x12
172 st1 {v16.d}[1], [x8], x12
174 st1 {v17.d}[0], [x8], x12
176 st1 {v17.d}[1], [x8], x12
181 st1 {v18.d}[0], [x8], x12
184 st1 {v18.d}[1], [x8], x12
187 st1 {v19.d}[0], [x8], x12
190 st1 {v19.d}[1], [x8], x12
193 st1 {v18.d}[0], [x8], x12
[all …]
/third_party/ffmpeg/libavresample/aarch64/
audio_convert_neon.S
147 mov x8, x0
170 st1 {v16.d}[0], [x8], x12
172 st1 {v16.d}[1], [x8], x12
174 st1 {v17.d}[0], [x8], x12
176 st1 {v17.d}[1], [x8], x12
181 st1 {v18.d}[0], [x8], x12
184 st1 {v18.d}[1], [x8], x12
187 st1 {v19.d}[0], [x8], x12
190 st1 {v19.d}[1], [x8], x12
193 st1 {v18.d}[0], [x8], x12
[all …]
/third_party/musl/porting/linux/user/arch/aarch64/
syscall_arch.h
27 register long x8 __asm__("x8") = n; in __syscall0()
29 __asm_syscall("r"(x8)); in __syscall0()
34 register long x8 __asm__("x8") = n; in __syscall1()
36 __asm_syscall("r"(x8), "0"(x0)); in __syscall1()
41 register long x8 __asm__("x8") = n; in __syscall2()
44 __asm_syscall("r"(x8), "0"(x0), "r"(x1)); in __syscall2()
49 register long x8 __asm__("x8") = n; in __syscall3()
53 __asm_syscall("r"(x8), "0"(x0), "r"(x1), "r"(x2)); in __syscall3()
58 register long x8 __asm__("x8") = n; in __syscall4()
63 __asm_syscall("r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3)); in __syscall4()
[all …]
/third_party/musl/arch/aarch64/
syscall_arch.h
12 register long x8 __asm__("x8") = n; in __syscall0()
14 __asm_syscall("r"(x8)); in __syscall0()
19 register long x8 __asm__("x8") = n; in __syscall1()
21 __asm_syscall("r"(x8), "0"(x0)); in __syscall1()
26 register long x8 __asm__("x8") = n; in __syscall2()
29 __asm_syscall("r"(x8), "0"(x0), "r"(x1)); in __syscall2()
34 register long x8 __asm__("x8") = n; in __syscall3()
38 __asm_syscall("r"(x8), "0"(x0), "r"(x1), "r"(x2)); in __syscall3()
43 register long x8 __asm__("x8") = n; in __syscall4()
48 __asm_syscall("r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3)); in __syscall4()
[all …]
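The two syscall_arch.h hits above (the ported copy and the stock aarch64 copy) show why x8 matters so much in AArch64 code: the Linux syscall ABI passes the syscall number in x8 and the arguments in x0..x5, with the result returned in x0. A minimal, self-contained sketch of that convention follows; it mirrors the register setup visible in the matched lines but does not reproduce musl's actual __asm_syscall macro, and the wrapper name my_syscall1 is purely illustrative.

#include <sys/syscall.h>

/* Sketch only: an AArch64 Linux syscall with one argument.
 * The syscall number goes in x8; the first argument and the return value use x0. */
static long my_syscall1(long n, long a0)
{
    register long x8 __asm__("x8") = n;   /* syscall number */
    register long x0 __asm__("x0") = a0;  /* arg0 in, result out */
    __asm__ volatile ("svc #0"
                      : "+r"(x0)
                      : "r"(x8)
                      : "memory");
    return x0;
}

/* Illustrative use: my_syscall1(SYS_close, fd); */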
/third_party/openssl/crypto/poly1305/asm/arm64/
poly1305-armv8.S
32 ldp x7,x8,[x1] // load key
38 rev x8,x8
42 and x8,x8,x9 // &=0ffffffc0ffffffc
43 stp x7,x8,[x0,#32] // save key value
50 adr x8,poly1305_emit_neon
53 csel x13,x13,x8,eq
73 ldp x7,x8,[x0,#32] // load key value
75 add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
97 mul x10,x4,x8 // h0*r1
99 umulh x14,x4,x8
[all …]
/third_party/node/deps/npm/node_modules/bcrypt-pbkdf/
index.js
359 function F(S, x8, i) { argument
360 return (((S[0][x8[i+3]] +
361 S[1][x8[i+2]]) ^
362 S[2][x8[i+1]]) +
363 S[3][x8[i]]);
366 Blowfish.prototype.encipher = function(x, x8) { argument
367 if (x8 === undefined) {
368 x8 = new Uint8Array(x.buffer);
370 x8 = x8.subarray(x.byteOffset);
374 x[1] ^= F(this.S, x8, 0) ^ this.P[i];
[all …]
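In the bcrypt-pbkdf matches above, x8 is not a register at all: it is a Uint8Array byte view over the 32-bit Blowfish block words, used to index the four S-boxes inside the round function F. A hedged C restatement of that round function (the names blf_F and S are illustrative, not the module's API) reads more easily than the line-wrapped search hits:

#include <stdint.h>

/* Blowfish round function as written in the matched index.js lines:
 * ((S0[b3] + S1[b2]) ^ S2[b1]) + S3[b0], with natural wraparound at 2^32. */
static uint32_t blf_F(const uint32_t S[4][256], const uint8_t *x8, int i)
{
    return ((S[0][x8[i + 3]] + S[1][x8[i + 2]]) ^ S[2][x8[i + 1]]) + S[3][x8[i]];
}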
/third_party/openssl/crypto/bn/asm/arm64/
armv8-mont.S
20 ldp x7,x8,[x1],#16 // ap[0..1]
29 mul x10,x8,x9 // ap[1]*bp[0]
30 umulh x11,x8,x9
54 ldr x8,[x1],#8
61 mul x10,x8,x9 // ap[j]*bp[0]
63 umulh x11,x8,x9
90 ldp x7,x8,[x1],#16
98 mul x10,x8,x9 // ap[1]*bp[i]
100 umulh x11,x8,x9
115 ldr x8,[x1],#8
[all …]
/third_party/openh264/codec/encoder/core/arm64/
svc_motion_estimation_aarch64_neon.S
78 mov x8, x0
80 add x8, x8, x6
85 subs x0, x8, x7
112 add x8, x8, x3
120 subs x0, x8, x7
147 add x8, x8, x3
162 mov x8, x0
164 add x8, x8, x6
169 subs x0, x8, x7
190 add x8, x8, x3
[all …]
/third_party/ffmpeg/libavcodec/aarch64/
vp9mc_aarch64.S
31 ldp x7, x8, [x2, #16]
34 stp x7, x8, [x0, #16]
40 ldp x7, x8, [x2, #80]
43 stp x7, x8, [x0, #80]
56 ldp x7, x8, [x2, #16]
59 stp x7, x8, [x0, #16]
73 ldp x7, x8, [x2, #16]
76 stp x7, x8, [x0, #16]
