;************************************************************************
;* SIMD-optimized lossless video encoding functions
;* Copyright (c) 2000, 2001 Fabrice Bellard
;* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
;*
;* MMX optimization by Nick Kurshev <nickols_k@mail.ru>
;* Conversion to NASM format by Tiancheng "Timothy" Gu <timothygu99@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

cextern pb_80

SECTION .text

; void ff_diff_bytes(uint8_t *dst, const uint8_t *src1, const uint8_t *src2,
;                    intptr_t w);
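; Common prologue for all diff_bytes variants: loads the width argument
; (from the stack on x86_32, where only three register arguments are
; declared) and reserves a scratch register t0 that is used as the byte
; offset i.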
%macro DIFF_BYTES_PROLOGUE 0
%if ARCH_X86_32
cglobal diff_bytes, 3,5,2, dst, src1, src2
%define wq r4q
    DECLARE_REG_TMP 3
    mov               wq, r3mp
%else
cglobal diff_bytes, 4,5,2, dst, src1, src2, w
    DECLARE_REG_TMP 4
%endif ; ARCH_X86_32
%define i t0q
%endmacro

; labels to jump to if w < regsize and w < 0
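; Rounds w down to a multiple of 2 * regsize, advances all three pointers by
; that amount and negates the offset, so the main loop can count a negative
; index up towards zero.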
%macro DIFF_BYTES_LOOP_PREP 2
    mov                i, wq
    and                i, -2 * regsize
        js            %2
        jz            %1
    add             dstq, i
    add            src1q, i
    add            src2q, i
    neg                i
%endmacro

; mov type used for src1q, dstq, first reg, second reg
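; Subtracts 2 * regsize bytes of src2 from src1 per invocation.  In the SSE
; path src2 is loaded with movu first, since psubb with a memory operand
; would require 16-byte alignment.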
%macro DIFF_BYTES_LOOP_CORE 4
%if mmsize != 16
    mov%1             %3, [src1q + i]
    mov%1             %4, [src1q + i + regsize]
    psubb             %3, [src2q + i]
    psubb             %4, [src2q + i + regsize]
    mov%2           [dstq + i], %3
    mov%2 [regsize + dstq + i], %4
%else
    ; SSE enforces alignment of psubb operand
    mov%1             %3, [src1q + i]
    movu              %4, [src2q + i]
    psubb             %3, %4
    mov%2     [dstq + i], %3
    mov%1             %3, [src1q + i + regsize]
    movu              %4, [src2q + i + regsize]
    psubb             %3, %4
    mov%2 [regsize + dstq + i], %3
%endif
%endmacro

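; Emits a full function body: the main SIMD loop handling 2 * regsize bytes
; per iteration, an extra xmm-sized pass when building the ymm version, and
; a scalar byte loop for the remaining tail.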
%macro DIFF_BYTES_BODY 2 ; mov type used for src1q, for dstq
    %define regsize mmsize
.loop_%1%2:
    DIFF_BYTES_LOOP_CORE %1, %2, m0, m1
    add                i, 2 * regsize
        jl    .loop_%1%2
.skip_main_%1%2:
    and               wq, 2 * regsize - 1
        jz     .end_%1%2
%if mmsize > 16
    ; fall back to narrower xmm
    %define regsize (mmsize / 2)
    DIFF_BYTES_LOOP_PREP .setup_loop_gpr_aa, .end_aa
.loop2_%1%2:
    DIFF_BYTES_LOOP_CORE %1, %2, xm0, xm1
    add                i, 2 * regsize
        jl   .loop2_%1%2
.setup_loop_gpr_%1%2:
    and               wq, 2 * regsize - 1
        jz     .end_%1%2
%endif
    add             dstq, wq
    add            src1q, wq
    add            src2q, wq
    neg               wq
.loop_gpr_%1%2:
    mov              t0b, [src1q + wq]
    sub              t0b, [src2q + wq]
    mov      [dstq + wq], t0b
    inc               wq
        jl .loop_gpr_%1%2
.end_%1%2:
    REP_RET
%endmacro

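; MMX version, built only for x86_32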
%if ARCH_X86_32
INIT_MMX mmx
DIFF_BYTES_PROLOGUE
    %define regsize mmsize
    DIFF_BYTES_LOOP_PREP .skip_main_aa, .end_aa
    DIFF_BYTES_BODY    a, a
%undef i
%endif

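; SSE2 version: dispatches at run time to the aligned/aligned, unaligned
; src1/aligned dst, or fully unaligned loop depending on pointer alignment.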
INIT_XMM sse2
DIFF_BYTES_PROLOGUE
    %define regsize mmsize
    DIFF_BYTES_LOOP_PREP .skip_main_aa, .end_aa
    test            dstq, regsize - 1
        jnz     .loop_uu
    test           src1q, regsize - 1
        jnz     .loop_ua
    DIFF_BYTES_BODY    a, a
    DIFF_BYTES_BODY    u, a
    DIFF_BYTES_BODY    u, u
%undef i

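; AVX2 version, handling 2 * 32 bytes per main-loop iteration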
%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
DIFF_BYTES_PROLOGUE
    %define regsize mmsize
    ; Directly using unaligned SSE2 version is marginally faster than
    ; branching based on arguments.
    DIFF_BYTES_LOOP_PREP .skip_main_uu, .end_uu
    test            dstq, regsize - 1
        jnz     .loop_uu
    test           src1q, regsize - 1
        jnz     .loop_ua
    DIFF_BYTES_BODY    a, a
    DIFF_BYTES_BODY    u, a
    DIFF_BYTES_BODY    u, u
%undef i
%endif


;--------------------------------------------------------------------------------------------------
;void sub_left_predict(uint8_t *dst, uint8_t *src, ptrdiff_t stride, ptrdiff_t width, int height)
;--------------------------------------------------------------------------------------------------

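; Left prediction: dst[i] = src[i] - src[i - 1] for every byte of a row; the
; predictor for the first byte of a row is the last source byte of the
; previous row (0x80 for the very first byte).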
INIT_XMM avx
cglobal sub_left_predict, 5,6,5, dst, src, stride, width, height, x
    mova             m1, [pb_80] ; prev initial
    add            dstq, widthq
    add            srcq, widthq
    lea              xd, [widthq-1]
    neg          widthq
    and              xd, 15
    pinsrb           m4, m1, xd, 15
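    ; m4 = m1 (all 0x80) with lane 15 set to (width - 1) & 15; used below as
    ; a pshufb mask that moves the row's last source byte into lane 15 and
    ; zeroes all other lanes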
    mov              xq, widthq

    .loop:
        movu                     m0, [srcq + widthq]
        palignr                  m2, m0, m1, 15
        movu                     m1, [srcq + widthq + 16]
        palignr                  m3, m1, m0, 15
        psubb                    m2, m0, m2
        psubb                    m3, m1, m3
        movu        [dstq + widthq], m2
        movu   [dstq + widthq + 16], m3
        add                  widthq, 2 * 16
        jl .loop

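    ; End of a row: step src by stride and dst by width (dst rows are packed),
    ; then move the chunk holding the row's last source byte into m1 (it is in
    ; m0 or m1 depending on width mod 32) and shuffle that byte into lane 15,
    ; so the next row's first palignr picks it up as the predictor.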
    add   srcq, strideq
    sub   dstq, xq ; dst + width
    test    xd, 16
    jz .mod32
    mova    m1, m0

.mod32:
    pshufb    m1, m4
    mov   widthq, xq
    dec  heightd
    jg .loop
    RET