;
; jfdctflt.asm - floating-point FDCT (64-bit SSE)
;
; Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
; Copyright (C) 2009, 2016, D. R. Commander.
;
; Based on the x86 SIMD extension for IJG JPEG library
; Copyright (C) 1999-2006, MIYASAKA Masaru.
; For conditions of distribution and use, see copyright notice in jsimdext.inc
;
; This file should be assembled with NASM (Netwide Assembler) and
; can *not* be assembled with Microsoft's MASM or any compatible
; assembler (including Borland's Turbo Assembler).
; NASM is available from http://nasm.sourceforge.net/ or
; http://sourceforge.net/project/showfiles.php?group_id=6208
;
; This file contains a floating-point implementation of the forward DCT
; (Discrete Cosine Transform). The following code is based directly on
; the IJG's original jfdctflt.c; see jfdctflt.c for more details.
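;
; For reference (a sketch only; jfdctflt.c is authoritative): the 1-D
; transform factored here is the 8-point DCT-II,
;
;   F(u) = SUM(x = 0..7) f(x) * cos((2*x + 1) * u * PI / 16),  u = 0..7,
;
; implemented with the Arai/Agui/Nakajima (AAN) arrangement, so each output
; coefficient carries a scale factor that is assumed to be folded into the
; quantization step elsewhere in the library rather than applied here.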

%include "jsimdext.inc"
%include "jdct.inc"

; --------------------------------------------------------------------------

%macro  unpcklps2 2  ; %1=(0 1 2 3) / %2=(4 5 6 7) => %1=(0 1 4 5)
    shufps      %1, %2, 0x44
%endmacro

%macro  unpckhps2 2  ; %1=(0 1 2 3) / %2=(4 5 6 7) => %1=(2 3 6 7)
    shufps      %1, %2, 0xEE
%endmacro
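
; SHUFPS imm8 decoding: the two low 2-bit fields select dwords from the
; destination and the two high fields select dwords from the source, so
; 0x44 yields (dst0 dst1 src0 src1) and 0xEE yields (dst2 dst3 src2 src3).
; Together with UNPCKLPS/UNPCKHPS, these macros provide the 4x4 float
; transposes used below.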

; --------------------------------------------------------------------------
    SECTION     SEG_CONST

    alignz      32
    GLOBAL_DATA(jconst_fdct_float_sse)

EXTN(jconst_fdct_float_sse):

PD_0_382 times 4 dd 0.382683432365089771728460
PD_0_707 times 4 dd 0.707106781186547524400844
PD_0_541 times 4 dd 0.541196100146196984399723
PD_1_306 times 4 dd 1.306562964876376527856643
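
; Broadcast AAN FDCT rotation constants (see jfdctflt.c).  With
; c_k = cos(k*PI/16):  PD_0_382 = c6,  PD_0_707 = c4 = 1/sqrt(2),
; PD_0_541 = c2 - c6,  PD_1_306 = c2 + c6.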

    alignz      32

; --------------------------------------------------------------------------
    SECTION     SEG_TEXT
    BITS        64
;
; Perform the forward DCT on one block of samples.
;
; GLOBAL(void)
; jsimd_fdct_float_sse(FAST_FLOAT *data)
;

; r10 = FAST_FLOAT *data

%define wk(i)   rbp - (WK_NUM - (i)) * SIZEOF_XMMWORD  ; xmmword wk[WK_NUM]
%define WK_NUM  2

    align       32
    GLOBAL_FUNCTION(jsimd_fdct_float_sse)

EXTN(jsimd_fdct_float_sse):
    push        rbp
    mov         rax, rsp                     ; rax = original rbp
    sub         rsp, byte 4
    and         rsp, byte (-SIZEOF_XMMWORD)  ; align to 128 bits
    mov         [rsp], rax
    mov         rbp, rsp                     ; rbp = aligned rbp
    lea         rsp, [wk(0)]
    collect_args 1
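
    ; The prologue above aligns rbp down to a 16-byte boundary, stores the
    ; pre-alignment stack pointer at [rbp] so the epilogue can restore it,
    ; and reserves WK_NUM (= 2) aligned xmmword scratch slots, addressed as
    ; wk(0) and wk(1), just below rbp.  collect_args 1 (defined in
    ; jsimdext.inc) makes the single pointer argument available in r10, per
    ; the register note above.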

    ; ---- Pass 1: process rows.
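    ;
    ; Each .rowloop iteration handles four rows of the 8x8 block.  The loads
    ; use the XMMBLOCK(row, xmmword column, base, element size) addressing
    ; macro from jsimdext.inc, and the 4x4 transposes arrange the data so
    ; that each dataN register holds element N of all four rows, allowing
    ; one 8-point DCT to be evaluated on four rows at once.  rdx then
    ; advances by four rows (4*DCTSIZE floats) per iteration.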

    mov         rdx, r10                ; (FAST_FLOAT *)
    mov         rcx, DCTSIZE/4
.rowloop:

    movaps      xmm0, XMMWORD [XMMBLOCK(2,0,rdx,SIZEOF_FAST_FLOAT)]
    movaps      xmm1, XMMWORD [XMMBLOCK(3,0,rdx,SIZEOF_FAST_FLOAT)]
    movaps      xmm2, XMMWORD [XMMBLOCK(2,1,rdx,SIZEOF_FAST_FLOAT)]
    movaps      xmm3, XMMWORD [XMMBLOCK(3,1,rdx,SIZEOF_FAST_FLOAT)]

    ; xmm0=(20 21 22 23), xmm2=(24 25 26 27)
    ; xmm1=(30 31 32 33), xmm3=(34 35 36 37)

    movaps      xmm4, xmm0              ; transpose coefficients(phase 1)
    unpcklps    xmm0, xmm1              ; xmm0=(20 30 21 31)
    unpckhps    xmm4, xmm1              ; xmm4=(22 32 23 33)
    movaps      xmm5, xmm2              ; transpose coefficients(phase 1)
    unpcklps    xmm2, xmm3              ; xmm2=(24 34 25 35)
    unpckhps    xmm5, xmm3              ; xmm5=(26 36 27 37)

    movaps      xmm6, XMMWORD [XMMBLOCK(0,0,rdx,SIZEOF_FAST_FLOAT)]
    movaps      xmm7, XMMWORD [XMMBLOCK(1,0,rdx,SIZEOF_FAST_FLOAT)]
    movaps      xmm1, XMMWORD [XMMBLOCK(0,1,rdx,SIZEOF_FAST_FLOAT)]
    movaps      xmm3, XMMWORD [XMMBLOCK(1,1,rdx,SIZEOF_FAST_FLOAT)]

    ; xmm6=(00 01 02 03), xmm1=(04 05 06 07)
    ; xmm7=(10 11 12 13), xmm3=(14 15 16 17)

    movaps      XMMWORD [wk(0)], xmm4   ; wk(0)=(22 32 23 33)
    movaps      XMMWORD [wk(1)], xmm2   ; wk(1)=(24 34 25 35)

    movaps      xmm4, xmm6              ; transpose coefficients(phase 1)
    unpcklps    xmm6, xmm7              ; xmm6=(00 10 01 11)
    unpckhps    xmm4, xmm7              ; xmm4=(02 12 03 13)
    movaps      xmm2, xmm1              ; transpose coefficients(phase 1)
    unpcklps    xmm1, xmm3              ; xmm1=(04 14 05 15)
    unpckhps    xmm2, xmm3              ; xmm2=(06 16 07 17)

    movaps      xmm7, xmm6              ; transpose coefficients(phase 2)
    unpcklps2   xmm6, xmm0              ; xmm6=(00 10 20 30)=data0
    unpckhps2   xmm7, xmm0              ; xmm7=(01 11 21 31)=data1
    movaps      xmm3, xmm2              ; transpose coefficients(phase 2)
    unpcklps2   xmm2, xmm5              ; xmm2=(06 16 26 36)=data6
    unpckhps2   xmm3, xmm5              ; xmm3=(07 17 27 37)=data7

    movaps      xmm0, xmm7
    movaps      xmm5, xmm6
    subps       xmm7, xmm2              ; xmm7=data1-data6=tmp6
    subps       xmm6, xmm3              ; xmm6=data0-data7=tmp7
    addps       xmm0, xmm2              ; xmm0=data1+data6=tmp1
    addps       xmm5, xmm3              ; xmm5=data0+data7=tmp0

    movaps      xmm2, XMMWORD [wk(0)]   ; xmm2=(22 32 23 33)
    movaps      xmm3, XMMWORD [wk(1)]   ; xmm3=(24 34 25 35)
    movaps      XMMWORD [wk(0)], xmm7   ; wk(0)=tmp6
    movaps      XMMWORD [wk(1)], xmm6   ; wk(1)=tmp7

    movaps      xmm7, xmm4              ; transpose coefficients(phase 2)
    unpcklps2   xmm4, xmm2              ; xmm4=(02 12 22 32)=data2
    unpckhps2   xmm7, xmm2              ; xmm7=(03 13 23 33)=data3
    movaps      xmm6, xmm1              ; transpose coefficients(phase 2)
    unpcklps2   xmm1, xmm3              ; xmm1=(04 14 24 34)=data4
    unpckhps2   xmm6, xmm3              ; xmm6=(05 15 25 35)=data5

    movaps      xmm2, xmm7
    movaps      xmm3, xmm4
    addps       xmm7, xmm1              ; xmm7=data3+data4=tmp3
    addps       xmm4, xmm6              ; xmm4=data2+data5=tmp2
    subps       xmm2, xmm1              ; xmm2=data3-data4=tmp4
    subps       xmm3, xmm6              ; xmm3=data2-data5=tmp5

    ; -- Even part
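    ;
    ; Scalar outline of this even-part butterfly (cf. jfdctflt.c):
    ;   tmp10 = tmp0 + tmp3;    tmp13 = tmp0 - tmp3;
    ;   tmp11 = tmp1 + tmp2;    tmp12 = tmp1 - tmp2;
    ;   data0 = tmp10 + tmp11;  data4 = tmp10 - tmp11;
    ;   z1 = (tmp12 + tmp13) * 0.707106781;
    ;   data2 = tmp13 + z1;     data6 = tmp13 - z1;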

    movaps      xmm1, xmm5
    movaps      xmm6, xmm0
    subps       xmm5, xmm7              ; xmm5=tmp13
    subps       xmm0, xmm4              ; xmm0=tmp12
    addps       xmm1, xmm7              ; xmm1=tmp10
    addps       xmm6, xmm4              ; xmm6=tmp11

    addps       xmm0, xmm5
    mulps       xmm0, [rel PD_0_707]    ; xmm0=z1

    movaps      xmm7, xmm1
    movaps      xmm4, xmm5
    subps       xmm1, xmm6              ; xmm1=data4
    subps       xmm5, xmm0              ; xmm5=data6
    addps       xmm7, xmm6              ; xmm7=data0
    addps       xmm4, xmm0              ; xmm4=data2

    movaps      XMMWORD [XMMBLOCK(0,1,rdx,SIZEOF_FAST_FLOAT)], xmm1
    movaps      XMMWORD [XMMBLOCK(2,1,rdx,SIZEOF_FAST_FLOAT)], xmm5
    movaps      XMMWORD [XMMBLOCK(0,0,rdx,SIZEOF_FAST_FLOAT)], xmm7
    movaps      XMMWORD [XMMBLOCK(2,0,rdx,SIZEOF_FAST_FLOAT)], xmm4

    ; -- Odd part
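    ;
    ; Scalar outline of this odd-part rotation (cf. jfdctflt.c):
    ;   tmp10 = tmp4 + tmp5;  tmp11 = tmp5 + tmp6;  tmp12 = tmp6 + tmp7;
    ;   z5 = (tmp10 - tmp12) * 0.382683433;
    ;   z2 = 0.541196100 * tmp10 + z5;    z4 = 1.306562965 * tmp12 + z5;
    ;   z3 = tmp11 * 0.707106781;
    ;   z11 = tmp7 + z3;    z13 = tmp7 - z3;
    ;   data5 = z13 + z2;   data3 = z13 - z2;
    ;   data1 = z11 + z4;   data7 = z11 - z4;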

    movaps      xmm6, XMMWORD [wk(0)]   ; xmm6=tmp6
    movaps      xmm0, XMMWORD [wk(1)]   ; xmm0=tmp7

    addps       xmm2, xmm3              ; xmm2=tmp10
    addps       xmm3, xmm6              ; xmm3=tmp11
    addps       xmm6, xmm0              ; xmm6=tmp12, xmm0=tmp7

    mulps       xmm3, [rel PD_0_707]    ; xmm3=z3

    movaps      xmm1, xmm2              ; xmm1=tmp10
    subps       xmm2, xmm6
    mulps       xmm2, [rel PD_0_382]    ; xmm2=z5
    mulps       xmm1, [rel PD_0_541]    ; xmm1=MULTIPLY(tmp10,FIX_0_541196)
    mulps       xmm6, [rel PD_1_306]    ; xmm6=MULTIPLY(tmp12,FIX_1_306562)
    addps       xmm1, xmm2              ; xmm1=z2
    addps       xmm6, xmm2              ; xmm6=z4

    movaps      xmm5, xmm0
    subps       xmm0, xmm3              ; xmm0=z13
    addps       xmm5, xmm3              ; xmm5=z11

    movaps      xmm7, xmm0
    movaps      xmm4, xmm5
    subps       xmm0, xmm1              ; xmm0=data3
    subps       xmm5, xmm6              ; xmm5=data7
    addps       xmm7, xmm1              ; xmm7=data5
    addps       xmm4, xmm6              ; xmm4=data1

    movaps      XMMWORD [XMMBLOCK(3,0,rdx,SIZEOF_FAST_FLOAT)], xmm0
    movaps      XMMWORD [XMMBLOCK(3,1,rdx,SIZEOF_FAST_FLOAT)], xmm5
    movaps      XMMWORD [XMMBLOCK(1,1,rdx,SIZEOF_FAST_FLOAT)], xmm7
    movaps      XMMWORD [XMMBLOCK(1,0,rdx,SIZEOF_FAST_FLOAT)], xmm4

    add         rdx, 4*DCTSIZE*SIZEOF_FAST_FLOAT
    dec         rcx
    jnz         near .rowloop

    ; ---- Pass 2: process columns.
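    ;
    ; The column pass applies the same 1-D DCT down the columns: each
    ; .columnloop iteration loads eight xmmwords (rows 0-7 of four adjacent
    ; columns), transposes them so that each dataN register holds row N of
    ; those four columns, and finally advances rdx by four floats to reach
    ; the next group of columns.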

    mov         rdx, r10                ; (FAST_FLOAT *)
    mov         rcx, DCTSIZE/4
.columnloop:

    movaps      xmm0, XMMWORD [XMMBLOCK(2,0,rdx,SIZEOF_FAST_FLOAT)]
    movaps      xmm1, XMMWORD [XMMBLOCK(3,0,rdx,SIZEOF_FAST_FLOAT)]
    movaps      xmm2, XMMWORD [XMMBLOCK(6,0,rdx,SIZEOF_FAST_FLOAT)]
    movaps      xmm3, XMMWORD [XMMBLOCK(7,0,rdx,SIZEOF_FAST_FLOAT)]

    ; xmm0=(02 12 22 32), xmm2=(42 52 62 72)
    ; xmm1=(03 13 23 33), xmm3=(43 53 63 73)

    movaps      xmm4, xmm0              ; transpose coefficients(phase 1)
    unpcklps    xmm0, xmm1              ; xmm0=(02 03 12 13)
    unpckhps    xmm4, xmm1              ; xmm4=(22 23 32 33)
    movaps      xmm5, xmm2              ; transpose coefficients(phase 1)
    unpcklps    xmm2, xmm3              ; xmm2=(42 43 52 53)
    unpckhps    xmm5, xmm3              ; xmm5=(62 63 72 73)

    movaps      xmm6, XMMWORD [XMMBLOCK(0,0,rdx,SIZEOF_FAST_FLOAT)]
    movaps      xmm7, XMMWORD [XMMBLOCK(1,0,rdx,SIZEOF_FAST_FLOAT)]
    movaps      xmm1, XMMWORD [XMMBLOCK(4,0,rdx,SIZEOF_FAST_FLOAT)]
    movaps      xmm3, XMMWORD [XMMBLOCK(5,0,rdx,SIZEOF_FAST_FLOAT)]

    ; xmm6=(00 10 20 30), xmm1=(40 50 60 70)
    ; xmm7=(01 11 21 31), xmm3=(41 51 61 71)

    movaps      XMMWORD [wk(0)], xmm4   ; wk(0)=(22 23 32 33)
    movaps      XMMWORD [wk(1)], xmm2   ; wk(1)=(42 43 52 53)

    movaps      xmm4, xmm6              ; transpose coefficients(phase 1)
    unpcklps    xmm6, xmm7              ; xmm6=(00 01 10 11)
    unpckhps    xmm4, xmm7              ; xmm4=(20 21 30 31)
    movaps      xmm2, xmm1              ; transpose coefficients(phase 1)
    unpcklps    xmm1, xmm3              ; xmm1=(40 41 50 51)
    unpckhps    xmm2, xmm3              ; xmm2=(60 61 70 71)

    movaps      xmm7, xmm6              ; transpose coefficients(phase 2)
    unpcklps2   xmm6, xmm0              ; xmm6=(00 01 02 03)=data0
    unpckhps2   xmm7, xmm0              ; xmm7=(10 11 12 13)=data1
    movaps      xmm3, xmm2              ; transpose coefficients(phase 2)
    unpcklps2   xmm2, xmm5              ; xmm2=(60 61 62 63)=data6
    unpckhps2   xmm3, xmm5              ; xmm3=(70 71 72 73)=data7

    movaps      xmm0, xmm7
    movaps      xmm5, xmm6
    subps       xmm7, xmm2              ; xmm7=data1-data6=tmp6
    subps       xmm6, xmm3              ; xmm6=data0-data7=tmp7
    addps       xmm0, xmm2              ; xmm0=data1+data6=tmp1
    addps       xmm5, xmm3              ; xmm5=data0+data7=tmp0

    movaps      xmm2, XMMWORD [wk(0)]   ; xmm2=(22 23 32 33)
    movaps      xmm3, XMMWORD [wk(1)]   ; xmm3=(42 43 52 53)
    movaps      XMMWORD [wk(0)], xmm7   ; wk(0)=tmp6
    movaps      XMMWORD [wk(1)], xmm6   ; wk(1)=tmp7

    movaps      xmm7, xmm4              ; transpose coefficients(phase 2)
    unpcklps2   xmm4, xmm2              ; xmm4=(20 21 22 23)=data2
    unpckhps2   xmm7, xmm2              ; xmm7=(30 31 32 33)=data3
    movaps      xmm6, xmm1              ; transpose coefficients(phase 2)
    unpcklps2   xmm1, xmm3              ; xmm1=(40 41 42 43)=data4
    unpckhps2   xmm6, xmm3              ; xmm6=(50 51 52 53)=data5

    movaps      xmm2, xmm7
    movaps      xmm3, xmm4
    addps       xmm7, xmm1              ; xmm7=data3+data4=tmp3
    addps       xmm4, xmm6              ; xmm4=data2+data5=tmp2
    subps       xmm2, xmm1              ; xmm2=data3-data4=tmp4
    subps       xmm3, xmm6              ; xmm3=data2-data5=tmp5

    ; -- Even part

    movaps      xmm1, xmm5
    movaps      xmm6, xmm0
    subps       xmm5, xmm7              ; xmm5=tmp13
    subps       xmm0, xmm4              ; xmm0=tmp12
    addps       xmm1, xmm7              ; xmm1=tmp10
    addps       xmm6, xmm4              ; xmm6=tmp11

    addps       xmm0, xmm5
    mulps       xmm0, [rel PD_0_707]    ; xmm0=z1

    movaps      xmm7, xmm1
    movaps      xmm4, xmm5
    subps       xmm1, xmm6              ; xmm1=data4
    subps       xmm5, xmm0              ; xmm5=data6
    addps       xmm7, xmm6              ; xmm7=data0
    addps       xmm4, xmm0              ; xmm4=data2

    movaps      XMMWORD [XMMBLOCK(4,0,rdx,SIZEOF_FAST_FLOAT)], xmm1
    movaps      XMMWORD [XMMBLOCK(6,0,rdx,SIZEOF_FAST_FLOAT)], xmm5
    movaps      XMMWORD [XMMBLOCK(0,0,rdx,SIZEOF_FAST_FLOAT)], xmm7
    movaps      XMMWORD [XMMBLOCK(2,0,rdx,SIZEOF_FAST_FLOAT)], xmm4

    ; -- Odd part

    movaps      xmm6, XMMWORD [wk(0)]   ; xmm6=tmp6
    movaps      xmm0, XMMWORD [wk(1)]   ; xmm0=tmp7

    addps       xmm2, xmm3              ; xmm2=tmp10
    addps       xmm3, xmm6              ; xmm3=tmp11
    addps       xmm6, xmm0              ; xmm6=tmp12, xmm0=tmp7

    mulps       xmm3, [rel PD_0_707]    ; xmm3=z3

    movaps      xmm1, xmm2              ; xmm1=tmp10
    subps       xmm2, xmm6
    mulps       xmm2, [rel PD_0_382]    ; xmm2=z5
    mulps       xmm1, [rel PD_0_541]    ; xmm1=MULTIPLY(tmp10,FIX_0_541196)
    mulps       xmm6, [rel PD_1_306]    ; xmm6=MULTIPLY(tmp12,FIX_1_306562)
    addps       xmm1, xmm2              ; xmm1=z2
    addps       xmm6, xmm2              ; xmm6=z4

    movaps      xmm5, xmm0
    subps       xmm0, xmm3              ; xmm0=z13
    addps       xmm5, xmm3              ; xmm5=z11

    movaps      xmm7, xmm0
    movaps      xmm4, xmm5
    subps       xmm0, xmm1              ; xmm0=data3
    subps       xmm5, xmm6              ; xmm5=data7
    addps       xmm7, xmm1              ; xmm7=data5
    addps       xmm4, xmm6              ; xmm4=data1

    movaps      XMMWORD [XMMBLOCK(3,0,rdx,SIZEOF_FAST_FLOAT)], xmm0
    movaps      XMMWORD [XMMBLOCK(7,0,rdx,SIZEOF_FAST_FLOAT)], xmm5
    movaps      XMMWORD [XMMBLOCK(5,0,rdx,SIZEOF_FAST_FLOAT)], xmm7
    movaps      XMMWORD [XMMBLOCK(1,0,rdx,SIZEOF_FAST_FLOAT)], xmm4

    add         rdx, byte 4*SIZEOF_FAST_FLOAT
    dec         rcx
    jnz         near .columnloop

    uncollect_args 1
    mov         rsp, rbp                ; rsp <- aligned rbp
    pop         rsp                     ; rsp <- original rbp
    pop         rbp
    ret

; For some reason, the OS X linker does not honor the request to align the
; segment unless we do this.
    align       32