;
; jdmrgext.asm - merged upsampling/color conversion (64-bit AVX2)
;
; Copyright 2009, 2012 Pierre Ossman <ossman@cendio.se> for Cendio AB
; Copyright (C) 2009, 2012, 2016, D. R. Commander.
; Copyright (C) 2015, Intel Corporation.
;
; Based on the x86 SIMD extension for IJG JPEG library
; Copyright (C) 1999-2006, MIYASAKA Masaru.
; For conditions of distribution and use, see copyright notice in jsimdext.inc
;
; This file should be assembled with NASM (Netwide Assembler) and can
; *not* be assembled with Microsoft's MASM or any compatible assembler
; (including Borland's Turbo Assembler).
; NASM is available from http://nasm.sourceforge.net/ or
; http://sourceforge.net/project/showfiles.php?group_id=6208

%include "jcolsamp.inc"

; --------------------------------------------------------------------------
;
; Upsample and color convert for the case of 2:1 horizontal and 1:1 vertical.
;
; GLOBAL(void)
; jsimd_h2v1_merged_upsample_avx2(JDIMENSION output_width,
;                                 JSAMPIMAGE input_buf,
;                                 JDIMENSION in_row_group_ctr,
;                                 JSAMPARRAY output_buf);
;

; r10d = JDIMENSION output_width
; r11 = JSAMPIMAGE input_buf
; r12d = JDIMENSION in_row_group_ctr
; r13 = JSAMPARRAY output_buf
%define wk(i)   rbp - (WK_NUM - (i)) * SIZEOF_YMMWORD  ; ymmword wk[WK_NUM]
%define WK_NUM  3
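; wk(i) addresses one of WK_NUM ymmword-sized temporaries just below the
; aligned frame pointer: the prologue below rounds rbp down to a 32-byte
; boundary, saves the pre-alignment stack pointer at [rbp], and drops rsp
; past wk(0) to reserve the scratch area; the epilogue undoes this with
; "mov rsp, rbp" / "pop rsp".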

    align       32
    GLOBAL_FUNCTION(jsimd_h2v1_merged_upsample_avx2)

EXTN(jsimd_h2v1_merged_upsample_avx2):
    push        rbp
    mov         rax, rsp                     ; rax = original rbp
    sub         rsp, byte 4
    and         rsp, byte (-SIZEOF_YMMWORD)  ; align to 256 bits
    mov         [rsp], rax
    mov         rbp, rsp                     ; rbp = aligned rbp
    lea         rsp, [wk(0)]
    collect_args 4
    push        rbx

    mov         ecx, r10d               ; col
    test        rcx, rcx
    jz          near .return

    push        rcx

    mov         rdi, r11
    mov         ecx, r12d
    mov         rsi, JSAMPARRAY [rdi+0*SIZEOF_JSAMPARRAY]
    mov         rbx, JSAMPARRAY [rdi+1*SIZEOF_JSAMPARRAY]
    mov         rdx, JSAMPARRAY [rdi+2*SIZEOF_JSAMPARRAY]
    mov         rdi, r13
    mov         rsi, JSAMPROW [rsi+rcx*SIZEOF_JSAMPROW]  ; inptr0
    mov         rbx, JSAMPROW [rbx+rcx*SIZEOF_JSAMPROW]  ; inptr1
    mov         rdx, JSAMPROW [rdx+rcx*SIZEOF_JSAMPROW]  ; inptr2
    mov         rdi, JSAMPROW [rdi]                      ; outptr

    pop         rcx                     ; col

.columnloop:

    vmovdqu     ymm6, YMMWORD [rbx]     ; ymm6=Cb(0123456789ABCDEFGHIJKLMNOPQRSTUV)
    vmovdqu     ymm7, YMMWORD [rdx]     ; ymm7=Cr(0123456789ABCDEFGHIJKLMNOPQRSTUV)

    vpxor       ymm1, ymm1, ymm1        ; ymm1=(all 0's)
    vpcmpeqw    ymm3, ymm3, ymm3
    vpsllw      ymm3, ymm3, 7           ; ymm3={0xFF80 0xFF80 0xFF80 0xFF80 ..}

    vpermq      ymm6, ymm6, 0xd8        ; ymm6=Cb(01234567GHIJKLMN89ABCDEFOPQRSTUV)
    vpermq      ymm7, ymm7, 0xd8        ; ymm7=Cr(01234567GHIJKLMN89ABCDEFOPQRSTUV)
    vpunpcklbw  ymm4, ymm6, ymm1        ; ymm4=Cb(0123456789ABCDEF)=CbL
    vpunpckhbw  ymm6, ymm6, ymm1        ; ymm6=Cb(GHIJKLMNOPQRSTUV)=CbH
    vpunpcklbw  ymm0, ymm7, ymm1        ; ymm0=Cr(0123456789ABCDEF)=CrL
    vpunpckhbw  ymm7, ymm7, ymm1        ; ymm7=Cr(GHIJKLMNOPQRSTUV)=CrH

    vpaddw      ymm5, ymm6, ymm3
    vpaddw      ymm2, ymm4, ymm3
    vpaddw      ymm1, ymm7, ymm3
    vpaddw      ymm3, ymm0, ymm3

    ; (Original)
    ; R = Y                + 1.40200 * Cr
    ; G = Y - 0.34414 * Cb - 0.71414 * Cr
    ; B = Y + 1.77200 * Cb
    ;
    ; (This implementation)
    ; R = Y                + 0.40200 * Cr + Cr
    ; G = Y - 0.34414 * Cb + 0.28586 * Cr - Cr
    ; B = Y - 0.22800 * Cb + Cb + Cb
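    ;
    ; (The rewrite above keeps every multiplier small enough for 16-bit signed
    ; fixed point: 1.40200 becomes 1 + 0.40200, 0.71414 becomes 1 - 0.28586,
    ; and 1.77200 becomes 2 - 0.22800.  vpmulhw keeps only the high 16 bits of
    ; each product, so the code below multiplies the doubled chroma values and
    ; then rounds with "+1, arithmetic shift right by 1" to recover one extra
    ; bit of precision.)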

    vpaddw      ymm6, ymm5, ymm5             ; ymm6=2*CbH
    vpaddw      ymm4, ymm2, ymm2             ; ymm4=2*CbL
    vpaddw      ymm7, ymm1, ymm1             ; ymm7=2*CrH
    vpaddw      ymm0, ymm3, ymm3             ; ymm0=2*CrL

    vpmulhw     ymm6, ymm6, [rel PW_MF0228]  ; ymm6=(2*CbH * -FIX(0.22800))
    vpmulhw     ymm4, ymm4, [rel PW_MF0228]  ; ymm4=(2*CbL * -FIX(0.22800))
    vpmulhw     ymm7, ymm7, [rel PW_F0402]   ; ymm7=(2*CrH * FIX(0.40200))
    vpmulhw     ymm0, ymm0, [rel PW_F0402]   ; ymm0=(2*CrL * FIX(0.40200))

    vpaddw      ymm6, ymm6, [rel PW_ONE]
    vpaddw      ymm4, ymm4, [rel PW_ONE]
    vpsraw      ymm6, ymm6, 1                ; ymm6=(CbH * -FIX(0.22800))
    vpsraw      ymm4, ymm4, 1                ; ymm4=(CbL * -FIX(0.22800))
    vpaddw      ymm7, ymm7, [rel PW_ONE]
    vpaddw      ymm0, ymm0, [rel PW_ONE]
    vpsraw      ymm7, ymm7, 1                ; ymm7=(CrH * FIX(0.40200))
    vpsraw      ymm0, ymm0, 1                ; ymm0=(CrL * FIX(0.40200))

    vpaddw      ymm6, ymm6, ymm5
    vpaddw      ymm4, ymm4, ymm2
    vpaddw      ymm6, ymm6, ymm5             ; ymm6=(CbH * FIX(1.77200))=(B-Y)H
    vpaddw      ymm4, ymm4, ymm2             ; ymm4=(CbL * FIX(1.77200))=(B-Y)L
    vpaddw      ymm7, ymm7, ymm1             ; ymm7=(CrH * FIX(1.40200))=(R-Y)H
    vpaddw      ymm0, ymm0, ymm3             ; ymm0=(CrL * FIX(1.40200))=(R-Y)L

    vmovdqa     YMMWORD [wk(0)], ymm6        ; wk(0)=(B-Y)H
    vmovdqa     YMMWORD [wk(1)], ymm7        ; wk(1)=(R-Y)H

    vpunpckhwd  ymm6, ymm5, ymm1
    vpunpcklwd  ymm5, ymm5, ymm1
    vpmaddwd    ymm5, ymm5, [rel PW_MF0344_F0285]
    vpmaddwd    ymm6, ymm6, [rel PW_MF0344_F0285]
    vpunpckhwd  ymm7, ymm2, ymm3
    vpunpcklwd  ymm2, ymm2, ymm3
    vpmaddwd    ymm2, ymm2, [rel PW_MF0344_F0285]
    vpmaddwd    ymm7, ymm7, [rel PW_MF0344_F0285]

    vpaddd      ymm5, ymm5, [rel PD_ONEHALF]
    vpaddd      ymm6, ymm6, [rel PD_ONEHALF]
    vpsrad      ymm5, ymm5, SCALEBITS
    vpsrad      ymm6, ymm6, SCALEBITS
    vpaddd      ymm2, ymm2, [rel PD_ONEHALF]
    vpaddd      ymm7, ymm7, [rel PD_ONEHALF]
    vpsrad      ymm2, ymm2, SCALEBITS
    vpsrad      ymm7, ymm7, SCALEBITS

    vpackssdw   ymm5, ymm5, ymm6        ; ymm5=CbH*-FIX(0.344)+CrH*FIX(0.285)
    vpackssdw   ymm2, ymm2, ymm7        ; ymm2=CbL*-FIX(0.344)+CrL*FIX(0.285)
    vpsubw      ymm5, ymm5, ymm1        ; ymm5=CbH*-FIX(0.344)+CrH*-FIX(0.714)=(G-Y)H
    vpsubw      ymm2, ymm2, ymm3        ; ymm2=CbL*-FIX(0.344)+CrL*-FIX(0.714)=(G-Y)L

    vmovdqa     YMMWORD [wk(2)], ymm5   ; wk(2)=(G-Y)H

    mov         al, 2                   ; Yctr
    jmp         short .Yloop_1st
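    ; One 32-sample Cb/Cr block covers two 32-sample Y blocks (2:1 horizontal
    ; upsampling), so the Y loop runs twice per chroma load (Yctr = 2): the
    ; first pass uses the low-half (R-Y)/(G-Y)/(B-Y) terms still in registers,
    ; and the second pass reloads the high-half terms saved in wk(0)..wk(2).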

.Yloop_2nd:
    vmovdqa     ymm0, YMMWORD [wk(1)]   ; ymm0=(R-Y)H
    vmovdqa     ymm2, YMMWORD [wk(2)]   ; ymm2=(G-Y)H
    vmovdqa     ymm4, YMMWORD [wk(0)]   ; ymm4=(B-Y)H

.Yloop_1st:
    vmovdqu     ymm7, YMMWORD [rsi]     ; ymm7=Y(0123456789ABCDEFGHIJKLMNOPQRSTUV)

    vpcmpeqw    ymm6, ymm6, ymm6
    vpsrlw      ymm6, ymm6, BYTE_BIT    ; ymm6={0xFF 0x00 0xFF 0x00 ..}
    vpand       ymm6, ymm6, ymm7        ; ymm6=Y(02468ACEGIKMOQSU)=YE
    vpsrlw      ymm7, ymm7, BYTE_BIT    ; ymm7=Y(13579BDFHJLNPRTV)=YO

    vmovdqa     ymm1, ymm0              ; ymm1=ymm0=(R-Y)(L/H)
    vmovdqa     ymm3, ymm2              ; ymm3=ymm2=(G-Y)(L/H)
    vmovdqa     ymm5, ymm4              ; ymm5=ymm4=(B-Y)(L/H)

    vpaddw      ymm0, ymm0, ymm6        ; ymm0=((R-Y)+YE)=RE=R(02468ACEGIKMOQSU)
    vpaddw      ymm1, ymm1, ymm7        ; ymm1=((R-Y)+YO)=RO=R(13579BDFHJLNPRTV)
    vpackuswb   ymm0, ymm0, ymm0        ; ymm0=R(02468ACE********GIKMOQSU********)
    vpackuswb   ymm1, ymm1, ymm1        ; ymm1=R(13579BDF********HJLNPRTV********)

    vpaddw      ymm2, ymm2, ymm6        ; ymm2=((G-Y)+YE)=GE=G(02468ACEGIKMOQSU)
    vpaddw      ymm3, ymm3, ymm7        ; ymm3=((G-Y)+YO)=GO=G(13579BDFHJLNPRTV)
    vpackuswb   ymm2, ymm2, ymm2        ; ymm2=G(02468ACE********GIKMOQSU********)
    vpackuswb   ymm3, ymm3, ymm3        ; ymm3=G(13579BDF********HJLNPRTV********)

    vpaddw      ymm4, ymm4, ymm6        ; ymm4=((B-Y)+YE)=BE=B(02468ACEGIKMOQSU)
    vpaddw      ymm5, ymm5, ymm7        ; ymm5=((B-Y)+YO)=BO=B(13579BDFHJLNPRTV)
    vpackuswb   ymm4, ymm4, ymm4        ; ymm4=B(02468ACE********GIKMOQSU********)
    vpackuswb   ymm5, ymm5, ymm5        ; ymm5=B(13579BDF********HJLNPRTV********)
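
    ; ymmA-ymmH below are register aliases defined in jcolsamp.inc; they map
    ; the even/odd R, G, and B vectors above (plus the filler vectors in the
    ; RGBX case) onto whichever registers match the target pixel component
    ; order, so the interleaving code works unchanged for RGB, BGR, RGBX, etc.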

%if RGB_PIXELSIZE == 3  ; ---------------

    ; ymmA=(00 02 04 06 08 0A 0C 0E ** 0G 0I 0K 0M 0O 0Q 0S 0U **)
    ; ymmB=(01 03 05 07 09 0B 0D 0F ** 0H 0J 0L 0N 0P 0R 0T 0V **)
    ; ymmC=(10 12 14 16 18 1A 1C 1E ** 1G 1I 1K 1M 1O 1Q 1S 1U **)
    ; ymmD=(11 13 15 17 19 1B 1D 1F ** 1H 1J 1L 1N 1P 1R 1T 1V **)
    ; ymmE=(20 22 24 26 28 2A 2C 2E ** 2G 2I 2K 2M 2O 2Q 2S 2U **)
    ; ymmF=(21 23 25 27 29 2B 2D 2F ** 2H 2J 2L 2N 2P 2R 2T 2V **)
    ; ymmG=(** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **)
    ; ymmH=(** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **)

    vpunpcklbw  ymmA, ymmA, ymmC        ; ymmA=(00 10 02 12 04 14 06 16 08 18 0A 1A 0C 1C 0E 1E
                                        ;       0G 1G 0I 1I 0K 1K 0M 1M 0O 1O 0Q 1Q 0S 1S 0U 1U)
    vpunpcklbw  ymmE, ymmE, ymmB        ; ymmE=(20 01 22 03 24 05 26 07 28 09 2A 0B 2C 0D 2E 0F
                                        ;       2G 0H 2I 0J 2K 0L 2M 0N 2O 0P 2Q 0R 2S 0T 2U 0V)
    vpunpcklbw  ymmD, ymmD, ymmF        ; ymmD=(11 21 13 23 15 25 17 27 19 29 1B 2B 1D 2D 1F 2F
                                        ;       1H 2H 1J 2J 1L 2L 1N 2N 1P 2P 1R 2R 1T 2T 1V 2V)

    vpsrldq     ymmH, ymmA, 2           ; ymmH=(02 12 04 14 06 16 08 18 0A 1A 0C 1C 0E 1E 0G 1G
                                        ;       0I 1I 0K 1K 0M 1M 0O 1O 0Q 1Q 0S 1S 0U 1U -- --)
    vpunpckhwd  ymmG, ymmA, ymmE        ; ymmG=(08 18 28 09 0A 1A 2A 0B 0C 1C 2C 0D 0E 1E 2E 0F
                                        ;       0O 1O 2O 0P 0Q 1Q 2Q 0R 0S 1S 2S 0T 0U 1U 2U 0V)
    vpunpcklwd  ymmA, ymmA, ymmE        ; ymmA=(00 10 20 01 02 12 22 03 04 14 24 05 06 16 26 07
                                        ;       0G 1G 2G 0H 0I 1I 2I 0J 0K 1K 2K 0L 0M 1M 2M 0N)

    vpsrldq     ymmE, ymmE, 2           ; ymmE=(22 03 24 05 26 07 28 09 2A 0B 2C 0D 2E 0F 2G 0H
                                        ;       2I 0J 2K 0L 2M 0N 2O 0P 2Q 0R 2S 0T 2U 0V -- --)

    vpsrldq     ymmB, ymmD, 2           ; ymmB=(13 23 15 25 17 27 19 29 1B 2B 1D 2D 1F 2F 1H 2H
                                        ;       1J 2J 1L 2L 1N 2N 1P 2P 1R 2R 1T 2T 1V 2V -- --)
    vpunpckhwd  ymmC, ymmD, ymmH        ; ymmC=(19 29 0A 1A 1B 2B 0C 1C 1D 2D 0E 1E 1F 2F 0G 1G
                                        ;       1P 2P 0Q 1Q 1R 2R 0S 1S 1T 2T 0U 1U 1V 2V -- --)
    vpunpcklwd  ymmD, ymmD, ymmH        ; ymmD=(11 21 02 12 13 23 04 14 15 25 06 16 17 27 08 18
                                        ;       1H 2H 0I 1I 1J 2J 0K 1K 1L 2L 0M 1M 1N 2N 0O 1O)

    vpunpckhwd  ymmF, ymmE, ymmB        ; ymmF=(2A 0B 1B 2B 2C 0D 1D 2D 2E 0F 1F 2F 2G 0H 1H 2H
                                        ;       2Q 0R 1R 2R 2S 0T 1T 2T 2U 0V 1V 2V -- -- -- --)
    vpunpcklwd  ymmE, ymmE, ymmB        ; ymmE=(22 03 13 23 24 05 15 25 26 07 17 27 28 09 19 29
                                        ;       2I 0J 1J 2J 2K 0L 1L 2L 2M 0N 1N 2N 2O 0P 1P 2P)

    vpshufd     ymmH, ymmA, 0x4E        ; ymmH=(04 14 24 05 06 16 26 07 00 10 20 01 02 12 22 03
                                        ;       0K 1K 2K 0L 0M 1M 2M 0N 0G 1G 2G 0H 0I 1I 2I 0J)
    vpunpckldq  ymmA, ymmA, ymmD        ; ymmA=(00 10 20 01 11 21 02 12 02 12 22 03 13 23 04 14
                                        ;       0G 1G 2G 0H 1H 2H 0I 1I 0I 1I 2I 0J 1J 2J 0K 1K)
    vpunpckhdq  ymmD, ymmD, ymmE        ; ymmD=(15 25 06 16 26 07 17 27 17 27 08 18 28 09 19 29
                                        ;       1L 2L 0M 1M 2M 0N 1N 2N 1N 2N 0O 1O 2O 0P 1P 2P)
    vpunpckldq  ymmE, ymmE, ymmH        ; ymmE=(22 03 13 23 04 14 24 05 24 05 15 25 06 16 26 07
                                        ;       2I 0J 1J 2J 0K 1K 2K 0L 2K 0L 1L 2L 0M 1M 2M 0N)

    vpshufd     ymmH, ymmG, 0x4E        ; ymmH=(0C 1C 2C 0D 0E 1E 2E 0F 08 18 28 09 0A 1A 2A 0B
                                        ;       0S 1S 2S 0T 0U 1U 2U 0V 0O 1O 2O 0P 0Q 1Q 2Q 0R)
    vpunpckldq  ymmG, ymmG, ymmC        ; ymmG=(08 18 28 09 19 29 0A 1A 0A 1A 2A 0B 1B 2B 0C 1C
                                        ;       0O 1O 2O 0P 1P 2P 0Q 1Q 0Q 1Q 2Q 0R 1R 2R 0S 1S)
    vpunpckhdq  ymmC, ymmC, ymmF        ; ymmC=(1D 2D 0E 1E 2E 0F 1F 2F 1F 2F 0G 1G 2G 0H 1H 2H
                                        ;       1T 2T 0U 1U 2U 0V 1V 2V 1V 2V -- -- -- -- -- --)
    vpunpckldq  ymmF, ymmF, ymmH        ; ymmF=(2A 0B 1B 2B 0C 1C 2C 0D 2C 0D 1D 2D 0E 1E 2E 0F
                                        ;       2Q 0R 1R 2R 0S 1S 2S 0T 2S 0T 1T 2T 0U 1U 2U 0V)

    vpunpcklqdq ymmH, ymmA, ymmE        ; ymmH=(00 10 20 01 11 21 02 12 22 03 13 23 04 14 24 05
                                        ;       0G 1G 2G 0H 1H 2H 0I 1I 2I 0J 1J 2J 0K 1K 2K 0L)
    vpunpcklqdq ymmG, ymmD, ymmG        ; ymmG=(15 25 06 16 26 07 17 27 08 18 28 09 19 29 0A 1A
                                        ;       1L 2L 0M 1M 2M 0N 1N 2N 0O 1O 2O 0P 1P 2P 0Q 1Q)
    vpunpcklqdq ymmC, ymmF, ymmC        ; ymmC=(2A 0B 1B 2B 0C 1C 2C 0D 1D 2D 0E 1E 2E 0F 1F 2F
                                        ;       2Q 0R 1R 2R 0S 1S 2S 0T 1T 2T 0U 1U 2U 0V 1V 2V)

    vperm2i128  ymmA, ymmH, ymmG, 0x20  ; ymmA=(00 10 20 01 11 21 02 12 22 03 13 23 04 14 24 05
                                        ;       15 25 06 16 26 07 17 27 08 18 28 09 19 29 0A 1A)
    vperm2i128  ymmD, ymmC, ymmH, 0x30  ; ymmD=(2A 0B 1B 2B 0C 1C 2C 0D 1D 2D 0E 1E 2E 0F 1F 2F
                                        ;       0G 1G 2G 0H 1H 2H 0I 1I 2I 0J 1J 2J 0K 1K 2K 0L)
    vperm2i128  ymmF, ymmG, ymmC, 0x31  ; ymmF=(1L 2L 0M 1M 2M 0N 1N 2N 0O 1O 2O 0P 1P 2P 0Q 1Q
                                        ;       2Q 0R 1R 2R 0S 1S 2S 0T 1T 2T 0U 1U 2U 0V 1V 2V)

    cmp         rcx, byte SIZEOF_YMMWORD
    jb          short .column_st64

    test        rdi, SIZEOF_YMMWORD-1
    jnz         short .out1
    ; --(aligned)-------------------
    vmovntdq    YMMWORD [rdi+0*SIZEOF_YMMWORD], ymmA
    vmovntdq    YMMWORD [rdi+1*SIZEOF_YMMWORD], ymmD
    vmovntdq    YMMWORD [rdi+2*SIZEOF_YMMWORD], ymmF
    jmp         short .out0
.out1:  ; --(unaligned)-----------------
    vmovdqu     YMMWORD [rdi+0*SIZEOF_YMMWORD], ymmA
    vmovdqu     YMMWORD [rdi+1*SIZEOF_YMMWORD], ymmD
    vmovdqu     YMMWORD [rdi+2*SIZEOF_YMMWORD], ymmF
.out0:
    add         rdi, byte RGB_PIXELSIZE*SIZEOF_YMMWORD  ; outptr
    sub         rcx, byte SIZEOF_YMMWORD
    jz          near .endcolumn

    add         rsi, byte SIZEOF_YMMWORD  ; inptr0
    dec         al                        ; Yctr
    jnz         near .Yloop_2nd

    add         rbx, byte SIZEOF_YMMWORD  ; inptr1
    add         rdx, byte SIZEOF_YMMWORD  ; inptr2
    jmp         near .columnloop

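    ; Fewer than 32 pixels remain: convert the count to bytes and flush the
    ; packed RGB data with progressively smaller stores (64, 32, 16, 8, 4, 2,
    ; and finally 1 byte), shifting the data down after each partial store so
    ; that nothing beyond the end of the output row is written.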
.column_st64:
    lea         rcx, [rcx+rcx*2]            ; imul ecx, RGB_PIXELSIZE
    cmp         rcx, byte 2*SIZEOF_YMMWORD
    jb          short .column_st32
    vmovdqu     YMMWORD [rdi+0*SIZEOF_YMMWORD], ymmA
    vmovdqu     YMMWORD [rdi+1*SIZEOF_YMMWORD], ymmD
    add         rdi, byte 2*SIZEOF_YMMWORD  ; outptr
    vmovdqa     ymmA, ymmF
    sub         rcx, byte 2*SIZEOF_YMMWORD
    jmp         short .column_st31
.column_st32:
    cmp         rcx, byte SIZEOF_YMMWORD
    jb          short .column_st31
    vmovdqu     YMMWORD [rdi+0*SIZEOF_YMMWORD], ymmA
    add         rdi, byte SIZEOF_YMMWORD    ; outptr
    vmovdqa     ymmA, ymmD
    sub         rcx, byte SIZEOF_YMMWORD
    jmp         short .column_st31
.column_st31:
    cmp         rcx, byte SIZEOF_XMMWORD
    jb          short .column_st15
    vmovdqu     XMMWORD [rdi+0*SIZEOF_XMMWORD], xmmA
    add         rdi, byte SIZEOF_XMMWORD    ; outptr
    vperm2i128  ymmA, ymmA, ymmA, 1
    sub         rcx, byte SIZEOF_XMMWORD
.column_st15:
    ; Store the lower 8 bytes of xmmA to the output when it has enough
    ; space.
    cmp         rcx, byte SIZEOF_MMWORD
    jb          short .column_st7
    vmovq       XMM_MMWORD [rdi], xmmA
    add         rdi, byte SIZEOF_MMWORD
    sub         rcx, byte SIZEOF_MMWORD
    vpsrldq     xmmA, xmmA, SIZEOF_MMWORD
.column_st7:
    ; Store the lower 4 bytes of xmmA to the output when it has enough
    ; space.
    cmp         rcx, byte SIZEOF_DWORD
    jb          short .column_st3
    vmovd       XMM_DWORD [rdi], xmmA
    add         rdi, byte SIZEOF_DWORD
    sub         rcx, byte SIZEOF_DWORD
    vpsrldq     xmmA, xmmA, SIZEOF_DWORD
.column_st3:
    ; Store the lower 2 bytes of rax to the output when it has enough
    ; space.
    vmovd       eax, xmmA
    cmp         rcx, byte SIZEOF_WORD
    jb          short .column_st1
    mov         word [rdi], ax
    add         rdi, byte SIZEOF_WORD
    sub         rcx, byte SIZEOF_WORD
    shr         rax, 16
.column_st1:
    ; Store the lower 1 byte of rax to the output when it has enough
    ; space.
    test        rcx, rcx
    jz          short .endcolumn
    mov         byte [rdi], al

%else  ; RGB_PIXELSIZE == 4 ; -----------

%ifdef RGBX_FILLER_0XFF
    vpcmpeqb    ymm6, ymm6, ymm6        ; ymm6=XE=X(02468ACE********GIKMOQSU********)
    vpcmpeqb    ymm7, ymm7, ymm7        ; ymm7=XO=X(13579BDF********HJLNPRTV********)
%else
    vpxor       ymm6, ymm6, ymm6        ; ymm6=XE=X(02468ACE********GIKMOQSU********)
    vpxor       ymm7, ymm7, ymm7        ; ymm7=XO=X(13579BDF********HJLNPRTV********)
%endif
    ; ymmA=(00 02 04 06 08 0A 0C 0E ** 0G 0I 0K 0M 0O 0Q 0S 0U **)
    ; ymmB=(01 03 05 07 09 0B 0D 0F ** 0H 0J 0L 0N 0P 0R 0T 0V **)
    ; ymmC=(10 12 14 16 18 1A 1C 1E ** 1G 1I 1K 1M 1O 1Q 1S 1U **)
    ; ymmD=(11 13 15 17 19 1B 1D 1F ** 1H 1J 1L 1N 1P 1R 1T 1V **)
    ; ymmE=(20 22 24 26 28 2A 2C 2E ** 2G 2I 2K 2M 2O 2Q 2S 2U **)
    ; ymmF=(21 23 25 27 29 2B 2D 2F ** 2H 2J 2L 2N 2P 2R 2T 2V **)
    ; ymmG=(30 32 34 36 38 3A 3C 3E ** 3G 3I 3K 3M 3O 3Q 3S 3U **)
    ; ymmH=(31 33 35 37 39 3B 3D 3F ** 3H 3J 3L 3N 3P 3R 3T 3V **)

    vpunpcklbw  ymmA, ymmA, ymmC        ; ymmA=(00 10 02 12 04 14 06 16 08 18 0A 1A 0C 1C 0E 1E
                                        ;       0G 1G 0I 1I 0K 1K 0M 1M 0O 1O 0Q 1Q 0S 1S 0U 1U)
    vpunpcklbw  ymmE, ymmE, ymmG        ; ymmE=(20 30 22 32 24 34 26 36 28 38 2A 3A 2C 3C 2E 3E
                                        ;       2G 3G 2I 3I 2K 3K 2M 3M 2O 3O 2Q 3Q 2S 3S 2U 3U)
    vpunpcklbw  ymmB, ymmB, ymmD        ; ymmB=(01 11 03 13 05 15 07 17 09 19 0B 1B 0D 1D 0F 1F
                                        ;       0H 1H 0J 1J 0L 1L 0N 1N 0P 1P 0R 1R 0T 1T 0V 1V)
    vpunpcklbw  ymmF, ymmF, ymmH        ; ymmF=(21 31 23 33 25 35 27 37 29 39 2B 3B 2D 3D 2F 3F
                                        ;       2H 3H 2J 3J 2L 3L 2N 3N 2P 3P 2R 3R 2T 3T 2V 3V)

    vpunpckhwd  ymmC, ymmA, ymmE        ; ymmC=(08 18 28 38 0A 1A 2A 3A 0C 1C 2C 3C 0E 1E 2E 3E
                                        ;       0O 1O 2O 3O 0Q 1Q 2Q 3Q 0S 1S 2S 3S 0U 1U 2U 3U)
    vpunpcklwd  ymmA, ymmA, ymmE        ; ymmA=(00 10 20 30 02 12 22 32 04 14 24 34 06 16 26 36
                                        ;       0G 1G 2G 3G 0I 1I 2I 3I 0K 1K 2K 3K 0M 1M 2M 3M)
    vpunpckhwd  ymmG, ymmB, ymmF        ; ymmG=(09 19 29 39 0B 1B 2B 3B 0D 1D 2D 3D 0F 1F 2F 3F
                                        ;       0P 1P 2P 3P 0R 1R 2R 3R 0T 1T 2T 3T 0V 1V 2V 3V)
    vpunpcklwd  ymmB, ymmB, ymmF        ; ymmB=(01 11 21 31 03 13 23 33 05 15 25 35 07 17 27 37
                                        ;       0H 1H 2H 3H 0J 1J 2J 3J 0L 1L 2L 3L 0N 1N 2N 3N)

    vpunpckhdq  ymmE, ymmA, ymmB        ; ymmE=(04 14 24 34 05 15 25 35 06 16 26 36 07 17 27 37
                                        ;       0K 1K 2K 3K 0L 1L 2L 3L 0M 1M 2M 3M 0N 1N 2N 3N)
    vpunpckldq  ymmB, ymmA, ymmB        ; ymmB=(00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33
                                        ;       0G 1G 2G 3G 0H 1H 2H 3H 0I 1I 2I 3I 0J 1J 2J 3J)
    vpunpckhdq  ymmF, ymmC, ymmG        ; ymmF=(0C 1C 2C 3C 0D 1D 2D 3D 0E 1E 2E 3E 0F 1F 2F 3F
                                        ;       0S 1S 2S 3S 0T 1T 2T 3T 0U 1U 2U 3U 0V 1V 2V 3V)
    vpunpckldq  ymmG, ymmC, ymmG        ; ymmG=(08 18 28 38 09 19 29 39 0A 1A 2A 3A 0B 1B 2B 3B
                                        ;       0O 1O 2O 3O 0P 1P 2P 3P 0Q 1Q 2Q 3Q 0R 1R 2R 3R)

    vperm2i128  ymmA, ymmB, ymmE, 0x20  ; ymmA=(00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33
                                        ;       04 14 24 34 05 15 25 35 06 16 26 36 07 17 27 37)
    vperm2i128  ymmD, ymmG, ymmF, 0x20  ; ymmD=(08 18 28 38 09 19 29 39 0A 1A 2A 3A 0B 1B 2B 3B
                                        ;       0C 1C 2C 3C 0D 1D 2D 3D 0E 1E 2E 3E 0F 1F 2F 3F)
    vperm2i128  ymmC, ymmB, ymmE, 0x31  ; ymmC=(0G 1G 2G 3G 0H 1H 2H 3H 0I 1I 2I 3I 0J 1J 2J 3J
                                        ;       0K 1K 2K 3K 0L 1L 2L 3L 0M 1M 2M 3M 0N 1N 2N 3N)
    vperm2i128  ymmH, ymmG, ymmF, 0x31  ; ymmH=(0O 1O 2O 3O 0P 1P 2P 3P 0Q 1Q 2Q 3Q 0R 1R 2R 3R
                                        ;       0S 1S 2S 3S 0T 1T 2T 3T 0U 1U 2U 3U 0V 1V 2V 3V)

    cmp         rcx, byte SIZEOF_YMMWORD
    jb          short .column_st64

    test        rdi, SIZEOF_YMMWORD-1
    jnz         short .out1
    ; --(aligned)-------------------
    vmovntdq    YMMWORD [rdi+0*SIZEOF_YMMWORD], ymmA
    vmovntdq    YMMWORD [rdi+1*SIZEOF_YMMWORD], ymmD
    vmovntdq    YMMWORD [rdi+2*SIZEOF_YMMWORD], ymmC
    vmovntdq    YMMWORD [rdi+3*SIZEOF_YMMWORD], ymmH
    jmp         short .out0
.out1:  ; --(unaligned)-----------------
    vmovdqu     YMMWORD [rdi+0*SIZEOF_YMMWORD], ymmA
    vmovdqu     YMMWORD [rdi+1*SIZEOF_YMMWORD], ymmD
    vmovdqu     YMMWORD [rdi+2*SIZEOF_YMMWORD], ymmC
    vmovdqu     YMMWORD [rdi+3*SIZEOF_YMMWORD], ymmH
.out0:
    add         rdi, RGB_PIXELSIZE*SIZEOF_YMMWORD  ; outptr
    sub         rcx, byte SIZEOF_YMMWORD
    jz          near .endcolumn

    add         rsi, byte SIZEOF_YMMWORD  ; inptr0
    dec         al
    jnz         near .Yloop_2nd

    add         rbx, byte SIZEOF_YMMWORD  ; inptr1
    add         rdx, byte SIZEOF_YMMWORD  ; inptr2
    jmp         near .columnloop

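    ; Fewer than 32 pixels remain.  rcx still counts pixels here (each pixel
    ; is 4 bytes), so the thresholds below are expressed as fractions of
    ; SIZEOF_YMMWORD: SIZEOF_YMMWORD/2 pixels fill two ymmwords of output,
    ; SIZEOF_YMMWORD/4 pixels fill one, and so on down to a single pixel.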
.column_st64:
    cmp         rcx, byte SIZEOF_YMMWORD/2
    jb          short .column_st32
    vmovdqu     YMMWORD [rdi+0*SIZEOF_YMMWORD], ymmA
    vmovdqu     YMMWORD [rdi+1*SIZEOF_YMMWORD], ymmD
    add         rdi, byte 2*SIZEOF_YMMWORD  ; outptr
    vmovdqa     ymmA, ymmC
    vmovdqa     ymmD, ymmH
    sub         rcx, byte SIZEOF_YMMWORD/2
.column_st32:
    cmp         rcx, byte SIZEOF_YMMWORD/4
    jb          short .column_st16
    vmovdqu     YMMWORD [rdi+0*SIZEOF_YMMWORD], ymmA
    add         rdi, byte SIZEOF_YMMWORD    ; outptr
    vmovdqa     ymmA, ymmD
    sub         rcx, byte SIZEOF_YMMWORD/4
.column_st16:
    cmp         rcx, byte SIZEOF_YMMWORD/8
    jb          short .column_st15
    vmovdqu     XMMWORD [rdi+0*SIZEOF_XMMWORD], xmmA
    add         rdi, byte SIZEOF_XMMWORD    ; outptr
    vperm2i128  ymmA, ymmA, ymmA, 1
    sub         rcx, byte SIZEOF_YMMWORD/8
.column_st15:
    ; Store two pixels (8 bytes) of ymmA to the output when it has enough
    ; space.
    cmp         rcx, byte SIZEOF_YMMWORD/16
    jb          short .column_st7
    vmovq       MMWORD [rdi], xmmA
    add         rdi, byte SIZEOF_YMMWORD/16*4
    sub         rcx, byte SIZEOF_YMMWORD/16
    vpsrldq     xmmA, xmmA, SIZEOF_YMMWORD/16*4
.column_st7:
    ; Store one pixel (4 bytes) of ymmA to the output when it has enough
    ; space.
    test        rcx, rcx
    jz          short .endcolumn
    vmovd       XMM_DWORD [rdi], xmmA

%endif  ; RGB_PIXELSIZE ; ---------------

.endcolumn:
    sfence                              ; flush the write buffer
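    ; (The vmovntdq stores above are weakly ordered non-temporal stores, so
    ; the fence guarantees they are globally visible before returning.)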

.return:
    pop         rbx
    vzeroupper
    uncollect_args 4
    mov         rsp, rbp                ; rsp <- aligned rbp
    pop         rsp                     ; rsp <- original rbp
    pop         rbp
    ret

; --------------------------------------------------------------------------
;
; Upsample and color convert for the case of 2:1 horizontal and 2:1 vertical.
;
; GLOBAL(void)
; jsimd_h2v2_merged_upsample_avx2(JDIMENSION output_width,
;                                 JSAMPIMAGE input_buf,
;                                 JDIMENSION in_row_group_ctr,
;                                 JSAMPARRAY output_buf);
;

; r10d = JDIMENSION output_width
; r11 = JSAMPIMAGE input_buf
; r12d = JDIMENSION in_row_group_ctr
; r13 = JSAMPARRAY output_buf
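;
; (The 2:1 vertical case is handled by building a temporary three-entry input
; pointer array on the stack and calling jsimd_h2v1_merged_upsample_avx2
; twice, once for each of the two output rows.  The luma entry is biased by
; the row group counter so that the two calls pick up Y rows 2*ctr and
; 2*ctr+1, while both calls reuse the same Cb and Cr rows.)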

    align       32
    GLOBAL_FUNCTION(jsimd_h2v2_merged_upsample_avx2)

EXTN(jsimd_h2v2_merged_upsample_avx2):
    push        rbp
    mov         rax, rsp
    mov         rbp, rsp
    collect_args 4
    push        rbx

    mov         eax, r10d

    mov         rdi, r11
    mov         ecx, r12d
    mov         rsi, JSAMPARRAY [rdi+0*SIZEOF_JSAMPARRAY]
    mov         rbx, JSAMPARRAY [rdi+1*SIZEOF_JSAMPARRAY]
    mov         rdx, JSAMPARRAY [rdi+2*SIZEOF_JSAMPARRAY]
    mov         rdi, r13
    lea         rsi, [rsi+rcx*SIZEOF_JSAMPROW]

    push        rdx                     ; inptr2
    push        rbx                     ; inptr1
    push        rsi                     ; inptr00
    mov         rbx, rsp

    push        rdi
    push        rcx
    push        rax

    %ifdef WIN64
    mov         r8, rcx
    mov         r9, rdi
    mov         rcx, rax
    mov         rdx, rbx
    %else
    mov         rdx, rcx
    mov         rcx, rdi
    mov         rdi, rax
    mov         rsi, rbx
    %endif

    call        EXTN(jsimd_h2v1_merged_upsample_avx2)

    pop         rax
    pop         rcx
    pop         rdi
    pop         rsi
    pop         rbx
    pop         rdx

    add         rdi, byte SIZEOF_JSAMPROW  ; outptr1
    add         rsi, byte SIZEOF_JSAMPROW  ; inptr01

    push        rdx                     ; inptr2
    push        rbx                     ; inptr1
    push        rsi                     ; inptr00
    mov         rbx, rsp

    push        rdi
    push        rcx
    push        rax

    %ifdef WIN64
    mov         r8, rcx
    mov         r9, rdi
    mov         rcx, rax
    mov         rdx, rbx
    %else
    mov         rdx, rcx
    mov         rcx, rdi
    mov         rdi, rax
    mov         rsi, rbx
    %endif

    call        EXTN(jsimd_h2v1_merged_upsample_avx2)

    pop         rax
    pop         rcx
    pop         rdi
    pop         rsi
    pop         rbx
    pop         rdx

    pop         rbx
    uncollect_args 4
    pop         rbp
    ret

; For some reason, the OS X linker does not honor the request to align the
; segment unless we do this.
    align       32