;
; jquant.asm - sample data conversion and quantization (MMX)
;
; Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
; Copyright (C) 2016, D. R. Commander.
;
; Based on the x86 SIMD extension for IJG JPEG library
; Copyright (C) 1999-2006, MIYASAKA Masaru.
; For conditions of distribution and use, see copyright notice in jsimdext.inc
;
; This file should be assembled with NASM (Netwide Assembler) and can
; *not* be assembled with Microsoft's MASM or any compatible assembler
; (including Borland's Turbo Assembler).
; NASM is available from http://nasm.sourceforge.net/ or
; http://sourceforge.net/project/showfiles.php?group_id=6208

%include "jsimdext.inc"
%include "jdct.inc"

; --------------------------------------------------------------------------
    SECTION SEG_TEXT
    BITS    32
;
; Load data into workspace, applying unsigned->signed conversion
;
; GLOBAL(void)
; jsimd_convsamp_mmx(JSAMPARRAY sample_data, JDIMENSION start_col,
;                    DCTELEM *workspace);
;

%define sample_data  ebp + 8            ; JSAMPARRAY sample_data
%define start_col    ebp + 12           ; JDIMENSION start_col
%define workspace    ebp + 16           ; DCTELEM *workspace

    align   32
    GLOBAL_FUNCTION(jsimd_convsamp_mmx)

EXTN(jsimd_convsamp_mmx):
    push    ebp
    mov     ebp, esp
    push    ebx
;   push    ecx                     ; need not be preserved
;   push    edx                     ; need not be preserved
    push    esi
    push    edi

    pxor    mm6, mm6                ; mm6=(all 0's)
    pcmpeqw mm7, mm7
    psllw   mm7, 7                  ; mm7={0xFF80 0xFF80 0xFF80 0xFF80}

    mov     esi, JSAMPARRAY [sample_data]  ; (JSAMPROW *)
    mov     eax, JDIMENSION [start_col]
    mov     edi, POINTER [workspace]       ; (DCTELEM *)
    mov     ecx, DCTSIZE/4
    alignx  16, 7
.convloop:
    mov     ebx, JSAMPROW [esi+0*SIZEOF_JSAMPROW]  ; (JSAMPLE *)
    mov     edx, JSAMPROW [esi+1*SIZEOF_JSAMPROW]  ; (JSAMPLE *)

    movq    mm0, MMWORD [ebx+eax*SIZEOF_JSAMPLE]   ; mm0=(01234567)
    movq    mm1, MMWORD [edx+eax*SIZEOF_JSAMPLE]   ; mm1=(89ABCDEF)

    mov     ebx, JSAMPROW [esi+2*SIZEOF_JSAMPROW]  ; (JSAMPLE *)
    mov     edx, JSAMPROW [esi+3*SIZEOF_JSAMPROW]  ; (JSAMPLE *)

    movq    mm2, MMWORD [ebx+eax*SIZEOF_JSAMPLE]   ; mm2=(GHIJKLMN)
    movq    mm3, MMWORD [edx+eax*SIZEOF_JSAMPLE]   ; mm3=(OPQRSTUV)

    movq      mm4, mm0
    punpcklbw mm0, mm6              ; mm0=(0123)
    punpckhbw mm4, mm6              ; mm4=(4567)
    movq      mm5, mm1
    punpcklbw mm1, mm6              ; mm1=(89AB)
    punpckhbw mm5, mm6              ; mm5=(CDEF)

    paddw   mm0, mm7
    paddw   mm4, mm7
    paddw   mm1, mm7
    paddw   mm5, mm7

    movq    MMWORD [MMBLOCK(0,0,edi,SIZEOF_DCTELEM)], mm0
    movq    MMWORD [MMBLOCK(0,1,edi,SIZEOF_DCTELEM)], mm4
    movq    MMWORD [MMBLOCK(1,0,edi,SIZEOF_DCTELEM)], mm1
    movq    MMWORD [MMBLOCK(1,1,edi,SIZEOF_DCTELEM)], mm5

    movq      mm0, mm2
    punpcklbw mm2, mm6              ; mm2=(GHIJ)
    punpckhbw mm0, mm6              ; mm0=(KLMN)
    movq      mm4, mm3
    punpcklbw mm3, mm6              ; mm3=(OPQR)
    punpckhbw mm4, mm6              ; mm4=(STUV)

    paddw   mm2, mm7
    paddw   mm0, mm7
    paddw   mm3, mm7
    paddw   mm4, mm7

    movq    MMWORD [MMBLOCK(2,0,edi,SIZEOF_DCTELEM)], mm2
    movq    MMWORD [MMBLOCK(2,1,edi,SIZEOF_DCTELEM)], mm0
    movq    MMWORD [MMBLOCK(3,0,edi,SIZEOF_DCTELEM)], mm3
    movq    MMWORD [MMBLOCK(3,1,edi,SIZEOF_DCTELEM)], mm4

    add     esi, byte 4*SIZEOF_JSAMPROW
    add     edi, byte 4*DCTSIZE*SIZEOF_DCTELEM
    dec     ecx
    jnz     short .convloop

    emms                            ; empty MMX state

    pop     edi
    pop     esi
;   pop     edx                     ; need not be preserved
;   pop     ecx                     ; need not be preserved
    pop     ebx
    pop     ebp
    ret
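
; For reference, here is a rough scalar C equivalent of the routine above (a
; sketch under the usual 8-bit JSAMPLE assumptions; the function name is
; hypothetical, and this is not code taken from the library's C sources).
; Each sample is zero-extended to 16 bits and level-shifted from unsigned to
; signed range; adding the 0xFF80 constant held in mm7 to a zero-extended
; byte is the same as subtracting CENTERJSAMPLE (128):
;
; void convsamp_scalar(JSAMPARRAY sample_data, JDIMENSION start_col,
;                      DCTELEM *workspace)
; {
;   int row, col;
;
;   for (row = 0; row < DCTSIZE; row++) {
;     JSAMPROW p = sample_data[row] + start_col;
;     for (col = 0; col < DCTSIZE; col++)
;       workspace[row * DCTSIZE + col] = (DCTELEM)p[col] - 128;
;   }
; }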

; --------------------------------------------------------------------------
;
; Quantize/descale the coefficients, and store into coef_block
;
; This implementation is based on an algorithm described in
; "How to optimize for the Pentium family of microprocessors"
; (http://www.agner.org/assem/).
;
; GLOBAL(void)
; jsimd_quantize_mmx(JCOEFPTR coef_block, DCTELEM *divisors,
;                    DCTELEM *workspace);
;

%define RECIPROCAL(m, n, b) \
  MMBLOCK(DCTSIZE * 0 + (m), (n), (b), SIZEOF_DCTELEM)
%define CORRECTION(m, n, b) \
  MMBLOCK(DCTSIZE * 1 + (m), (n), (b), SIZEOF_DCTELEM)
%define SCALE(m, n, b) \
  MMBLOCK(DCTSIZE * 2 + (m), (n), (b), SIZEOF_DCTELEM)
%define SHIFT(m, n, b) \
  MMBLOCK(DCTSIZE * 3 + (m), (n), (b), SIZEOF_DCTELEM)

%define coef_block  ebp + 8             ; JCOEFPTR coef_block
%define divisors    ebp + 12            ; DCTELEM *divisors
%define workspace   ebp + 16            ; DCTELEM *workspace

    align   32
    GLOBAL_FUNCTION(jsimd_quantize_mmx)

EXTN(jsimd_quantize_mmx):
    push    ebp
    mov     ebp, esp
;   push    ebx                     ; unused
;   push    ecx                     ; unused
;   push    edx                     ; need not be preserved
    push    esi
    push    edi

    mov     esi, POINTER [workspace]
    mov     edx, POINTER [divisors]
    mov     edi, JCOEFPTR [coef_block]
    mov     ah, 2
    alignx  16, 7
.quantloop1:
    mov     al, DCTSIZE2/8/2
    alignx  16, 7
.quantloop2:
    movq    mm2, MMWORD [MMBLOCK(0,0,esi,SIZEOF_DCTELEM)]
    movq    mm3, MMWORD [MMBLOCK(0,1,esi,SIZEOF_DCTELEM)]

    movq    mm0, mm2
    movq    mm1, mm3

    psraw   mm2, (WORD_BIT-1)       ; -1 if value < 0, 0 otherwise
    psraw   mm3, (WORD_BIT-1)

    pxor    mm0, mm2                ; val = -val
    pxor    mm1, mm3
    psubw   mm0, mm2
    psubw   mm1, mm3

    ;
    ; MMX is an annoyingly crappy instruction set. It has two
    ; misfeatures that are causing problems here:
    ;
    ; - All multiplications are signed.
    ;
    ; - The second operand for the shifts is not treated as packed.
    ;
    ;
    ; We work around the first problem by implementing this algorithm:
    ;
    ; unsigned long unsigned_multiply(unsigned short x, unsigned short y)
    ; {
    ;   enum { SHORT_BIT = 16 };
    ;   signed short sx = (signed short)x;
    ;   signed short sy = (signed short)y;
    ;   signed long sz;
    ;
    ;   sz = (long)sx * (long)sy;    /* signed multiply */
    ;
    ;   if (sx < 0) sz += (long)sy << SHORT_BIT;
    ;   if (sy < 0) sz += (long)sx << SHORT_BIT;
    ;
    ;   return (unsigned long)sz;
    ; }
    ;
    ; (note that a negative sx adds _sy_ and vice versa)
    ;
    ; For the second problem, we replace the shift with a multiplication.
    ; Unfortunately, that means we have to deal with the sign issue again.
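    ;
    ; To make those workarounds concrete, here is a hedged C sketch (the
    ; helper name is hypothetical) of the unsigned high-word multiply that
    ; the pmulhw/pand/paddw sequences below implement. Only the high 16 bits
    ; of unsigned_multiply()'s result are kept, so the two corrections
    ; shrink to 16-bit adds, and the scale operand is a power of two chosen
    ; so that taking the high word of the product performs the desired
    ; right shift:
    ;
    ; unsigned short mulhi_unsigned(unsigned short x, unsigned short y)
    ; {
    ;   signed long sz = (long)(signed short)x * (long)(signed short)y;
    ;   unsigned short hi = (unsigned short)((unsigned long)sz >> 16);
    ;
    ;   if ((signed short)y < 0) hi += x;  /* pand/paddw fixup #1 below */
    ;   if ((signed short)x < 0) hi += y;  /* pand/paddw fixup #2 below */
    ;
    ;   return hi;
    ; }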
    ;

    paddw   mm0, MMWORD [CORRECTION(0,0,edx)]  ; correction + roundfactor
    paddw   mm1, MMWORD [CORRECTION(0,1,edx)]

    movq    mm4, mm0                ; store current value for later
    movq    mm5, mm1
    pmulhw  mm0, MMWORD [RECIPROCAL(0,0,edx)]  ; reciprocal
    pmulhw  mm1, MMWORD [RECIPROCAL(0,1,edx)]
    paddw   mm0, mm4                ; reciprocal is always negative (MSB=1),
    paddw   mm1, mm5                ; so we always need to add the initial value
                                    ; (input value is never negative as we
                                    ; inverted it at the start of this routine)

    ; here it gets a bit tricky, as both the scale value
    ; and mm0/mm1 can be negative
    movq    mm6, MMWORD [SCALE(0,0,edx)]  ; scale
    movq    mm7, MMWORD [SCALE(0,1,edx)]
    movq    mm4, mm0
    movq    mm5, mm1
    pmulhw  mm0, mm6
    pmulhw  mm1, mm7

    psraw   mm6, (WORD_BIT-1)       ; determine if scale is negative
    psraw   mm7, (WORD_BIT-1)

    pand    mm6, mm4                ; and add the input if it is
    pand    mm7, mm5
    paddw   mm0, mm6
    paddw   mm1, mm7

    psraw   mm4, (WORD_BIT-1)       ; then check whether the input is negative
    psraw   mm5, (WORD_BIT-1)

    pand    mm4, MMWORD [SCALE(0,0,edx)]  ; and add the scale if it is
    pand    mm5, MMWORD [SCALE(0,1,edx)]
    paddw   mm0, mm4
    paddw   mm1, mm5

    pxor    mm0, mm2                ; val = -val
    pxor    mm1, mm3
    psubw   mm0, mm2
    psubw   mm1, mm3

    movq    MMWORD [MMBLOCK(0,0,edi,SIZEOF_DCTELEM)], mm0
    movq    MMWORD [MMBLOCK(0,1,edi,SIZEOF_DCTELEM)], mm1

    add     esi, byte 8*SIZEOF_DCTELEM
    add     edx, byte 8*SIZEOF_DCTELEM
    add     edi, byte 8*SIZEOF_JCOEF
    dec     al
    jnz     near .quantloop2
    dec     ah
    jnz     near .quantloop1        ; to avoid branch misprediction

    emms                            ; empty MMX state

    pop     edi
    pop     esi
;   pop     edx                     ; need not be preserved
;   pop     ecx                     ; unused
;   pop     ebx                     ; unused
    pop     ebp
    ret

; For some reason, the OS X linker does not honor the request to align the
; segment unless we do this.
    align   32
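
; Putting the pieces together, a hedged scalar sketch of what
; jsimd_quantize_mmx computes per coefficient (for reference only; the
; function name is hypothetical, and the operand layout follows the
; RECIPROCAL/CORRECTION/SCALE macros above):
;
; JCOEF quantize_one(DCTELEM x, unsigned short recip, unsigned short corr,
;                    unsigned short scale)
; {
;   int neg = (x < 0);
;   unsigned long t = (unsigned short)(neg ? -x : x);
;
;   t = ((t + corr) * recip) >> 16;  /* divide via reciprocal multiply */
;   t = (t * scale) >> 16;           /* descale: right shift via multiply */
;
;   return (JCOEF)(neg ? -(int)t : (int)t);
; }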