1 /* 2 * Loongson SIMD utils 3 * 4 * Copyright (c) 2016 Loongson Technology Corporation Limited 5 * Copyright (c) 2016 Zhou Xiaoyong <zhouxiaoyong@loongson.cn> 6 * 7 * This file is part of FFmpeg. 8 * 9 * FFmpeg is free software; you can redistribute it and/or 10 * modify it under the terms of the GNU Lesser General Public 11 * License as published by the Free Software Foundation; either 12 * version 2.1 of the License, or (at your option) any later version. 13 * 14 * FFmpeg is distributed in the hope that it will be useful, 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 * Lesser General Public License for more details. 18 * 19 * You should have received a copy of the GNU Lesser General Public 20 * License along with FFmpeg; if not, write to the Free Software 21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 22 */ 23 24 #ifndef AVUTIL_MIPS_MMIUTILS_H 25 #define AVUTIL_MIPS_MMIUTILS_H 26 27 #include "config.h" 28 29 #include "libavutil/mem_internal.h" 30 #include "libavutil/mips/asmdefs.h" 31 32 #if HAVE_LOONGSON2 33 34 #define DECLARE_VAR_LOW32 int32_t low32 35 #define RESTRICT_ASM_LOW32 [low32]"=&r"(low32), 36 #define DECLARE_VAR_ALL64 int64_t all64 37 #define RESTRICT_ASM_ALL64 [all64]"=&r"(all64), 38 #define DECLARE_VAR_ADDRT mips_reg addrt 39 #define RESTRICT_ASM_ADDRT [addrt]"=&r"(addrt), 40 41 #define MMI_LWX(reg, addr, stride, bias) \ 42 PTR_ADDU "%[addrt], "#addr", "#stride" \n\t" \ 43 "lw "#reg", "#bias"(%[addrt]) \n\t" 44 45 #define MMI_SWX(reg, addr, stride, bias) \ 46 PTR_ADDU "%[addrt], "#addr", "#stride" \n\t" \ 47 "sw "#reg", "#bias"(%[addrt]) \n\t" 48 49 #define MMI_LDX(reg, addr, stride, bias) \ 50 PTR_ADDU "%[addrt], "#addr", "#stride" \n\t" \ 51 "ld "#reg", "#bias"(%[addrt]) \n\t" 52 53 #define MMI_SDX(reg, addr, stride, bias) \ 54 PTR_ADDU "%[addrt], "#addr", "#stride" \n\t" \ 55 "sd "#reg", "#bias"(%[addrt]) \n\t" 56 57 #define MMI_LWC1(fp, addr, bias) \ 58 "lwc1 "#fp", "#bias"("#addr") \n\t" 59 60 #define MMI_ULWC1(fp, addr, bias) \ 61 "ulw %[low32], "#bias"("#addr") \n\t" \ 62 "mtc1 %[low32], "#fp" \n\t" 63 64 #define MMI_LWXC1(fp, addr, stride, bias) \ 65 PTR_ADDU "%[addrt], "#addr", "#stride" \n\t" \ 66 MMI_LWC1(fp, %[addrt], bias) 67 68 #define MMI_SWC1(fp, addr, bias) \ 69 "swc1 "#fp", "#bias"("#addr") \n\t" 70 71 #define MMI_USWC1(fp, addr, bias) \ 72 "mfc1 %[low32], "#fp" \n\t" \ 73 "usw %[low32], "#bias"("#addr") \n\t" 74 75 #define MMI_SWXC1(fp, addr, stride, bias) \ 76 PTR_ADDU "%[addrt], "#addr", "#stride" \n\t" \ 77 MMI_SWC1(fp, %[addrt], bias) 78 79 #define MMI_LDC1(fp, addr, bias) \ 80 "ldc1 "#fp", "#bias"("#addr") \n\t" 81 82 #define MMI_ULDC1(fp, addr, bias) \ 83 "uld %[all64], "#bias"("#addr") \n\t" \ 84 "dmtc1 %[all64], "#fp" \n\t" 85 86 #define MMI_LDXC1(fp, addr, stride, bias) \ 87 PTR_ADDU "%[addrt], "#addr", "#stride" \n\t" \ 88 MMI_LDC1(fp, %[addrt], bias) 89 90 #define MMI_SDC1(fp, addr, bias) \ 91 "sdc1 "#fp", "#bias"("#addr") \n\t" 92 93 #define MMI_USDC1(fp, addr, bias) \ 94 "dmfc1 %[all64], "#fp" \n\t" \ 95 "usd %[all64], "#bias"("#addr") \n\t" 96 97 #define MMI_SDXC1(fp, addr, stride, bias) \ 98 PTR_ADDU "%[addrt], "#addr", "#stride" \n\t" \ 99 MMI_SDC1(fp, %[addrt], bias) 100 101 #define MMI_LQ(reg1, reg2, addr, bias) \ 102 "ld "#reg1", "#bias"("#addr") \n\t" \ 103 "ld "#reg2", 8+"#bias"("#addr") \n\t" 104 105 #define MMI_SQ(reg1, reg2, addr, bias) \ 106 "sd "#reg1", "#bias"("#addr") \n\t" \ 107 "sd "#reg2", 8+"#bias"("#addr") \n\t" 108 109 #define MMI_LQC1(fp1, fp2, addr, bias) \ 110 "ldc1 "#fp1", "#bias"("#addr") \n\t" \ 111 "ldc1 "#fp2", 8+"#bias"("#addr") \n\t" 112 113 #define MMI_SQC1(fp1, fp2, addr, bias) \ 114 "sdc1 "#fp1", "#bias"("#addr") \n\t" \ 115 "sdc1 "#fp2", 8+"#bias"("#addr") \n\t" 116 117 #elif HAVE_LOONGSON3 /* !HAVE_LOONGSON2 */ 118 119 #define DECLARE_VAR_ALL64 120 #define RESTRICT_ASM_ALL64 121 #define DECLARE_VAR_ADDRT 122 #define RESTRICT_ASM_ADDRT 123 124 #define MMI_LWX(reg, addr, stride, bias) \ 125 "gslwx "#reg", "#bias"("#addr", "#stride") \n\t" 126 127 #define MMI_SWX(reg, addr, stride, bias) \ 128 "gsswx "#reg", "#bias"("#addr", "#stride") \n\t" 129 130 #define MMI_LDX(reg, addr, stride, bias) \ 131 "gsldx "#reg", "#bias"("#addr", "#stride") \n\t" 132 133 #define MMI_SDX(reg, addr, stride, bias) \ 134 "gssdx "#reg", "#bias"("#addr", "#stride") \n\t" 135 136 #define MMI_LWC1(fp, addr, bias) \ 137 "lwc1 "#fp", "#bias"("#addr") \n\t" 138 139 #if _MIPS_SIM == _ABIO32 /* workaround for 3A2000 gslwlc1 bug */ 140 141 #define DECLARE_VAR_LOW32 int32_t low32 142 #define RESTRICT_ASM_LOW32 [low32]"=&r"(low32), 143 144 #define MMI_ULWC1(fp, addr, bias) \ 145 "ulw %[low32], "#bias"("#addr") \n\t" \ 146 "mtc1 %[low32], "#fp" \n\t" 147 148 #else /* _MIPS_SIM != _ABIO32 */ 149 150 #define DECLARE_VAR_LOW32 151 #define RESTRICT_ASM_LOW32 152 153 #define MMI_ULWC1(fp, addr, bias) \ 154 "gslwlc1 "#fp", 3+"#bias"("#addr") \n\t" \ 155 "gslwrc1 "#fp", "#bias"("#addr") \n\t" 156 157 #endif /* _MIPS_SIM != _ABIO32 */ 158 159 #define MMI_LWXC1(fp, addr, stride, bias) \ 160 "gslwxc1 "#fp", "#bias"("#addr", "#stride") \n\t" 161 162 #define MMI_SWC1(fp, addr, bias) \ 163 "swc1 "#fp", "#bias"("#addr") \n\t" 164 165 #define MMI_USWC1(fp, addr, bias) \ 166 "gsswlc1 "#fp", 3+"#bias"("#addr") \n\t" \ 167 "gsswrc1 "#fp", "#bias"("#addr") \n\t" 168 169 #define MMI_SWXC1(fp, addr, stride, bias) \ 170 "gsswxc1 "#fp", "#bias"("#addr", "#stride") \n\t" 171 172 #define MMI_LDC1(fp, addr, bias) \ 173 "ldc1 "#fp", "#bias"("#addr") \n\t" 174 175 #define MMI_ULDC1(fp, addr, bias) \ 176 "gsldlc1 "#fp", 7+"#bias"("#addr") \n\t" \ 177 "gsldrc1 "#fp", "#bias"("#addr") \n\t" 178 179 #define MMI_LDXC1(fp, addr, stride, bias) \ 180 "gsldxc1 "#fp", "#bias"("#addr", "#stride") \n\t" 181 182 #define MMI_SDC1(fp, addr, bias) \ 183 "sdc1 "#fp", "#bias"("#addr") \n\t" 184 185 #define MMI_USDC1(fp, addr, bias) \ 186 "gssdlc1 "#fp", 7+"#bias"("#addr") \n\t" \ 187 "gssdrc1 "#fp", "#bias"("#addr") \n\t" 188 189 #define MMI_SDXC1(fp, addr, stride, bias) \ 190 "gssdxc1 "#fp", "#bias"("#addr", "#stride") \n\t" 191 192 #define MMI_LQ(reg1, reg2, addr, bias) \ 193 "gslq "#reg1", "#reg2", "#bias"("#addr") \n\t" 194 195 #define MMI_SQ(reg1, reg2, addr, bias) \ 196 "gssq "#reg1", "#reg2", "#bias"("#addr") \n\t" 197 198 #define MMI_LQC1(fp1, fp2, addr, bias) \ 199 "gslqc1 "#fp1", "#fp2", "#bias"("#addr") \n\t" 200 201 #define MMI_SQC1(fp1, fp2, addr, bias) \ 202 "gssqc1 "#fp1", "#fp2", "#bias"("#addr") \n\t" 203 204 #endif /* HAVE_LOONGSON2 */ 205 206 /** 207 * backup register 208 */ 209 #define BACKUP_REG \ 210 LOCAL_ALIGNED_16(double, temp_backup_reg, [8]); \ 211 if (_MIPS_SIM == _ABI64) \ 212 __asm__ volatile ( \ 213 "gssqc1 $f25, $f24, 0x00(%[temp]) \n\t" \ 214 "gssqc1 $f27, $f26, 0x10(%[temp]) \n\t" \ 215 "gssqc1 $f29, $f28, 0x20(%[temp]) \n\t" \ 216 "gssqc1 $f31, $f30, 0x30(%[temp]) \n\t" \ 217 : \ 218 : [temp]"r"(temp_backup_reg) \ 219 : "memory" \ 220 ); \ 221 else \ 222 __asm__ volatile ( \ 223 "gssqc1 $f22, $f20, 0x00(%[temp]) \n\t" \ 224 "gssqc1 $f26, $f24, 0x10(%[temp]) \n\t" \ 225 "gssqc1 $f30, $f28, 0x20(%[temp]) \n\t" \ 226 : \ 227 : [temp]"r"(temp_backup_reg) \ 228 : "memory" \ 229 ); 230 231 /** 232 * recover register 233 */ 234 #define RECOVER_REG \ 235 if (_MIPS_SIM == _ABI64) \ 236 __asm__ volatile ( \ 237 "gslqc1 $f25, $f24, 0x00(%[temp]) \n\t" \ 238 "gslqc1 $f27, $f26, 0x10(%[temp]) \n\t" \ 239 "gslqc1 $f29, $f28, 0x20(%[temp]) \n\t" \ 240 "gslqc1 $f31, $f30, 0x30(%[temp]) \n\t" \ 241 : \ 242 : [temp]"r"(temp_backup_reg) \ 243 : "memory" \ 244 ); \ 245 else \ 246 __asm__ volatile ( \ 247 "gslqc1 $f22, $f20, 0x00(%[temp]) \n\t" \ 248 "gslqc1 $f26, $f24, 0x10(%[temp]) \n\t" \ 249 "gslqc1 $f30, $f28, 0x20(%[temp]) \n\t" \ 250 : \ 251 : [temp]"r"(temp_backup_reg) \ 252 : "memory" \ 253 ); 254 255 /** 256 * brief: Transpose 2X2 word packaged data. 257 * fr_i0, fr_i1: src 258 * fr_o0, fr_o1: dst 259 */ 260 #define TRANSPOSE_2W(fr_i0, fr_i1, fr_o0, fr_o1) \ 261 "punpcklwd "#fr_o0", "#fr_i0", "#fr_i1" \n\t" \ 262 "punpckhwd "#fr_o1", "#fr_i0", "#fr_i1" \n\t" 263 264 /** 265 * brief: Transpose 4X4 half word packaged data. 266 * fr_i0, fr_i1, fr_i2, fr_i3: src & dst 267 * fr_t0, fr_t1, fr_t2, fr_t3: temporary register 268 */ 269 #define TRANSPOSE_4H(fr_i0, fr_i1, fr_i2, fr_i3, \ 270 fr_t0, fr_t1, fr_t2, fr_t3) \ 271 "punpcklhw "#fr_t0", "#fr_i0", "#fr_i1" \n\t" \ 272 "punpckhhw "#fr_t1", "#fr_i0", "#fr_i1" \n\t" \ 273 "punpcklhw "#fr_t2", "#fr_i2", "#fr_i3" \n\t" \ 274 "punpckhhw "#fr_t3", "#fr_i2", "#fr_i3" \n\t" \ 275 "punpcklwd "#fr_i0", "#fr_t0", "#fr_t2" \n\t" \ 276 "punpckhwd "#fr_i1", "#fr_t0", "#fr_t2" \n\t" \ 277 "punpcklwd "#fr_i2", "#fr_t1", "#fr_t3" \n\t" \ 278 "punpckhwd "#fr_i3", "#fr_t1", "#fr_t3" \n\t" 279 280 /** 281 * brief: Transpose 8x8 byte packaged data. 282 * fr_i0~i7: src & dst 283 * fr_t0~t3: temporary register 284 */ 285 #define TRANSPOSE_8B(fr_i0, fr_i1, fr_i2, fr_i3, fr_i4, fr_i5, \ 286 fr_i6, fr_i7, fr_t0, fr_t1, fr_t2, fr_t3) \ 287 "punpcklbh "#fr_t0", "#fr_i0", "#fr_i1" \n\t" \ 288 "punpckhbh "#fr_t1", "#fr_i0", "#fr_i1" \n\t" \ 289 "punpcklbh "#fr_t2", "#fr_i2", "#fr_i3" \n\t" \ 290 "punpckhbh "#fr_t3", "#fr_i2", "#fr_i3" \n\t" \ 291 "punpcklbh "#fr_i0", "#fr_i4", "#fr_i5" \n\t" \ 292 "punpckhbh "#fr_i1", "#fr_i4", "#fr_i5" \n\t" \ 293 "punpcklbh "#fr_i2", "#fr_i6", "#fr_i7" \n\t" \ 294 "punpckhbh "#fr_i3", "#fr_i6", "#fr_i7" \n\t" \ 295 "punpcklhw "#fr_i4", "#fr_t0", "#fr_t2" \n\t" \ 296 "punpckhhw "#fr_i5", "#fr_t0", "#fr_t2" \n\t" \ 297 "punpcklhw "#fr_i6", "#fr_t1", "#fr_t3" \n\t" \ 298 "punpckhhw "#fr_i7", "#fr_t1", "#fr_t3" \n\t" \ 299 "punpcklhw "#fr_t0", "#fr_i0", "#fr_i2" \n\t" \ 300 "punpckhhw "#fr_t1", "#fr_i0", "#fr_i2" \n\t" \ 301 "punpcklhw "#fr_t2", "#fr_i1", "#fr_i3" \n\t" \ 302 "punpckhhw "#fr_t3", "#fr_i1", "#fr_i3" \n\t" \ 303 "punpcklwd "#fr_i0", "#fr_i4", "#fr_t0" \n\t" \ 304 "punpckhwd "#fr_i1", "#fr_i4", "#fr_t0" \n\t" \ 305 "punpcklwd "#fr_i2", "#fr_i5", "#fr_t1" \n\t" \ 306 "punpckhwd "#fr_i3", "#fr_i5", "#fr_t1" \n\t" \ 307 "punpcklwd "#fr_i4", "#fr_i6", "#fr_t2" \n\t" \ 308 "punpckhwd "#fr_i5", "#fr_i6", "#fr_t2" \n\t" \ 309 "punpcklwd "#fr_i6", "#fr_i7", "#fr_t3" \n\t" \ 310 "punpckhwd "#fr_i7", "#fr_i7", "#fr_t3" \n\t" 311 312 /** 313 * brief: Parallel SRA for 8 byte packaged data. 314 * fr_i0: src 315 * fr_i1: SRA number(SRAB number + 8) 316 * fr_t0, fr_t1: temporary register 317 * fr_d0: dst 318 */ 319 #define PSRAB_MMI(fr_i0, fr_i1, fr_t0, fr_t1, fr_d0) \ 320 "punpcklbh "#fr_t0", "#fr_t0", "#fr_i0" \n\t" \ 321 "punpckhbh "#fr_t1", "#fr_t1", "#fr_i0" \n\t" \ 322 "psrah "#fr_t0", "#fr_t0", "#fr_i1" \n\t" \ 323 "psrah "#fr_t1", "#fr_t1", "#fr_i1" \n\t" \ 324 "packsshb "#fr_d0", "#fr_t0", "#fr_t1" \n\t" 325 326 /** 327 * brief: Parallel SRL for 8 byte packaged data. 328 * fr_i0: src 329 * fr_i1: SRL number(SRLB number + 8) 330 * fr_t0, fr_t1: temporary register 331 * fr_d0: dst 332 */ 333 #define PSRLB_MMI(fr_i0, fr_i1, fr_t0, fr_t1, fr_d0) \ 334 "punpcklbh "#fr_t0", "#fr_t0", "#fr_i0" \n\t" \ 335 "punpckhbh "#fr_t1", "#fr_t1", "#fr_i0" \n\t" \ 336 "psrlh "#fr_t0", "#fr_t0", "#fr_i1" \n\t" \ 337 "psrlh "#fr_t1", "#fr_t1", "#fr_i1" \n\t" \ 338 "packsshb "#fr_d0", "#fr_t0", "#fr_t1" \n\t" 339 340 #define PSRAH_4_MMI(fp1, fp2, fp3, fp4, shift) \ 341 "psrah "#fp1", "#fp1", "#shift" \n\t" \ 342 "psrah "#fp2", "#fp2", "#shift" \n\t" \ 343 "psrah "#fp3", "#fp3", "#shift" \n\t" \ 344 "psrah "#fp4", "#fp4", "#shift" \n\t" 345 346 #define PSRAH_8_MMI(fp1, fp2, fp3, fp4, fp5, fp6, fp7, fp8, shift) \ 347 PSRAH_4_MMI(fp1, fp2, fp3, fp4, shift) \ 348 PSRAH_4_MMI(fp5, fp6, fp7, fp8, shift) 349 350 /** 351 * brief: (((value) + (1 << ((n) - 1))) >> (n)) 352 * fr_i0: src & dst 353 * fr_i1: Operand number 354 * fr_t0, fr_t1: temporary FPR 355 * gr_t0: temporary GPR 356 */ 357 #define ROUND_POWER_OF_TWO_MMI(fr_i0, fr_i1, fr_t0, fr_t1, gr_t0) \ 358 "li "#gr_t0", 0x01 \n\t" \ 359 "dmtc1 "#gr_t0", "#fr_t0" \n\t" \ 360 "punpcklwd "#fr_t0", "#fr_t0", "#fr_t0" \n\t" \ 361 "psubw "#fr_t1", "#fr_i1", "#fr_t0" \n\t" \ 362 "psllw "#fr_t1", "#fr_t0", "#fr_t1" \n\t" \ 363 "paddw "#fr_i0", "#fr_i0", "#fr_t1" \n\t" \ 364 "psraw "#fr_i0", "#fr_i0", "#fr_i1" \n\t" 365 366 #endif /* AVUTILS_MIPS_MMIUTILS_H */ 367