/*---------------------------------------------------------------*/
/*--- begin                             host_generic_simd64.h ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2017 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

/* Generic helper functions for doing 64-bit SIMD arithmetic in cases
   where the instruction selectors cannot generate code in-line.
   These are purely back-end entities and cannot be seen/referenced
   as clean helper functions from IR.

   These will get called from generated code and therefore should be
   well behaved -- no floating point or mmx insns, just straight
   integer code.

   Each function implements the correspondingly-named IR primop.
*/

#ifndef __VEX_HOST_GENERIC_SIMD64_H
#define __VEX_HOST_GENERIC_SIMD64_H

#include "libvex_basictypes.h"

/* DO NOT MAKE THESE INTO REGPARM FNS!  THIS WILL BREAK CALLING
   SEQUENCES GENERATED BY host-x86/isel.c. */
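/* A minimal sketch of the lane-wise style these helpers use (the real
   definitions live in host_generic_simd64.c; the sel16x4_* and mk16x4
   names below are illustrative helpers, not part of this interface):
   split each 64-bit value into lanes with plain integer code, operate
   per lane, and reassemble.  For Add16x4 that would look like:

      static UShort sel16x4_0 ( ULong w ) { return toUShort(0xFFFF & w);         }
      static UShort sel16x4_1 ( ULong w ) { return toUShort(0xFFFF & (w >> 16)); }
      static UShort sel16x4_2 ( ULong w ) { return toUShort(0xFFFF & (w >> 32)); }
      static UShort sel16x4_3 ( ULong w ) { return toUShort(0xFFFF & (w >> 48)); }

      static ULong mk16x4 ( UShort w3, UShort w2, UShort w1, UShort w0 ) {
         return (((ULong)w3) << 48) | (((ULong)w2) << 32)
                | (((ULong)w1) << 16) | ((ULong)w0);
      }

      ULong h_generic_calc_Add16x4 ( ULong xx, ULong yy )
      {
         // Each lane sum wraps modulo 2^16, as Iop_Add16x4 requires.
         return mk16x4( toUShort(sel16x4_3(xx) + sel16x4_3(yy)),
                        toUShort(sel16x4_2(xx) + sel16x4_2(yy)),
                        toUShort(sel16x4_1(xx) + sel16x4_1(yy)),
                        toUShort(sel16x4_0(xx) + sel16x4_0(yy)) );
      }
*/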
extern ULong h_generic_calc_Add32x2 ( ULong, ULong );
extern ULong h_generic_calc_Add16x4 ( ULong, ULong );
extern ULong h_generic_calc_Add8x8  ( ULong, ULong );

extern ULong h_generic_calc_QAdd16Sx4 ( ULong, ULong );
extern ULong h_generic_calc_QAdd8Sx8  ( ULong, ULong );
extern ULong h_generic_calc_QAdd16Ux4 ( ULong, ULong );
extern ULong h_generic_calc_QAdd8Ux8  ( ULong, ULong );

extern ULong h_generic_calc_Sub32x2 ( ULong, ULong );
extern ULong h_generic_calc_Sub16x4 ( ULong, ULong );
extern ULong h_generic_calc_Sub8x8  ( ULong, ULong );

extern ULong h_generic_calc_QSub16Sx4 ( ULong, ULong );
extern ULong h_generic_calc_QSub8Sx8  ( ULong, ULong );
extern ULong h_generic_calc_QSub16Ux4 ( ULong, ULong );
extern ULong h_generic_calc_QSub8Ux8  ( ULong, ULong );

extern ULong h_generic_calc_Mul16x4    ( ULong, ULong );
extern ULong h_generic_calc_Mul32x2    ( ULong, ULong );
extern ULong h_generic_calc_MulHi16Sx4 ( ULong, ULong );
extern ULong h_generic_calc_MulHi16Ux4 ( ULong, ULong );

extern ULong h_generic_calc_CmpEQ32x2  ( ULong, ULong );
extern ULong h_generic_calc_CmpEQ16x4  ( ULong, ULong );
extern ULong h_generic_calc_CmpEQ8x8   ( ULong, ULong );
extern ULong h_generic_calc_CmpGT32Sx2 ( ULong, ULong );
extern ULong h_generic_calc_CmpGT16Sx4 ( ULong, ULong );
extern ULong h_generic_calc_CmpGT8Sx8  ( ULong, ULong );

extern ULong h_generic_calc_CmpNEZ32x2 ( ULong );
extern ULong h_generic_calc_CmpNEZ16x4 ( ULong );
extern ULong h_generic_calc_CmpNEZ8x8  ( ULong );

extern ULong h_generic_calc_QNarrowBin32Sto16Sx4 ( ULong, ULong );
extern ULong h_generic_calc_QNarrowBin16Sto8Sx8  ( ULong, ULong );
extern ULong h_generic_calc_QNarrowBin16Sto8Ux8  ( ULong, ULong );
extern ULong h_generic_calc_NarrowBin32to16x4    ( ULong, ULong );
extern ULong h_generic_calc_NarrowBin16to8x8     ( ULong, ULong );

extern ULong h_generic_calc_InterleaveHI8x8  ( ULong, ULong );
extern ULong h_generic_calc_InterleaveLO8x8  ( ULong, ULong );
extern ULong h_generic_calc_InterleaveHI16x4 ( ULong, ULong );
extern ULong h_generic_calc_InterleaveLO16x4 ( ULong, ULong );
extern ULong h_generic_calc_InterleaveHI32x2 ( ULong, ULong );
extern ULong h_generic_calc_InterleaveLO32x2 ( ULong, ULong );

extern ULong h_generic_calc_CatOddLanes16x4  ( ULong, ULong );
extern ULong h_generic_calc_CatEvenLanes16x4 ( ULong, ULong );
extern ULong h_generic_calc_Perm8x8          ( ULong, ULong );

extern ULong h_generic_calc_ShlN8x8  ( ULong, UInt );
extern ULong h_generic_calc_ShlN16x4 ( ULong, UInt );
extern ULong h_generic_calc_ShlN32x2 ( ULong, UInt );

extern ULong h_generic_calc_ShrN16x4 ( ULong, UInt );
extern ULong h_generic_calc_ShrN32x2 ( ULong, UInt );

extern ULong h_generic_calc_SarN8x8  ( ULong, UInt );
extern ULong h_generic_calc_SarN16x4 ( ULong, UInt );
extern ULong h_generic_calc_SarN32x2 ( ULong, UInt );

extern ULong h_generic_calc_Avg8Ux8  ( ULong, ULong );
extern ULong h_generic_calc_Avg16Ux4 ( ULong, ULong );

extern ULong h_generic_calc_Max16Sx4 ( ULong, ULong );
extern ULong h_generic_calc_Max8Ux8  ( ULong, ULong );
extern ULong h_generic_calc_Min16Sx4 ( ULong, ULong );
extern ULong h_generic_calc_Min8Ux8  ( ULong, ULong );

extern UInt  h_generic_calc_GetMSBs8x8 ( ULong );

/* 32-bit SIMD HELPERS */

extern UInt h_generic_calc_Add16x2 ( UInt, UInt );
extern UInt h_generic_calc_Sub16x2 ( UInt, UInt );

extern UInt h_generic_calc_HAdd16Ux2 ( UInt, UInt );
extern UInt h_generic_calc_HAdd16Sx2 ( UInt, UInt );
extern UInt h_generic_calc_HSub16Ux2 ( UInt, UInt );
extern UInt h_generic_calc_HSub16Sx2 ( UInt, UInt );

extern UInt h_generic_calc_QAdd16Ux2 ( UInt, UInt );
extern UInt h_generic_calc_QAdd16Sx2 ( UInt, UInt );
extern UInt h_generic_calc_QSub16Ux2 ( UInt, UInt );
extern UInt h_generic_calc_QSub16Sx2 ( UInt, UInt );

extern UInt h_generic_calc_Add8x4 ( UInt, UInt );
extern UInt h_generic_calc_Sub8x4 ( UInt, UInt );

extern UInt h_generic_calc_HAdd8Ux4 ( UInt, UInt );
extern UInt h_generic_calc_HAdd8Sx4 ( UInt, UInt );
extern UInt h_generic_calc_HSub8Ux4 ( UInt, UInt );
extern UInt h_generic_calc_HSub8Sx4 ( UInt, UInt );

extern UInt h_generic_calc_QAdd8Ux4 ( UInt, UInt );
extern UInt h_generic_calc_QAdd8Sx4 ( UInt, UInt );
extern UInt h_generic_calc_QSub8Ux4 ( UInt, UInt );
extern UInt h_generic_calc_QSub8Sx4 ( UInt, UInt );

extern UInt h_generic_calc_Sad8Ux4 ( UInt, UInt );

extern UInt h_generic_calc_QAdd32S ( UInt, UInt );
extern UInt h_generic_calc_QSub32S ( UInt, UInt );

extern UInt h_generic_calc_CmpNEZ16x2 ( UInt );
extern UInt h_generic_calc_CmpNEZ8x4  ( UInt );

extern ULong h_calc_DPBtoBCD ( ULong dpb );
extern ULong h_calc_BCDtoDPB ( ULong bcd );

// Signed and unsigned integer division that behaves like the ARMv7
// UDIV and SDIV instructions.
extern UInt  h_calc_udiv32_w_arm_semantics ( UInt,  UInt  );
extern ULong h_calc_udiv64_w_arm_semantics ( ULong, ULong );
extern Int   h_calc_sdiv32_w_arm_semantics ( Int,   Int   );
extern Long  h_calc_sdiv64_w_arm_semantics ( Long,  Long  );
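/* A minimal sketch of the ARM semantics in question (an assumption
   about shape only; the real definitions live in
   host_generic_simd64.c): ARMv7 UDIV/SDIV never trap, so a plain-C
   version must special-case the two inputs that would be undefined
   behaviour for C's '/' operator:

      UInt h_calc_udiv32_w_arm_semantics ( UInt x, UInt y )
      {
         if (y == 0) return 0;   // ARM: division by zero yields zero
         return x / y;
      }

      Int h_calc_sdiv32_w_arm_semantics ( Int x, Int y )
      {
         if (y == 0) return 0;                 // division by zero yields zero
         if (x == (Int)0x80000000 && y == -1)  // most-negative / -1 overflows
            return x;                          // in C; ARM wraps, giving the
         return x / y;                         // dividend back unchanged
      }
*/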

#endif /* ndef __VEX_HOST_GENERIC_SIMD64_H */

/*---------------------------------------------------------------*/
/*--- end                               host_generic_simd64.h ---*/
/*---------------------------------------------------------------*/