/* CpuArch.h -- CPU specific code
2016-06-09: Igor Pavlov : Public domain */

#ifndef __CPU_ARCH_H
#define __CPU_ARCH_H

#include "7zTypes.h"

EXTERN_C_BEGIN

/*
MY_CPU_LE means that the CPU is LITTLE ENDIAN.
MY_CPU_BE means that the CPU is BIG ENDIAN.
If MY_CPU_LE and MY_CPU_BE are not defined, we don't know the ENDIANNESS of the platform.

MY_CPU_LE_UNALIGN means that the CPU is LITTLE ENDIAN and supports unaligned memory accesses.
*/

#if defined(_M_X64) \
 || defined(_M_AMD64) \
 || defined(__x86_64__) \
 || defined(__AMD64__) \
 || defined(__amd64__)
#define MY_CPU_AMD64
#endif

#if defined(MY_CPU_AMD64) \
 || defined(_M_IA64) \
 || defined(__AARCH64EL__) \
 || defined(__AARCH64EB__)
#define MY_CPU_64BIT
#endif

#if defined(_M_IX86) || defined(__i386__)
#define MY_CPU_X86
#endif

#if defined(MY_CPU_X86) || defined(MY_CPU_AMD64)
#define MY_CPU_X86_OR_AMD64
#endif

#if defined(MY_CPU_X86) \
 || defined(_M_ARM) \
 || defined(__ARMEL__) \
 || defined(__THUMBEL__) \
 || defined(__ARMEB__) \
 || defined(__THUMBEB__)
#define MY_CPU_32BIT
#endif

#if defined(_WIN32) && defined(_M_ARM)
#define MY_CPU_ARM_LE
#endif

#if defined(_WIN32) && defined(_M_IA64)
#define MY_CPU_IA64_LE
#endif

#if defined(MY_CPU_X86_OR_AMD64) \
 || defined(MY_CPU_ARM_LE) \
 || defined(MY_CPU_IA64_LE) \
 || defined(__LITTLE_ENDIAN__) \
 || defined(__ARMEL__) \
 || defined(__THUMBEL__) \
 || defined(__AARCH64EL__) \
 || defined(__MIPSEL__) \
 || defined(__MIPSEL) \
 || defined(_MIPSEL) \
 || defined(__BFIN__) \
 || (defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__))
#define MY_CPU_LE
#endif

#if defined(__BIG_ENDIAN__) \
 || defined(__ARMEB__) \
 || defined(__THUMBEB__) \
 || defined(__AARCH64EB__) \
 || defined(__MIPSEB__) \
 || defined(__MIPSEB) \
 || defined(_MIPSEB) \
 || defined(__m68k__) \
 || defined(__s390__) \
 || defined(__s390x__) \
 || defined(__zarch__) \
 || (defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
#define MY_CPU_BE
#endif

#if defined(MY_CPU_LE) && defined(MY_CPU_BE)
Stop_Compiling_Bad_Endian
#endif


#ifdef MY_CPU_LE
#if defined(MY_CPU_X86_OR_AMD64) \
    /* || defined(__AARCH64EL__) */
#define MY_CPU_LE_UNALIGN
#endif
#endif


#ifdef MY_CPU_LE_UNALIGN

#define GetUi16(p) (*(const UInt16 *)(const void *)(p))
#define GetUi32(p) (*(const UInt32 *)(const void *)(p))
#define GetUi64(p) (*(const UInt64 *)(const void *)(p))

#define SetUi16(p, v) { *(UInt16 *)(p) = (v); }
#define SetUi32(p, v) { *(UInt32 *)(p) = (v); }
#define SetUi64(p, v) { *(UInt64 *)(p) = (v); }

#else

#define GetUi16(p) ( (UInt16) ( \
             ((const Byte *)(p))[0] | \
    ((UInt16)((const Byte *)(p))[1] << 8) ))

#define GetUi32(p) ( \
             ((const Byte *)(p))[0]        | \
    ((UInt32)((const Byte *)(p))[1] <<  8) | \
    ((UInt32)((const Byte *)(p))[2] << 16) | \
    ((UInt32)((const Byte *)(p))[3] << 24))

#define GetUi64(p) (GetUi32(p) | ((UInt64)GetUi32(((const Byte *)(p)) + 4) << 32))

#define SetUi16(p, v) { Byte *_ppp_ = (Byte *)(p); UInt32 _vvv_ = (v); \
    _ppp_[0] = (Byte)_vvv_; \
    _ppp_[1] = (Byte)(_vvv_ >> 8); }

#define SetUi32(p, v) { Byte *_ppp_ = (Byte *)(p); UInt32 _vvv_ = (v); \
    _ppp_[0] = (Byte)_vvv_; \
    _ppp_[1] = (Byte)(_vvv_ >> 8); \
    _ppp_[2] = (Byte)(_vvv_ >> 16); \
    _ppp_[3] = (Byte)(_vvv_ >> 24); }

#define SetUi64(p, v) { Byte *_ppp2_ = (Byte *)(p); UInt64 _vvv2_ = (v); \
    SetUi32(_ppp2_    , (UInt32)_vvv2_); \
    SetUi32(_ppp2_ + 4, (UInt32)(_vvv2_ >> 32)); }

#endif


#if defined(MY_CPU_LE_UNALIGN) && /* defined(_WIN64) && */ defined(_MSC_VER) && (_MSC_VER >= 1300)

/* Note: we use the bswap instruction, which is not supported on the 80386 CPU (it requires 80486 or later) */

#include <stdlib.h>

#pragma intrinsic(_byteswap_ulong)
#pragma intrinsic(_byteswap_uint64)
#define GetBe32(p) _byteswap_ulong(*(const UInt32 *)(const Byte *)(p))
#define GetBe64(p) _byteswap_uint64(*(const UInt64 *)(const Byte *)(p))

#define SetBe32(p, v) (*(UInt32 *)(void *)(p)) = _byteswap_ulong(v)

#elif defined(MY_CPU_LE_UNALIGN) && defined (__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))

#define GetBe32(p) __builtin_bswap32(*(const UInt32 *)(const Byte *)(p))
#define GetBe64(p) __builtin_bswap64(*(const UInt64 *)(const Byte *)(p))

#define SetBe32(p, v) (*(UInt32 *)(void *)(p)) = __builtin_bswap32(v)

#else

#define GetBe32(p) ( \
    ((UInt32)((const Byte *)(p))[0] << 24) | \
    ((UInt32)((const Byte *)(p))[1] << 16) | \
    ((UInt32)((const Byte *)(p))[2] <<  8) | \
             ((const Byte *)(p))[3] )

#define GetBe64(p) (((UInt64)GetBe32(p) << 32) | GetBe32(((const Byte *)(p)) + 4))

#define SetBe32(p, v) { Byte *_ppp_ = (Byte *)(p); UInt32 _vvv_ = (v); \
    _ppp_[0] = (Byte)(_vvv_ >> 24); \
    _ppp_[1] = (Byte)(_vvv_ >> 16); \
    _ppp_[2] = (Byte)(_vvv_ >> 8); \
    _ppp_[3] = (Byte)_vvv_; }

#endif


#define GetBe16(p) ( (UInt16) ( \
    ((UInt16)((const Byte *)(p))[0] << 8) | \
             ((const Byte *)(p))[1] ))



#ifdef MY_CPU_X86_OR_AMD64

typedef struct
{
  UInt32 maxFunc;
  UInt32 vendor[3];
  UInt32 ver;
  UInt32 b;
  UInt32 c;
  UInt32 d;
} Cx86cpuid;

enum
{
  CPU_FIRM_INTEL,
  CPU_FIRM_AMD,
  CPU_FIRM_VIA
};

void MyCPUID(UInt32 function, UInt32 *a, UInt32 *b, UInt32 *c, UInt32 *d);

Bool x86cpuid_CheckAndRead(Cx86cpuid *p);
int x86cpuid_GetFirm(const Cx86cpuid *p);

#define x86cpuid_GetFamily(ver) (((ver >> 16) & 0xFF0) | ((ver >> 8) & 0xF))
#define x86cpuid_GetModel(ver) (((ver >> 12) & 0xF0) | ((ver >> 4) & 0xF))
#define x86cpuid_GetStepping(ver) (ver & 0xF)

Bool CPU_Is_InOrder(void);
Bool CPU_Is_Aes_Supported(void);

#endif

EXTERN_C_END

#endif
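
/*
Usage sketch (illustrative only, not part of the original header). The function
name BeToLe32Array and the buffer layout below are hypothetical; the sketch only
shows how the Get/Set macros above are intended to be called on byte buffers that
may be unaligned, assuming "7zTypes.h" provides Byte and UInt32 as elsewhere in
the SDK. It is kept inside a comment so that including this header is unchanged.

  #include "CpuArch.h"

  // Convert "count" 32-bit values stored big-endian in "src" into
  // little-endian encoding in "dst". Both pointers may be unaligned:
  // on x86/x64 the macros use direct (optionally bswap'd) loads and
  // stores, and on other targets they fall back to byte-wise access.
  static void BeToLe32Array(Byte *dst, const Byte *src, unsigned count)
  {
    unsigned i;
    for (i = 0; i < count; i++)
      SetUi32(dst + i * 4, GetBe32(src + i * 4));
  }
*/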