/* Copyright 2013 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/* Macros for endianness, branch prediction and unaligned loads and stores. */

#ifndef BROTLI_ENC_PORT_H_
#define BROTLI_ENC_PORT_H_

#include <assert.h>
#include <string.h>  /* memcpy */

#include <brotli/port.h>
#include <brotli/types.h>

#if defined OS_LINUX || defined OS_CYGWIN
#include <endian.h>
#elif defined OS_FREEBSD
#include <machine/endian.h>
#elif defined OS_MACOSX
#include <machine/endian.h>
/* Let's try and follow the Linux convention */
#define __BYTE_ORDER  BYTE_ORDER
#define __LITTLE_ENDIAN LITTLE_ENDIAN
#endif

/* Define the macro IS_LITTLE_ENDIAN using the endian definitions above,
   if endian.h was included. */
#ifdef __BYTE_ORDER
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define IS_LITTLE_ENDIAN
#endif

#else

#if defined(__LITTLE_ENDIAN__)
#define IS_LITTLE_ENDIAN
#endif
#endif  /* __BYTE_ORDER */

#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#define IS_LITTLE_ENDIAN
#endif

/* Enable little-endian optimization for x64 architecture on Windows. */
#if (defined(_WIN32) || defined(_WIN64)) && defined(_M_X64)
#define IS_LITTLE_ENDIAN
#endif
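
/* Usage sketch (illustrative only; "w", "buf" and "first_byte" are
   hypothetical names, not part of this header): code that loads whole words
   from a byte buffer can branch on IS_LITTLE_ENDIAN to locate buf[0] inside
   the loaded word.

     uint32_t w;
     memcpy(&w, buf, sizeof w);
   #if defined(IS_LITTLE_ENDIAN)
     first_byte = (uint8_t)(w & 0xFF);   (buf[0] is the low-order byte of w)
   #else
     first_byte = (uint8_t)(w >> 24);    (buf[0] is the high-order byte of w)
   #endif
*/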

/* Portable handling of unaligned loads, stores, and copies.
   On some platforms, like ARM, the copy functions can be more efficient
   than a load and a store. */

#if defined(ARCH_PIII) || \
  defined(ARCH_ATHLON) || defined(ARCH_K8) || defined(_ARCH_PPC)

/* x86 and x86-64 can perform unaligned loads/stores directly;
   modern PowerPC hardware can also do unaligned integer loads and stores;
   but note: the FPU still sends unaligned loads and stores to a trap handler!
*/

#define BROTLI_UNALIGNED_LOAD32(_p) (*(const uint32_t *)(_p))
#define BROTLI_UNALIGNED_LOAD64(_p) (*(const uint64_t *)(_p))

#define BROTLI_UNALIGNED_STORE32(_p, _val) \
  (*(uint32_t *)(_p) = (_val))
#define BROTLI_UNALIGNED_STORE64(_p, _val) \
  (*(uint64_t *)(_p) = (_val))

#elif defined(__arm__) && \
  !defined(__ARM_ARCH_5__) && \
  !defined(__ARM_ARCH_5T__) && \
  !defined(__ARM_ARCH_5TE__) && \
  !defined(__ARM_ARCH_5TEJ__) && \
  !defined(__ARM_ARCH_6__) && \
  !defined(__ARM_ARCH_6J__) && \
  !defined(__ARM_ARCH_6K__) && \
  !defined(__ARM_ARCH_6Z__) && \
  !defined(__ARM_ARCH_6ZK__) && \
  !defined(__ARM_ARCH_6T2__)

/* ARMv7 and newer support native unaligned accesses, but only of 16-bit
   and 32-bit values (not 64-bit); older versions either raise a fatal signal,
   do an unaligned read and rotate the words around a bit, or do the reads very
   slowly (trip through kernel mode). */

#define BROTLI_UNALIGNED_LOAD32(_p) (*(const uint32_t *)(_p))
#define BROTLI_UNALIGNED_STORE32(_p, _val) \
  (*(uint32_t *)(_p) = (_val))

static BROTLI_INLINE uint64_t BROTLI_UNALIGNED_LOAD64(const void *p) {
  uint64_t t;
  memcpy(&t, p, sizeof t);
  return t;
}

static BROTLI_INLINE void BROTLI_UNALIGNED_STORE64(void *p, uint64_t v) {
  memcpy(p, &v, sizeof v);
}

#else

/* These functions are provided for architectures that don't support
   unaligned loads and stores. */

static BROTLI_INLINE uint32_t BROTLI_UNALIGNED_LOAD32(const void *p) {
  uint32_t t;
  memcpy(&t, p, sizeof t);
  return t;
}

static BROTLI_INLINE uint64_t BROTLI_UNALIGNED_LOAD64(const void *p) {
  uint64_t t;
  memcpy(&t, p, sizeof t);
  return t;
}

static BROTLI_INLINE void BROTLI_UNALIGNED_STORE32(void *p, uint32_t v) {
  memcpy(p, &v, sizeof v);
}

static BROTLI_INLINE void BROTLI_UNALIGNED_STORE64(void *p, uint64_t v) {
  memcpy(p, &v, sizeof v);
}

#endif
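
/* Usage sketch (illustrative only; "copy8", "dst" and "src" are hypothetical
   names): whichever branch above was selected, the four names behave the
   same, so callers can move 8 bytes between pointers of any alignment
   without writing per-architecture code.

     static BROTLI_INLINE void copy8(uint8_t* dst, const uint8_t* src) {
       BROTLI_UNALIGNED_STORE64(dst, BROTLI_UNALIGNED_LOAD64(src));
     }
*/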

#define TEMPLATE_(T)                                                           \
  static BROTLI_INLINE T brotli_min_ ## T (T a, T b) { return a < b ? a : b; } \
  static BROTLI_INLINE T brotli_max_ ## T (T a, T b) { return a > b ? a : b; }
TEMPLATE_(double) TEMPLATE_(float) TEMPLATE_(int)
TEMPLATE_(size_t) TEMPLATE_(uint32_t) TEMPLATE_(uint8_t)
#undef TEMPLATE_
#define BROTLI_MIN(T, A, B) (brotli_min_ ## T((A), (B)))
#define BROTLI_MAX(T, A, B) (brotli_max_ ## T((A), (B)))
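
/* Usage sketch (illustrative only; "requested" and "block_limit" are
   hypothetical names): the type name is pasted into the generated function
   name, so T must be one of the types instantiated above, spelled exactly
   the same way.

     size_t capped = BROTLI_MIN(size_t, requested, block_limit);
     (expands to brotli_min_size_t((requested), (block_limit)))
*/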

#define BROTLI_SWAP(T, A, I, J) { \
  T __brotli_swap_tmp = (A)[(I)]; \
  (A)[(I)] = (A)[(J)];            \
  (A)[(J)] = __brotli_swap_tmp;   \
}
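
/* Usage sketch (illustrative only; "costs", "i" and "j" are hypothetical
   names): swaps two array elements in place; the index arguments may be
   arbitrary expressions thanks to the extra parentheses.

     BROTLI_SWAP(float, costs, i, j + 1);
*/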

#define BROTLI_ENSURE_CAPACITY(M, T, A, C, R) {  \
  if (C < (R)) {                                 \
    size_t _new_size = (C == 0) ? (R) : C;       \
    T* new_array;                                \
    while (_new_size < (R)) _new_size *= 2;      \
    new_array = BROTLI_ALLOC((M), T, _new_size); \
    if (!BROTLI_IS_OOM(M) && C != 0)             \
      memcpy(new_array, A, C * sizeof(T));       \
    BROTLI_FREE((M), A);                         \
    A = new_array;                               \
    C = _new_size;                               \
  }                                              \
}
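
/* Usage sketch (illustrative only; "m", "cmds", "cmd_capacity" and
   "num_cmds" are hypothetical names): grows a heap array geometrically so it
   can hold at least num_cmds + 1 elements, updating the pointer and the
   capacity variable in place. Here m is the encoder's memory manager, the
   same object passed to BROTLI_ALLOC and BROTLI_FREE; callers should check
   BROTLI_IS_OOM(m) before using the new storage.

     BROTLI_ENSURE_CAPACITY(m, uint32_t, cmds, cmd_capacity, num_cmds + 1);
*/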

#endif  /* BROTLI_ENC_PORT_H_ */