/*
 * Copyright (c) 1992, 1993, 1994, 1995, 1996
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that: (1) source code distributions
 * retain the above copyright notice and this paragraph in its entirety, (2)
 * distributions including binary code include the above copyright notice and
 * this paragraph in its entirety in the documentation or other materials
 * provided with the distribution, and (3) all advertising materials mentioning
 * features or use of this software display the following acknowledgement:
 * ``This product includes software developed by the University of California,
 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
 * the University nor the names of its contributors may be used to endorse
 * or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#ifndef _WIN32
#include <arpa/inet.h>
#endif

#include <pcap/pcap-inttypes.h>
#include <pcap/compiler-tests.h>
#include "portability.h"

/*
 * If we have versions of GCC or Clang that support an __attribute__
 * to say "if we're building with undefined-behavior sanitization,
 * don't complain about undefined behavior in this function", we
 * label these functions with that attribute - we *know* it's undefined
 * in the C standard, but we *also* know it does what we want with
 * the ISA we're targeting and the compiler we're using.
 *
 * For GCC 4.9.0 and later, we use __attribute__((no_sanitize_undefined));
 * pre-5.0 GCC doesn't have __has_attribute, and I'm not sure whether
 * GCC or Clang first had __attribute__((no_sanitize(XXX))).
 *
 * For Clang, we check for __attribute__((no_sanitize(XXX))) with
 * __has_attribute, as there are versions of Clang that support
 * __attribute__((no_sanitize("undefined"))) but don't support
 * __attribute__((no_sanitize_undefined)).
 *
 * We define this here, rather than in funcattrs.h, because we
 * only want it used here; we don't want it to be broadly used.
 * (Any printer will get this defined, but this should at least
 * make it harder for people to find.)
 */
#if defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 409)
#define UNALIGNED_OK	__attribute__((no_sanitize_undefined))
#elif __has_attribute(no_sanitize)
#define UNALIGNED_OK	__attribute__((no_sanitize("undefined")))
#else
#define UNALIGNED_OK
#endif

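/*
 * For example, with "-fsanitize=undefined" the undefined-behavior
 * sanitizer would normally report a dereference such as
 *
 *	uint16_t v = *(const uint16_t *)bp;
 *
 * through a possibly odd-valued pointer "bp" as a misaligned access;
 * UNALIGNED_OK suppresses that report for the functions below, which
 * make exactly that kind of access on purpose.  ("bp" here is purely
 * illustrative.)
 */
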
#if (defined(__i386__) || defined(_M_IX86) || defined(__X86__) || defined(__x86_64__) || defined(_M_X64)) || \
    (defined(__m68k__) && (!defined(__mc68000__) && !defined(__mc68010__))) || \
    (defined(__ppc__) || defined(__ppc64__) || defined(_M_PPC) || defined(_ARCH_PPC) || defined(_ARCH_PPC64)) || \
    (defined(__s390__) || defined(__s390x__) || defined(__zarch__))
/*
 * The processor natively handles unaligned loads, so we can just
 * cast the pointer and fetch through it.
 *
 * XXX - are those all the x86 tests we need?
 * XXX - are those the only 68k tests we need to avoid generating
 * unaligned accesses if the target is the 68000 or 68010?
 * XXX - are there any tests we don't need, because some definitions are for
 * compilers that also predefine the GCC symbols?
 * XXX - do we need to test for both 32-bit and 64-bit versions of those
 * architectures in all cases?
 */
UNALIGNED_OK static inline uint16_t
EXTRACT_BE_U_2(const void *p)
{
	return ((uint16_t)ntohs(*(const uint16_t *)(p)));
}

UNALIGNED_OK static inline int16_t
EXTRACT_BE_S_2(const void *p)
{
	return ((int16_t)ntohs(*(const int16_t *)(p)));
}

UNALIGNED_OK static inline uint32_t
EXTRACT_BE_U_4(const void *p)
{
	return ((uint32_t)ntohl(*(const uint32_t *)(p)));
}

UNALIGNED_OK static inline int32_t
EXTRACT_BE_S_4(const void *p)
{
	return ((int32_t)ntohl(*(const int32_t *)(p)));
}

UNALIGNED_OK static inline uint64_t
EXTRACT_BE_U_8(const void *p)
{
	return ((uint64_t)(((uint64_t)ntohl(*((const uint32_t *)(p) + 0))) << 32 |
		((uint64_t)ntohl(*((const uint32_t *)(p) + 1))) << 0));
}

UNALIGNED_OK static inline int64_t
EXTRACT_BE_S_8(const void *p)
{
	return ((int64_t)(((uint64_t)ntohl(*((const uint32_t *)(p) + 0))) << 32 |
		((uint64_t)ntohl(*((const uint32_t *)(p) + 1))) << 0));
}
#elif PCAP_IS_AT_LEAST_GNUC_VERSION(2,0) && \
    (defined(__alpha) || defined(__alpha__) || \
     defined(__mips) || defined(__mips__))
/*
 * This is MIPS or Alpha, which don't natively handle unaligned loads,
 * but which have instructions that can help when doing unaligned
 * loads, and this is GCC 2.0 or later or a compiler that claims to
 * be GCC 2.0 or later, which we assume means we have
 * __attribute__((packed)), which we can use to convince the compiler
 * to generate those instructions.
 *
 * Declare packed structures containing a uint16_t and a uint32_t,
 * cast the pointer to point to one of those, and fetch through it;
 * the GCC manual doesn't appear to explicitly say that
 * __attribute__((packed)) causes the compiler to generate unaligned-safe
 * code, but it appears to do so.
 *
 * We do this in case the compiler can generate code using those
 * instructions to do an unaligned load and pass stuff to "ntohs()" or
 * "ntohl()", which might be better than the code to fetch the
 * bytes one at a time and assemble them.  (That might not be the
 * case on a little-endian platform, such as DEC's MIPS machines and
 * Alpha machines, where "ntohs()" and "ntohl()" might not be done
 * inline.)
 *
 * We do this only for specific architectures because, for example,
 * at least some versions of GCC, when compiling for 64-bit SPARC,
 * generate code that assumes alignment if we do this.
 *
 * XXX - add other architectures and compilers as possible and
 * appropriate.
 *
 * HP's C compiler, indicated by __HP_cc being defined, supports
 * "#pragma unaligned N" in version A.05.50 and later, where "N"
 * specifies a number of bytes at which the typedef on the next
 * line is aligned, e.g.
 *
 *	#pragma unalign 1
 *	typedef uint16_t unaligned_uint16_t;
 *
 * to define unaligned_uint16_t as a 16-bit unaligned data type.
 * This could presumably be used, in sufficiently recent versions of
 * the compiler, with macros similar to those below.  This would be
 * useful only if that compiler could generate better code for PA-RISC
 * or Itanium than would be generated by a bunch of shifts-and-ORs.
 *
 * DEC C, indicated by __DECC being defined, has, at least on Alpha,
 * an __unaligned qualifier that can be applied to pointers to get the
 * compiler to generate code that does unaligned loads and stores when
 * dereferencing the pointer in question.
 *
 * XXX - what if the native C compiler doesn't support
 * __attribute__((packed))?  How can we get it to generate unaligned
 * accesses for *specific* items?
 */
typedef struct {
	uint16_t	val;
} __attribute__((packed)) unaligned_uint16_t;

typedef struct {
	int16_t		val;
} __attribute__((packed)) unaligned_int16_t;

typedef struct {
	uint32_t	val;
} __attribute__((packed)) unaligned_uint32_t;

typedef struct {
	int32_t		val;
} __attribute__((packed)) unaligned_int32_t;

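/*
 * A 64-bit variant could presumably be declared the same way, e.g.
 *
 *	typedef struct {
 *		uint64_t	val;
 *	} __attribute__((packed)) unaligned_uint64_t;
 *
 * but the 64-bit accessors below instead combine two 32-bit loads,
 * which may be because ntohl() only byte-swaps 32-bit quantities and
 * there is no standard 64-bit counterpart to it.
 */
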
UNALIGNED_OK static inline uint16_t
EXTRACT_BE_U_2(const void *p)
{
	return ((uint16_t)ntohs(((const unaligned_uint16_t *)(p))->val));
}

UNALIGNED_OK static inline int16_t
EXTRACT_BE_S_2(const void *p)
{
	return ((int16_t)ntohs(((const unaligned_int16_t *)(p))->val));
}

UNALIGNED_OK static inline uint32_t
EXTRACT_BE_U_4(const void *p)
{
	return ((uint32_t)ntohl(((const unaligned_uint32_t *)(p))->val));
}

UNALIGNED_OK static inline int32_t
EXTRACT_BE_S_4(const void *p)
{
	return ((int32_t)ntohl(((const unaligned_int32_t *)(p))->val));
}

UNALIGNED_OK static inline uint64_t
EXTRACT_BE_U_8(const void *p)
{
	return ((uint64_t)(((uint64_t)ntohl(((const unaligned_uint32_t *)(p) + 0)->val)) << 32 |
		((uint64_t)ntohl(((const unaligned_uint32_t *)(p) + 1)->val)) << 0));
}

UNALIGNED_OK static inline int64_t
EXTRACT_BE_S_8(const void *p)
{
	return ((int64_t)(((uint64_t)ntohl(((const unaligned_uint32_t *)(p) + 0)->val)) << 32 |
		((uint64_t)ntohl(((const unaligned_uint32_t *)(p) + 1)->val)) << 0));
}
#else
/*
 * This architecture doesn't natively support unaligned loads, and either
 * this isn't a GCC-compatible compiler, so we don't have __attribute__,
 * or we do have it but don't know of any better way with this instruction
 * set to do unaligned loads, so do unaligned loads of big-endian
 * quantities the hard way - fetch the bytes one at a time and
 * assemble them.
 *
 * XXX - ARM is a special case.  ARMv1 through ARMv5 didn't support
 * unaligned loads; ARMv6 and later support them *but* have a bit in
 * the system control register that the OS can set and that causes
 * unaligned loads to fault rather than succeeding.
 *
 * At least some OSes may set that flag, so we do *not* treat ARM
 * as supporting unaligned loads.  If your OS supports them on ARM,
 * and you want to use them, please update the tests in the #if above
 * to check for ARM *and* for your OS.
 */
#define EXTRACT_BE_U_2(p) \
	((uint16_t)(((uint16_t)(*((const uint8_t *)(p) + 0)) << 8) | \
	            ((uint16_t)(*((const uint8_t *)(p) + 1)) << 0)))
#define EXTRACT_BE_S_2(p) \
	((int16_t)(((uint16_t)(*((const uint8_t *)(p) + 0)) << 8) | \
	           ((uint16_t)(*((const uint8_t *)(p) + 1)) << 0)))
#define EXTRACT_BE_U_4(p) \
	((uint32_t)(((uint32_t)(*((const uint8_t *)(p) + 0)) << 24) | \
	            ((uint32_t)(*((const uint8_t *)(p) + 1)) << 16) | \
	            ((uint32_t)(*((const uint8_t *)(p) + 2)) << 8) | \
	            ((uint32_t)(*((const uint8_t *)(p) + 3)) << 0)))
#define EXTRACT_BE_S_4(p) \
	((int32_t)(((uint32_t)(*((const uint8_t *)(p) + 0)) << 24) | \
	           ((uint32_t)(*((const uint8_t *)(p) + 1)) << 16) | \
	           ((uint32_t)(*((const uint8_t *)(p) + 2)) << 8) | \
	           ((uint32_t)(*((const uint8_t *)(p) + 3)) << 0)))
#define EXTRACT_BE_U_8(p) \
	((uint64_t)(((uint64_t)(*((const uint8_t *)(p) + 0)) << 56) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 1)) << 48) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 2)) << 40) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 3)) << 32) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 4)) << 24) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 5)) << 16) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 6)) << 8) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 7)) << 0)))
#define EXTRACT_BE_S_8(p) \
	((int64_t)(((uint64_t)(*((const uint8_t *)(p) + 0)) << 56) | \
	           ((uint64_t)(*((const uint8_t *)(p) + 1)) << 48) | \
	           ((uint64_t)(*((const uint8_t *)(p) + 2)) << 40) | \
	           ((uint64_t)(*((const uint8_t *)(p) + 3)) << 32) | \
	           ((uint64_t)(*((const uint8_t *)(p) + 4)) << 24) | \
	           ((uint64_t)(*((const uint8_t *)(p) + 5)) << 16) | \
	           ((uint64_t)(*((const uint8_t *)(p) + 6)) << 8) | \
	           ((uint64_t)(*((const uint8_t *)(p) + 7)) << 0)))

/*
 * Extract an IPv4 address, which is in network byte order, and not
 * necessarily aligned, and provide the result in host byte order.
 */
#define EXTRACT_IPV4_TO_HOST_ORDER(p) \
	((uint32_t)(((uint32_t)(*((const uint8_t *)(p) + 0)) << 24) | \
	            ((uint32_t)(*((const uint8_t *)(p) + 1)) << 16) | \
	            ((uint32_t)(*((const uint8_t *)(p) + 2)) << 8) | \
	            ((uint32_t)(*((const uint8_t *)(p) + 3)) << 0)))
#endif /* unaligned access checks */
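
/*
 * Usage sketch (hypothetical field layout, purely for illustration):
 * given a const uint8_t *bp pointing at a big-endian 2-byte type field
 * followed by a 4-byte length field, neither of which is necessarily
 * aligned,
 *
 *	uint16_t type   = EXTRACT_BE_U_2(bp);
 *	uint32_t length = EXTRACT_BE_U_4(bp + 2);
 *
 * yields the values in host byte order, using whichever of the
 * definitions above was selected for this compiler and architecture.
 */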

/*
 * Non-power-of-2 sizes.
 */
#define EXTRACT_BE_U_3(p) \
	((uint32_t)(((uint32_t)(*((const uint8_t *)(p) + 0)) << 16) | \
	            ((uint32_t)(*((const uint8_t *)(p) + 1)) << 8) | \
	            ((uint32_t)(*((const uint8_t *)(p) + 2)) << 0)))

#define EXTRACT_BE_S_3(p) \
	(((*((const uint8_t *)(p) + 0)) & 0x80) ? \
	  ((int32_t)(0xFF000000U | \
	             ((uint32_t)(*((const uint8_t *)(p) + 0)) << 16) | \
	             ((uint32_t)(*((const uint8_t *)(p) + 1)) << 8) | \
	             ((uint32_t)(*((const uint8_t *)(p) + 2)) << 0))) : \
	  ((int32_t)(((uint32_t)(*((const uint8_t *)(p) + 0)) << 16) | \
	             ((uint32_t)(*((const uint8_t *)(p) + 1)) << 8) | \
	             ((uint32_t)(*((const uint8_t *)(p) + 2)) << 0))))

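/*
 * Worked example of the sign extension above: for the bytes
 * 0xFF 0xFF 0xFE the high bit of the first byte is set, so the result
 * is (int32_t)(0xFF000000U | 0x00FFFFFE) = (int32_t)0xFFFFFFFE = -2;
 * for the bytes 0x00 0x00 0x02 the high bit is clear and the result
 * is simply 2.  The 5-, 6- and 7-byte signed variants below work the
 * same way, with fill masks sized to the bytes being extracted.
 */
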
#define EXTRACT_BE_U_5(p) \
	((uint64_t)(((uint64_t)(*((const uint8_t *)(p) + 0)) << 32) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 1)) << 24) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 2)) << 16) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 3)) << 8) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 4)) << 0)))

#define EXTRACT_BE_S_5(p) \
	(((*((const uint8_t *)(p) + 0)) & 0x80) ? \
	  ((int64_t)(INT64_T_CONSTANT(0xFFFFFF0000000000U) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 0)) << 32) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 1)) << 24) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 2)) << 16) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 3)) << 8) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 4)) << 0))) : \
	  ((int64_t)(((uint64_t)(*((const uint8_t *)(p) + 0)) << 32) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 1)) << 24) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 2)) << 16) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 3)) << 8) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 4)) << 0))))

#define EXTRACT_BE_U_6(p) \
	((uint64_t)(((uint64_t)(*((const uint8_t *)(p) + 0)) << 40) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 1)) << 32) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 2)) << 24) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 3)) << 16) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 4)) << 8) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 5)) << 0)))

#define EXTRACT_BE_S_6(p) \
	(((*((const uint8_t *)(p) + 0)) & 0x80) ? \
	  ((int64_t)(INT64_T_CONSTANT(0xFFFF000000000000U) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 0)) << 40) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 1)) << 32) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 2)) << 24) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 3)) << 16) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 4)) << 8) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 5)) << 0))) : \
	  ((int64_t)(((uint64_t)(*((const uint8_t *)(p) + 0)) << 40) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 1)) << 32) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 2)) << 24) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 3)) << 16) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 4)) << 8) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 5)) << 0))))

#define EXTRACT_BE_U_7(p) \
	((uint64_t)(((uint64_t)(*((const uint8_t *)(p) + 0)) << 48) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 1)) << 40) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 2)) << 32) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 3)) << 24) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 4)) << 16) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 5)) << 8) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 6)) << 0)))

#define EXTRACT_BE_S_7(p) \
	(((*((const uint8_t *)(p) + 0)) & 0x80) ? \
	  ((int64_t)(INT64_T_CONSTANT(0xFF00000000000000U) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 0)) << 48) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 1)) << 40) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 2)) << 32) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 3)) << 24) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 4)) << 16) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 5)) << 8) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 6)) << 0))) : \
	  ((int64_t)(((uint64_t)(*((const uint8_t *)(p) + 0)) << 48) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 1)) << 40) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 2)) << 32) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 3)) << 24) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 4)) << 16) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 5)) << 8) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 6)) << 0))))

/*
 * Macros to extract possibly-unaligned little-endian integral values.
 * XXX - do loads on little-endian machines that support unaligned loads?
 */
#define EXTRACT_LE_U_2(p) \
	((uint16_t)(((uint16_t)(*((const uint8_t *)(p) + 1)) << 8) | \
	            ((uint16_t)(*((const uint8_t *)(p) + 0)) << 0)))
#define EXTRACT_LE_S_2(p) \
	((int16_t)(((uint16_t)(*((const uint8_t *)(p) + 1)) << 8) | \
	           ((uint16_t)(*((const uint8_t *)(p) + 0)) << 0)))
#define EXTRACT_LE_U_4(p) \
	((uint32_t)(((uint32_t)(*((const uint8_t *)(p) + 3)) << 24) | \
	            ((uint32_t)(*((const uint8_t *)(p) + 2)) << 16) | \
	            ((uint32_t)(*((const uint8_t *)(p) + 1)) << 8) | \
	            ((uint32_t)(*((const uint8_t *)(p) + 0)) << 0)))
#define EXTRACT_LE_S_4(p) \
	((int32_t)(((uint32_t)(*((const uint8_t *)(p) + 3)) << 24) | \
	           ((uint32_t)(*((const uint8_t *)(p) + 2)) << 16) | \
	           ((uint32_t)(*((const uint8_t *)(p) + 1)) << 8) | \
	           ((uint32_t)(*((const uint8_t *)(p) + 0)) << 0)))
#define EXTRACT_LE_U_3(p) \
	((uint32_t)(((uint32_t)(*((const uint8_t *)(p) + 2)) << 16) | \
	            ((uint32_t)(*((const uint8_t *)(p) + 1)) << 8) | \
	            ((uint32_t)(*((const uint8_t *)(p) + 0)) << 0)))
#define EXTRACT_LE_S_3(p) \
	((int32_t)(((uint32_t)(*((const uint8_t *)(p) + 2)) << 16) | \
	           ((uint32_t)(*((const uint8_t *)(p) + 1)) << 8) | \
	           ((uint32_t)(*((const uint8_t *)(p) + 0)) << 0)))
#define EXTRACT_LE_U_8(p) \
	((uint64_t)(((uint64_t)(*((const uint8_t *)(p) + 7)) << 56) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 6)) << 48) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 5)) << 40) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 4)) << 32) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 3)) << 24) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 2)) << 16) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 1)) << 8) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 0)) << 0)))
#define EXTRACT_LE_S_8(p) \
	((int64_t)(((uint64_t)(*((const uint8_t *)(p) + 7)) << 56) | \
	           ((uint64_t)(*((const uint8_t *)(p) + 6)) << 48) | \
	           ((uint64_t)(*((const uint8_t *)(p) + 5)) << 40) | \
	           ((uint64_t)(*((const uint8_t *)(p) + 4)) << 32) | \
	           ((uint64_t)(*((const uint8_t *)(p) + 3)) << 24) | \
	           ((uint64_t)(*((const uint8_t *)(p) + 2)) << 16) | \
	           ((uint64_t)(*((const uint8_t *)(p) + 1)) << 8) | \
	           ((uint64_t)(*((const uint8_t *)(p) + 0)) << 0)))
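
/*
 * Byte-order example: for the two bytes 0x01 0x02 at p,
 * EXTRACT_BE_U_2(p) assembles them most-significant byte first and
 * yields 0x0102, while EXTRACT_LE_U_2(p) assembles them
 * least-significant byte first and yields 0x0201.
 */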