#ifndef SG_UNALIGNED_H
#define SG_UNALIGNED_H

/*
 * Copyright (c) 2014-2018 Douglas Gilbert.
 * All rights reserved.
 * Use of this source code is governed by a BSD-style
 * license that can be found in the BSD_LICENSE file.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <stdbool.h>
#include <stdint.h>     /* for uint8_t and friends */
#include <string.h>     /* for memcpy */

#ifdef __cplusplus
extern "C" {
#endif

/* These inline functions convert integers (always unsigned) to byte streams
 * and vice versa. They have two goals:
 *   - change the byte ordering of integers between host order and big
 *     endian ("_be") or little endian ("_le")
 *   - copy the big or little endian byte stream so it complies with any
 *     alignment that host integers require
 *
 * Host integer to given endian byte stream is a "_put_" function that takes
 * two arguments (an integer and a pointer to a byte stream) and returns
 * void. Given endian byte stream to host integer is a "_get_" function that
 * takes one argument and returns an integer of appropriate size (uint32_t
 * for 24 bit operations, uint64_t for 48 bit operations).
 *
 * Big endian byte format "on the wire" is the default used by SCSI
 * standards (www.t10.org). Big endian is also the network byte order.
 * Little endian is used by ATA, PCI and NVMe.
 */
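
/* Illustrative usage sketch (added for illustration, not part of the
 * original header): filling the big endian fields of a READ(10) cdb and
 * reading one back. The field offsets shown (LBA at bytes 2..5, transfer
 * length at bytes 7..8) follow the usual SBC layout and are given here
 * only as an example.
 *
 *     uint8_t cdb[10];
 *
 *     memset(cdb, 0, sizeof(cdb));
 *     cdb[0] = 0x28;                              // READ(10) opcode
 *     sg_put_unaligned_be32(0x12345678, cdb + 2); // logical block address
 *     sg_put_unaligned_be16(8, cdb + 7);          // transfer length (blocks)
 *     // sg_get_unaligned_be32(cdb + 2) now returns 0x12345678 again
 */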

/* The generic form of these routines was borrowed from the Linux kernel,
 * via mhvtl. There is a specialised version of the main functions for
 * little endian or big endian provided that not-quite-standard defines for
 * endianness are available from the compiler and the <byteswap.h> header
 * (a GNU extension) has been detected by ./configure . To force the
 * generic version, use './configure --disable-fast-lebe'. */

/* Note: Assumes that the source and destination locations do not overlap.
 * An example of overlapping source and destination:
 *     sg_put_unaligned_le64(j, ((uint8_t *)&j) + 1);
 * Best not to do things like that.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"     /* need this to see if HAVE_BYTESWAP_H */
#endif

#undef GOT_UNALIGNED_SPECIALS   /* just in case */

#if defined(__BYTE_ORDER__) && defined(HAVE_BYTESWAP_H) && \
    ! defined(IGNORE_FAST_LEBE)

#if defined(__LITTLE_ENDIAN__) || (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)

#define GOT_UNALIGNED_SPECIALS 1

#include <byteswap.h>           /* for bswap_16(), bswap_32() and bswap_64() */

// #warning ">>>>>> Doing Little endian special unaligneds"

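/* Note (added for illustration): on a little endian host the "_le" helpers
 * below reduce to a plain memcpy(), while the "_be" helpers add a byte
 * swap. For example, with p pointing at the bytes { 0x01, 0x02, 0x03, 0x04 }:
 *
 *     memcpy(&u, p, 4);        // u == 0x04030201 on a little endian host
 *     return bswap_32(u);      // yields 0x01020304, the big endian value
 */
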
static inline uint16_t sg_get_unaligned_be16(const void *p)
{
        uint16_t u;

        memcpy(&u, p, 2);
        return bswap_16(u);
}

static inline uint32_t sg_get_unaligned_be32(const void *p)
{
        uint32_t u;

        memcpy(&u, p, 4);
        return bswap_32(u);
}

static inline uint64_t sg_get_unaligned_be64(const void *p)
{
        uint64_t u;

        memcpy(&u, p, 8);
        return bswap_64(u);
}

static inline void sg_put_unaligned_be16(uint16_t val, void *p)
{
        uint16_t u = bswap_16(val);

        memcpy(p, &u, 2);
}

static inline void sg_put_unaligned_be32(uint32_t val, void *p)
{
        uint32_t u = bswap_32(val);

        memcpy(p, &u, 4);
}

static inline void sg_put_unaligned_be64(uint64_t val, void *p)
{
        uint64_t u = bswap_64(val);

        memcpy(p, &u, 8);
}

static inline uint16_t sg_get_unaligned_le16(const void *p)
{
        uint16_t u;

        memcpy(&u, p, 2);
        return u;
}

static inline uint32_t sg_get_unaligned_le32(const void *p)
{
        uint32_t u;

        memcpy(&u, p, 4);
        return u;
}

static inline uint64_t sg_get_unaligned_le64(const void *p)
{
        uint64_t u;

        memcpy(&u, p, 8);
        return u;
}

static inline void sg_put_unaligned_le16(uint16_t val, void *p)
{
        memcpy(p, &val, 2);
}

static inline void sg_put_unaligned_le32(uint32_t val, void *p)
{
        memcpy(p, &val, 4);
}

static inline void sg_put_unaligned_le64(uint64_t val, void *p)
{
        memcpy(p, &val, 8);
}

#elif defined(__BIG_ENDIAN__) || (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)

#define GOT_UNALIGNED_SPECIALS 1

#include <byteswap.h>

// #warning ">>>>>> Doing BIG endian special unaligneds"

static inline uint16_t sg_get_unaligned_le16(const void *p)
{
        uint16_t u;

        memcpy(&u, p, 2);
        return bswap_16(u);
}

static inline uint32_t sg_get_unaligned_le32(const void *p)
{
        uint32_t u;

        memcpy(&u, p, 4);
        return bswap_32(u);
}

static inline uint64_t sg_get_unaligned_le64(const void *p)
{
        uint64_t u;

        memcpy(&u, p, 8);
        return bswap_64(u);
}

static inline void sg_put_unaligned_le16(uint16_t val, void *p)
{
        uint16_t u = bswap_16(val);

        memcpy(p, &u, 2);
}

static inline void sg_put_unaligned_le32(uint32_t val, void *p)
{
        uint32_t u = bswap_32(val);

        memcpy(p, &u, 4);
}

static inline void sg_put_unaligned_le64(uint64_t val, void *p)
{
        uint64_t u = bswap_64(val);

        memcpy(p, &u, 8);
}

static inline uint16_t sg_get_unaligned_be16(const void *p)
{
        uint16_t u;

        memcpy(&u, p, 2);
        return u;
}

static inline uint32_t sg_get_unaligned_be32(const void *p)
{
        uint32_t u;

        memcpy(&u, p, 4);
        return u;
}

static inline uint64_t sg_get_unaligned_be64(const void *p)
{
        uint64_t u;

        memcpy(&u, p, 8);
        return u;
}

static inline void sg_put_unaligned_be16(uint16_t val, void *p)
{
        memcpy(p, &val, 2);
}

static inline void sg_put_unaligned_be32(uint32_t val, void *p)
{
        memcpy(p, &val, 4);
}

static inline void sg_put_unaligned_be64(uint64_t val, void *p)
{
        memcpy(p, &val, 8);
}

#endif          /* __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__  */
#endif          /* #if defined __BYTE_ORDER__ && defined <byteswap.h> &&
                 *     ! defined IGNORE_FAST_LEBE */


#ifndef GOT_UNALIGNED_SPECIALS

/* Now we have no tricks left, so use the only way this can be done
 * both safely and portably in C: lots of shifts. */

// #warning ">>>>>> Doing GENERIC unaligneds"

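/* Worked example (added for illustration) of the shift-based form: with p
 * pointing at the bytes { 0xAB, 0xCD }, sg_get_unaligned_be16() computes
 *
 *     (0xAB << 8) | 0xCD  ==  0xABCD
 *
 * i.e. the first byte is the most significant, regardless of host
 * endianness or alignment. */
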
static inline uint16_t sg_get_unaligned_be16(const void *p)
{
        return ((const uint8_t *)p)[0] << 8 | ((const uint8_t *)p)[1];
}

static inline uint32_t sg_get_unaligned_be32(const void *p)
{
        return ((const uint8_t *)p)[0] << 24 | ((const uint8_t *)p)[1] << 16 |
                ((const uint8_t *)p)[2] << 8 | ((const uint8_t *)p)[3];
}

static inline uint64_t sg_get_unaligned_be64(const void *p)
{
        return (uint64_t)sg_get_unaligned_be32(p) << 32 |
               sg_get_unaligned_be32((const uint8_t *)p + 4);
}

static inline void sg_put_unaligned_be16(uint16_t val, void *p)
{
        ((uint8_t *)p)[0] = (uint8_t)(val >> 8);
        ((uint8_t *)p)[1] = (uint8_t)val;
}

static inline void sg_put_unaligned_be32(uint32_t val, void *p)
{
        sg_put_unaligned_be16(val >> 16, p);
        sg_put_unaligned_be16(val, (uint8_t *)p + 2);
}

static inline void sg_put_unaligned_be64(uint64_t val, void *p)
{
        sg_put_unaligned_be32(val >> 32, p);
        sg_put_unaligned_be32(val, (uint8_t *)p + 4);
}


static inline uint16_t sg_get_unaligned_le16(const void *p)
{
        return ((const uint8_t *)p)[1] << 8 | ((const uint8_t *)p)[0];
}

static inline uint32_t sg_get_unaligned_le32(const void *p)
{
        return ((const uint8_t *)p)[3] << 24 | ((const uint8_t *)p)[2] << 16 |
                ((const uint8_t *)p)[1] << 8 | ((const uint8_t *)p)[0];
}

static inline uint64_t sg_get_unaligned_le64(const void *p)
{
        return (uint64_t)sg_get_unaligned_le32((const uint8_t *)p + 4) << 32 |
               sg_get_unaligned_le32(p);
}

static inline void sg_put_unaligned_le16(uint16_t val, void *p)
{
        ((uint8_t *)p)[0] = val & 0xff;
        ((uint8_t *)p)[1] = val >> 8;
}

static inline void sg_put_unaligned_le32(uint32_t val, void *p)
{
        sg_put_unaligned_le16(val >> 16, (uint8_t *)p + 2);
        sg_put_unaligned_le16(val, p);
}

static inline void sg_put_unaligned_le64(uint64_t val, void *p)
{
        sg_put_unaligned_le32(val >> 32, (uint8_t *)p + 4);
        sg_put_unaligned_le32(val, p);
}

#endif          /* #ifndef GOT_UNALIGNED_SPECIALS */

/* Following are lesser used conversions that don't have specializations
 * for endianness; big endian first. In summary these are the 24, 48 bit and
 * given-length conversions plus the "nz" conditional put conversions. */

/* Now big endian, get 24+48 then put 24+48 */
static inline uint32_t sg_get_unaligned_be24(const void *p)
{
        return ((const uint8_t *)p)[0] << 16 | ((const uint8_t *)p)[1] << 8 |
               ((const uint8_t *)p)[2];
}

/* Assume 48 bit value placed in uint64_t */
static inline uint64_t sg_get_unaligned_be48(const void *p)
{
        return (uint64_t)sg_get_unaligned_be16(p) << 32 |
               sg_get_unaligned_be32((const uint8_t *)p + 2);
}

/* Returns 0 if 'num_bytes' is less than or equal to 0 or greater than
 * 8 (i.e. sizeof(uint64_t)). Else returns result in uint64_t which is
 * an 8 byte unsigned integer. */
static inline uint64_t sg_get_unaligned_be(int num_bytes, const void *p)
{
        if ((num_bytes <= 0) || (num_bytes > (int)sizeof(uint64_t)))
                return 0;
        else {
                const uint8_t * xp = (const uint8_t *)p;
                uint64_t res = *xp;

                for (++xp; num_bytes > 1; ++xp, --num_bytes)
                        res = (res << 8) | *xp;
                return res;
        }
}

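/* Illustrative sketch (added for illustration, not part of the original
 * header): a 3 byte, big endian field such as a cdb allocation length can
 * be fetched either with the fixed-width 24 bit helper or with the
 * given-length form above; both return the same value.
 *
 *     uint8_t buf[3] = {0x01, 0x02, 0x03};
 *
 *     sg_get_unaligned_be24(buf);     // 0x010203
 *     sg_get_unaligned_be(3, buf);    // also 0x010203
 *     sg_get_unaligned_be(9, buf);    // 0 (num_bytes out of range)
 */
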
static inline void sg_put_unaligned_be24(uint32_t val, void *p)
{
        ((uint8_t *)p)[0] = (val >> 16) & 0xff;
        ((uint8_t *)p)[1] = (val >> 8) & 0xff;
        ((uint8_t *)p)[2] = val & 0xff;
}

/* Assume 48 bit value placed in uint64_t */
static inline void sg_put_unaligned_be48(uint64_t val, void *p)
{
        sg_put_unaligned_be16(val >> 32, p);
        sg_put_unaligned_be32(val, (uint8_t *)p + 2);
}

/* Now little endian, get 24+48 then put 24+48 */
static inline uint32_t sg_get_unaligned_le24(const void *p)
{
        return (uint32_t)sg_get_unaligned_le16(p) |
               ((const uint8_t *)p)[2] << 16;
}

/* Assume 48 bit value placed in uint64_t */
static inline uint64_t sg_get_unaligned_le48(const void *p)
{
        return (uint64_t)sg_get_unaligned_le16((const uint8_t *)p + 4) << 32 |
               sg_get_unaligned_le32(p);
}

static inline void sg_put_unaligned_le24(uint32_t val, void *p)
{
        ((uint8_t *)p)[2] = (val >> 16) & 0xff;
        ((uint8_t *)p)[1] = (val >> 8) & 0xff;
        ((uint8_t *)p)[0] = val & 0xff;
}

/* Assume 48 bit value placed in uint64_t */
static inline void sg_put_unaligned_le48(uint64_t val, void *p)
{
        ((uint8_t *)p)[5] = (val >> 40) & 0xff;
        ((uint8_t *)p)[4] = (val >> 32) & 0xff;
        ((uint8_t *)p)[3] = (val >> 24) & 0xff;
        ((uint8_t *)p)[2] = (val >> 16) & 0xff;
        ((uint8_t *)p)[1] = (val >> 8) & 0xff;
        ((uint8_t *)p)[0] = val & 0xff;
}

/* Returns 0 if 'num_bytes' is less than or equal to 0 or greater than
 * 8 (i.e. sizeof(uint64_t)). Else returns result in uint64_t which is
 * an 8 byte unsigned integer. */
static inline uint64_t sg_get_unaligned_le(int num_bytes, const void *p)
{
        if ((num_bytes <= 0) || (num_bytes > (int)sizeof(uint64_t)))
                return 0;
        else {
                const uint8_t * xp = (const uint8_t *)p + (num_bytes - 1);
                uint64_t res = *xp;

                for (--xp; num_bytes > 1; --xp, --num_bytes)
                        res = (res << 8) | *xp;
                return res;
        }
}

/* Since cdbs and parameter blocks are often memset to zero before these
 * unaligned functions partially fill them, the following "nz" variants
 * check for a val of zero and do nothing in that case. Big endian first,
 * then little endian. */
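
/* Illustrative usage sketch (added for illustration, not part of the
 * original header): because the cdb below is already zeroed, the "nz" put
 * can be used for optional fields and leaves the bytes untouched when the
 * value is 0.
 *
 *     uint8_t cdb[16];
 *
 *     memset(cdb, 0, sizeof(cdb));
 *     sg_nz_put_unaligned_be32(0, cdb + 10);      // no-op, field stays 0
 *     sg_nz_put_unaligned_be32(512, cdb + 10);    // writes 0x00 0x00 0x02 0x00
 */
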
static inline void sg_nz_put_unaligned_be16(uint16_t val, void *p)
{
        if (val)
                sg_put_unaligned_be16(val, p);
}

static inline void sg_nz_put_unaligned_be24(uint32_t val, void *p)
{
        if (val) {
                ((uint8_t *)p)[0] = (val >> 16) & 0xff;
                ((uint8_t *)p)[1] = (val >> 8) & 0xff;
                ((uint8_t *)p)[2] = val & 0xff;
        }
}

static inline void sg_nz_put_unaligned_be32(uint32_t val, void *p)
{
        if (val)
                sg_put_unaligned_be32(val, p);
}

static inline void sg_nz_put_unaligned_be64(uint64_t val, void *p)
{
        if (val)
                sg_put_unaligned_be64(val, p);
}

static inline void sg_nz_put_unaligned_le16(uint16_t val, void *p)
{
        if (val)
                sg_put_unaligned_le16(val, p);
}

static inline void sg_nz_put_unaligned_le24(uint32_t val, void *p)
{
        if (val) {
                ((uint8_t *)p)[2] = (val >> 16) & 0xff;
                ((uint8_t *)p)[1] = (val >> 8) & 0xff;
                ((uint8_t *)p)[0] = val & 0xff;
        }
}

static inline void sg_nz_put_unaligned_le32(uint32_t val, void *p)
{
        if (val)
                sg_put_unaligned_le32(val, p);
}

static inline void sg_nz_put_unaligned_le64(uint64_t val, void *p)
{
        if (val)
                sg_put_unaligned_le64(val, p);
}


#ifdef __cplusplus
}
#endif

#endif /* SG_UNALIGNED_H */