/* Unaligned memory access functionality.
   Copyright (C) 2000-2010 Red Hat, Inc.
   This file is part of Red Hat elfutils.
   Written by Ulrich Drepper <drepper@redhat.com>, 2001.

   Red Hat elfutils is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by the
   Free Software Foundation; version 2 of the License.

   Red Hat elfutils is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with Red Hat elfutils; if not, write to the Free Software Foundation,
   Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA.

   In addition, as a special exception, Red Hat, Inc. gives You the
   additional right to link the code of Red Hat elfutils with code licensed
   under any Open Source Initiative certified open source license
   (http://www.opensource.org/licenses/index.php) which requires the
   distribution of source code with any binary distribution and to
   distribute linked combinations of the two.  Non-GPL Code permitted under
   this exception must only link to the code of Red Hat elfutils through
   those well defined interfaces identified in the file named EXCEPTION
   found in the source code files (the "Approved Interfaces").  The files
   of Non-GPL Code may instantiate templates or use macros or inline
   functions from the Approved Interfaces without causing the resulting
   work to be covered by the GNU General Public License.  Only Red Hat,
   Inc. may make changes or additions to the list of Approved Interfaces.
   Red Hat's grant of this exception is conditioned upon your not adding
   any new exceptions.  If you wish to add a new Approved Interface or
   exception, please contact Red Hat.  You must obey the GNU General Public
   License in all respects for all of the Red Hat elfutils code and other
   code used in conjunction with Red Hat elfutils except the Non-GPL Code
   covered by this exception.  If you modify this file, you may extend this
   exception to your version of the file, but you are not obligated to do
   so.  If you do not wish to provide this exception without modification,
   you must delete this exception statement from your version and license
   this file solely under the GPL without exception.

   Red Hat elfutils is an included package of the Open Invention Network.
   An included package of the Open Invention Network is a package for which
   Open Invention Network licensees cross-license their patents.  No patent
   license is granted, either expressly or impliedly, by designation as an
   included package.  Should you wish to participate in the Open Invention
   Network licensing program, please visit www.openinventionnetwork.com
   <http://www.openinventionnetwork.com>.  */

#ifndef _MEMORY_ACCESS_H
#define _MEMORY_ACCESS_H 1

#include <byteswap.h>
#include <limits.h>
#include <stdint.h>


/* Number decoding macros.  See 7.6 Variable Length Data.  */
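/* A brief refresher on the encoding these macros decode.  A LEB128 number
   is stored least significant group first, seven bits per byte, with the
   high bit of every byte except the last set as a continuation flag.  As a
   worked example (the value is the one commonly used as an illustration in
   the DWARF specification), the unsigned value 624485 (0x98765) splits
   into the 7-bit groups 0x65, 0x0e, 0x26 (least significant first) and is
   therefore encoded as the byte sequence 0xe5 0x8e 0x26.  For SLEB128 the
   final group also carries the sign bit (bit 6), which has to be
   propagated through the remaining high bits of the result.  */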
#define get_uleb128_step(var, addr, nth, break) \
    __b = *(addr)++; \
    var |= (uintmax_t) (__b & 0x7f) << (nth * 7); \
    if (likely ((__b & 0x80) == 0)) \
      break

#define get_uleb128(var, addr) \
  do { \
    unsigned char __b; \
    var = 0; \
    get_uleb128_step (var, addr, 0, break); \
    var = __libdw_get_uleb128 (var, 1, &(addr)); \
  } while (0)

#define get_uleb128_rest_return(var, i, addrp) \
  do { \
    for (; i < 10; ++i) \
      { \
        get_uleb128_step (var, *addrp, i, return var); \
      } \
    /* Other implementations set VALUE to UINT_MAX in this \
       case.  So we better do this as well.  */ \
    return UINT64_MAX; \
  } while (0)
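/* Usage sketch (illustrative only; the buffer contents are an assumed
   example): ADDR is a cursor into the encoded data and is advanced past
   the consumed bytes as a side effect of the macro.

     const unsigned char *p = buf;   // e.g. buf holds 0xe5 0x8e 0x26
     uint64_t value;
     get_uleb128 (value, p);         // value == 624485, p advanced by 3
*/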

/* The signed case is similar, but we sign-extend the result.  */

#define get_sleb128_step(var, addr, nth, break) \
    __b = *(addr)++; \
    _v |= (uint64_t) (__b & 0x7f) << (nth * 7); \
    if (likely ((__b & 0x80) == 0)) \
      { \
        var = (_v << (64 - (nth * 7) - 7)) >> (64 - (nth * 7) - 7); \
        break; \
      } \
    else do {} while (0)

#define get_sleb128(var, addr) \
  do { \
    unsigned char __b; \
    int64_t _v = 0; \
    get_sleb128_step (var, addr, 0, break); \
    var = __libdw_get_sleb128 (_v, 1, &(addr)); \
  } while (0)

#define get_sleb128_rest_return(var, i, addrp) \
  do { \
    for (; i < 9; ++i) \
      { \
        get_sleb128_step (var, *addrp, i, return var); \
      } \
    __b = *(*addrp)++; \
    if (likely ((__b & 0x80) == 0)) \
      return var | ((uint64_t) __b << 63); \
    else \
      /* Other implementations set VALUE to INT_MAX in this \
         case.  So we better do this as well.  */ \
      return INT64_MAX; \
  } while (0)
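/* Usage sketch for the signed variant (illustrative only; the input byte
   is an assumed example): a single byte 0x7f decodes to -1 because bit 6
   of the final group is the sign bit and is extended into the upper bits.

     const unsigned char *p = buf;   // e.g. buf holds the single byte 0x7f
     int64_t value;
     get_sleb128 (value, p);         // value == -1, p advanced by 1
*/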
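/* When building libdw itself (IS_LIBDW) the multi-byte "rest" decoders are
   out-of-line internal functions; every other user of this header gets
   equivalent static inline fallbacks instead.  */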
#ifdef IS_LIBDW
extern uint64_t __libdw_get_uleb128 (uint64_t acc, unsigned int i,
				     const unsigned char **addrp)
     internal_function attribute_hidden;
extern int64_t __libdw_get_sleb128 (int64_t acc, unsigned int i,
				    const unsigned char **addrp)
     internal_function attribute_hidden;
#else
static inline uint64_t
__attribute__ ((unused))
__libdw_get_uleb128 (uint64_t acc, unsigned int i, const unsigned char **addrp)
{
  unsigned char __b;
  get_uleb128_rest_return (acc, i, addrp);
}
static inline int64_t
__attribute__ ((unused))
__libdw_get_sleb128 (int64_t acc, unsigned int i, const unsigned char **addrp)
{
  unsigned char __b;
  int64_t _v = acc;
  get_sleb128_rest_return (acc, i, addrp);
}
#endif


/* We use simple memory access functions in case the hardware allows it.
   The caller has to make sure we don't have alias problems.  */
#if ALLOW_UNALIGNED

# define read_2ubyte_unaligned(Dbg, Addr) \
  (unlikely ((Dbg)->other_byte_order) \
   ? bswap_16 (*((const uint16_t *) (Addr))) \
   : *((const uint16_t *) (Addr)))
# define read_2sbyte_unaligned(Dbg, Addr) \
  (unlikely ((Dbg)->other_byte_order) \
   ? (int16_t) bswap_16 (*((const int16_t *) (Addr))) \
   : *((const int16_t *) (Addr)))

# define read_4ubyte_unaligned_noncvt(Addr) \
   *((const uint32_t *) (Addr))
# define read_4ubyte_unaligned(Dbg, Addr) \
  (unlikely ((Dbg)->other_byte_order) \
   ? bswap_32 (*((const uint32_t *) (Addr))) \
   : *((const uint32_t *) (Addr)))
# define read_4sbyte_unaligned(Dbg, Addr) \
  (unlikely ((Dbg)->other_byte_order) \
   ? (int32_t) bswap_32 (*((const int32_t *) (Addr))) \
   : *((const int32_t *) (Addr)))

# define read_8ubyte_unaligned(Dbg, Addr) \
  (unlikely ((Dbg)->other_byte_order) \
   ? bswap_64 (*((const uint64_t *) (Addr))) \
   : *((const uint64_t *) (Addr)))
# define read_8sbyte_unaligned(Dbg, Addr) \
  (unlikely ((Dbg)->other_byte_order) \
   ? (int64_t) bswap_64 (*((const int64_t *) (Addr))) \
   : *((const int64_t *) (Addr)))

#else
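/* The packed attribute gives this union an alignment of 1, telling the
   compiler that objects accessed through it may live at any byte address,
   so it emits access sequences that are safe on strict-alignment
   targets.  */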
union unaligned
  {
    void *p;
    uint16_t u2;
    uint32_t u4;
    uint64_t u8;
    int16_t s2;
    int32_t s4;
    int64_t s8;
  } __attribute__ ((packed));

# define read_2ubyte_unaligned(Dbg, Addr) \
  read_2ubyte_unaligned_1 ((Dbg)->other_byte_order, (Addr))
# define read_2sbyte_unaligned(Dbg, Addr) \
  read_2sbyte_unaligned_1 ((Dbg)->other_byte_order, (Addr))
# define read_4ubyte_unaligned(Dbg, Addr) \
  read_4ubyte_unaligned_1 ((Dbg)->other_byte_order, (Addr))
# define read_4sbyte_unaligned(Dbg, Addr) \
  read_4sbyte_unaligned_1 ((Dbg)->other_byte_order, (Addr))
# define read_8ubyte_unaligned(Dbg, Addr) \
  read_8ubyte_unaligned_1 ((Dbg)->other_byte_order, (Addr))
# define read_8sbyte_unaligned(Dbg, Addr) \
  read_8sbyte_unaligned_1 ((Dbg)->other_byte_order, (Addr))

static inline uint16_t
read_2ubyte_unaligned_1 (bool other_byte_order, const void *p)
{
  const union unaligned *up = p;
  if (unlikely (other_byte_order))
    return bswap_16 (up->u2);
  return up->u2;
}
static inline int16_t
read_2sbyte_unaligned_1 (bool other_byte_order, const void *p)
{
  const union unaligned *up = p;
  if (unlikely (other_byte_order))
    return (int16_t) bswap_16 (up->u2);
  return up->s2;
}

static inline uint32_t
read_4ubyte_unaligned_noncvt (const void *p)
{
  const union unaligned *up = p;
  return up->u4;
}
static inline uint32_t
read_4ubyte_unaligned_1 (bool other_byte_order, const void *p)
{
  const union unaligned *up = p;
  if (unlikely (other_byte_order))
    return bswap_32 (up->u4);
  return up->u4;
}
static inline int32_t
read_4sbyte_unaligned_1 (bool other_byte_order, const void *p)
{
  const union unaligned *up = p;
  if (unlikely (other_byte_order))
    return (int32_t) bswap_32 (up->u4);
  return up->s4;
}

static inline uint64_t
read_8ubyte_unaligned_1 (bool other_byte_order, const void *p)
{
  const union unaligned *up = p;
  if (unlikely (other_byte_order))
    return bswap_64 (up->u8);
  return up->u8;
}
static inline int64_t
read_8sbyte_unaligned_1 (bool other_byte_order, const void *p)
{
  const union unaligned *up = p;
  if (unlikely (other_byte_order))
    return (int64_t) bswap_64 (up->u8);
  return up->s8;
}

#endif	/* allow unaligned */


#define read_ubyte_unaligned(Nbytes, Dbg, Addr) \
  ((Nbytes) == 2 ? read_2ubyte_unaligned (Dbg, Addr) \
   : (Nbytes) == 4 ? read_4ubyte_unaligned (Dbg, Addr) \
   : read_8ubyte_unaligned (Dbg, Addr))

#define read_sbyte_unaligned(Nbytes, Dbg, Addr) \
  ((Nbytes) == 2 ? read_2sbyte_unaligned (Dbg, Addr) \
   : (Nbytes) == 4 ? read_4sbyte_unaligned (Dbg, Addr) \
   : read_8sbyte_unaligned (Dbg, Addr))
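/* Usage sketch (illustrative only; DBG stands for whatever descriptor
   carries the other_byte_order flag, in libdw a Dwarf *): read a 32-bit
   value from possibly unaligned section data, byte-swapping when the
   file's byte order differs from the host's.

     const unsigned char *p = section_data;
     uint32_t val = read_4ubyte_unaligned (dbg, p);
*/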


#define read_2ubyte_unaligned_inc(Dbg, Addr) \
  ({ uint16_t t_ = read_2ubyte_unaligned (Dbg, Addr); \
     Addr = (__typeof (Addr)) (((uintptr_t) (Addr)) + 2); \
     t_; })
#define read_2sbyte_unaligned_inc(Dbg, Addr) \
  ({ int16_t t_ = read_2sbyte_unaligned (Dbg, Addr); \
     Addr = (__typeof (Addr)) (((uintptr_t) (Addr)) + 2); \
     t_; })

#define read_4ubyte_unaligned_inc(Dbg, Addr) \
  ({ uint32_t t_ = read_4ubyte_unaligned (Dbg, Addr); \
     Addr = (__typeof (Addr)) (((uintptr_t) (Addr)) + 4); \
     t_; })
#define read_4sbyte_unaligned_inc(Dbg, Addr) \
  ({ int32_t t_ = read_4sbyte_unaligned (Dbg, Addr); \
     Addr = (__typeof (Addr)) (((uintptr_t) (Addr)) + 4); \
     t_; })

#define read_8ubyte_unaligned_inc(Dbg, Addr) \
  ({ uint64_t t_ = read_8ubyte_unaligned (Dbg, Addr); \
     Addr = (__typeof (Addr)) (((uintptr_t) (Addr)) + 8); \
     t_; })
#define read_8sbyte_unaligned_inc(Dbg, Addr) \
  ({ int64_t t_ = read_8sbyte_unaligned (Dbg, Addr); \
     Addr = (__typeof (Addr)) (((uintptr_t) (Addr)) + 8); \
     t_; })


#define read_ubyte_unaligned_inc(Nbytes, Dbg, Addr) \
  ((Nbytes) == 2 ? read_2ubyte_unaligned_inc (Dbg, Addr) \
   : (Nbytes) == 4 ? read_4ubyte_unaligned_inc (Dbg, Addr) \
   : read_8ubyte_unaligned_inc (Dbg, Addr))

#define read_sbyte_unaligned_inc(Nbytes, Dbg, Addr) \
  ((Nbytes) == 2 ? read_2sbyte_unaligned_inc (Dbg, Addr) \
   : (Nbytes) == 4 ? read_4sbyte_unaligned_inc (Dbg, Addr) \
   : read_8sbyte_unaligned_inc (Dbg, Addr))
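/* The _inc variants additionally advance ADDR past the bytes just read,
   which is convenient when walking a structure sequentially (illustrative
   sketch only; the field layout is an assumed example):

     const unsigned char *p = section_data;
     uint32_t length  = read_4ubyte_unaligned_inc (dbg, p);   // p += 4
     uint16_t version = read_2ubyte_unaligned_inc (dbg, p);   // p += 2
*/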

#endif	/* memory-access.h */