/* Core file handling.
   Copyright (C) 2008 Red Hat, Inc.
   This file is part of Red Hat elfutils.

   Red Hat elfutils is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by the
   Free Software Foundation; version 2 of the License.

   Red Hat elfutils is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with Red Hat elfutils; if not, write to the Free Software Foundation,
   Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA.

   In addition, as a special exception, Red Hat, Inc. gives You the
   additional right to link the code of Red Hat elfutils with code licensed
   under any Open Source Initiative certified open source license
   (http://www.opensource.org/licenses/index.php) which requires the
   distribution of source code with any binary distribution and to
   distribute linked combinations of the two.  Non-GPL Code permitted under
   this exception must only link to the code of Red Hat elfutils through
   those well defined interfaces identified in the file named EXCEPTION
   found in the source code files (the "Approved Interfaces").  The files
   of Non-GPL Code may instantiate templates or use macros or inline
   functions from the Approved Interfaces without causing the resulting
   work to be covered by the GNU General Public License.  Only Red Hat,
   Inc. may make changes or additions to the list of Approved Interfaces.
   Red Hat's grant of this exception is conditioned upon your not adding
   any new exceptions.  If you wish to add a new Approved Interface or
   exception, please contact Red Hat.  You must obey the GNU General Public
   License in all respects for all of the Red Hat elfutils code and other
   code used in conjunction with Red Hat elfutils except the Non-GPL Code
   covered by this exception.  If you modify this file, you may extend this
   exception to your version of the file, but you are not obligated to do
   so.  If you do not wish to provide this exception without modification,
   you must delete this exception statement from your version and license
   this file solely under the GPL without exception.

   Red Hat elfutils is an included package of the Open Invention Network.
   An included package of the Open Invention Network is a package for which
   Open Invention Network licensees cross-license their patents.  No patent
   license is granted, either expressly or impliedly, by designation as an
   included package.  Should you wish to participate in the Open Invention
   Network licensing program, please visit www.openinventionnetwork.com
   <http://www.openinventionnetwork.com>.  */

#include <config.h>
#include "../libelf/libelfP.h"	/* For NOTE_ALIGN.  */
#undef _
#include "libdwflP.h"
#include <gelf.h>

#include <sys/param.h>
#include <unistd.h>
#include <endian.h>
#include <byteswap.h>
#include "system.h"


/* This is a prototype of what a new libelf interface might be.
   This implementation is pessimal for non-mmap cases and should
   be replaced by more diddling inside libelf internals.  */
static Elf *
elf_begin_rand (Elf *parent, loff_t offset, loff_t size, loff_t *next)
{
  if (parent == NULL)
    return NULL;

  /* On failure return, we update *NEXT to point back at OFFSET.  */
  inline Elf *fail (int error)
  {
    if (next != NULL)
      *next = offset;
    //__libelf_seterrno (error);
    __libdwfl_seterrno (DWFL_E (LIBELF, error));
    return NULL;
  }

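  /* The smallest valid OFFSET inside PARENT: past the ELF header for an
     ELF file, past the archive magic for an archive, otherwise zero.  */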
  loff_t min = (parent->kind == ELF_K_ELF ?
                (parent->class == ELFCLASS32
                 ? sizeof (Elf32_Ehdr) : sizeof (Elf64_Ehdr))
                : parent->kind == ELF_K_AR ? SARMAG
                : 0);

  if (unlikely (offset < min)
      || unlikely (offset >= (loff_t) parent->maximum_size))
    return fail (ELF_E_RANGE);

  /* For an archive, fetch just the size field
     from the archive header to override SIZE.  */
  if (parent->kind == ELF_K_AR)
    {
      struct ar_hdr h = { .ar_size = "" };

      if (unlikely (parent->maximum_size - offset < sizeof h))
        return fail (ELF_E_RANGE);

      if (parent->map_address != NULL)
        memcpy (h.ar_size, (parent->map_address + parent->start_offset
                            + offset + offsetof (struct ar_hdr, ar_size)),
                sizeof h.ar_size);
      else if (unlikely (pread_retry (parent->fildes,
                                      h.ar_size, sizeof (h.ar_size),
                                      parent->start_offset + offset
                                      + offsetof (struct ar_hdr, ar_size))
                         != sizeof (h.ar_size)))
        return fail (ELF_E_READ_ERROR);

      offset += sizeof h;

      char *endp;
      size = strtoll (h.ar_size, &endp, 10);
      if (unlikely (endp == h.ar_size)
          || unlikely ((loff_t) parent->maximum_size - offset < size))
        return fail (ELF_E_INVALID_ARCHIVE);
    }

  if (unlikely ((loff_t) parent->maximum_size - offset < size))
    return fail (ELF_E_RANGE);

  /* Even if we fail at this point, update *NEXT to point past the file.  */
  if (next != NULL)
    *next = offset + size;

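  /* If the request covers the entire parent file, just clone the parent
     descriptor instead of building a new one over a sub-image.  */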
  if (unlikely (offset == 0)
      && unlikely (size == (loff_t) parent->maximum_size))
    return elf_clone (parent, parent->cmd);

  /* Note the image is guaranteed live only as long as PARENT
     lives.  Using elf_memory is quite suboptimal if the whole
     file is not mmap'd.  We really should have something like
     a generalization of the archive support.  */
  Elf_Data *data = elf_getdata_rawchunk (parent, offset, size, ELF_T_BYTE);
  if (data == NULL)
    return NULL;
  assert ((loff_t) data->d_size == size);
  return elf_memory (data->d_buf, size);
}


int
dwfl_report_core_segments (Dwfl *dwfl, Elf *elf, const GElf_Ehdr *ehdr,
                           GElf_Phdr *notes)
{
  if (unlikely (dwfl == NULL))
    return -1;

  if (unlikely (elf == NULL) || unlikely (ehdr == NULL))
    {
      __libdwfl_seterrno (DWFL_E_LIBELF);
      return -1;
    }

  int result = 0;

  if (notes != NULL)
    notes->p_type = PT_NULL;

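  /* Report every PT_LOAD segment, and remember the first PT_NOTE header
     for the caller so it can find the core notes later.  */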
  for (int ndx = 0; result >= 0 && ndx < ehdr->e_phnum; ++ndx)
    {
      GElf_Phdr phdr_mem;
      GElf_Phdr *phdr = gelf_getphdr (elf, ndx, &phdr_mem);
      if (unlikely (phdr == NULL))
        {
          __libdwfl_seterrno (DWFL_E_LIBELF);
          return -1;
        }
      switch (phdr->p_type)
        {
        case PT_LOAD:
          result = dwfl_report_segment (dwfl, ndx, phdr, 0, NULL);
          break;

        case PT_NOTE:
          if (notes != NULL)
            {
              *notes = *phdr;
              notes = NULL;
            }
          break;
        }
    }

  return result;
}

/* Never read more than this much without mmap.  */
#define MAX_EAGER_COST 8192

static bool
core_file_read_eagerly (Dwfl_Module *mod,
                        void **userdata __attribute__ ((unused)),
                        const char *name __attribute__ ((unused)),
                        Dwarf_Addr start __attribute__ ((unused)),
                        void **buffer, size_t *buffer_available,
                        GElf_Off cost, GElf_Off worthwhile,
                        GElf_Off whole,
                        GElf_Off contiguous __attribute__ ((unused)),
                        void *arg, Elf **elfp)
{
  Elf *core = arg;

  if (whole <= *buffer_available)
    {
      /* All there ever was, we already have on hand.  */

      if (core->map_address == NULL)
        {
          /* We already malloc'd the buffer.  */
          *elfp = elf_memory (*buffer, whole);
          if (unlikely (*elfp == NULL))
            return false;

          (*elfp)->flags |= ELF_F_MALLOCED;
          *buffer = NULL;
          *buffer_available = 0;
          return true;
        }

      /* We can use the image inside the core file directly.  */
      *elfp = elf_begin_rand (core, *buffer - core->map_address, whole, NULL);
      *buffer = NULL;
      *buffer_available = 0;
      return *elfp != NULL;
    }

  /* We don't have the whole file.
     Figure out if this is better than nothing.  */

  if (worthwhile == 0)
    /* Caller doesn't think so.  */
    return false;

  /* XXX: We would like to fall back to using the partial file via memory
     when the build-ID find_elf lookup fails.  Also, the link_map name may
     give a better file name from disk than this partial image; that
     requires the find_elf hook redoing the magic to fall back if no file
     is found.  */

  if (mod->build_id_len > 0)
    /* There is a build ID that could help us find the whole file,
       which might be more useful than what we have.
       We'll just rely on that.  */
    return false;

  if (core->map_address != NULL)
    /* It's cheap to get, so get it.  */
    return true;

  /* Only use it if there isn't too much to be read.  */
  return cost <= MAX_EAGER_COST;
}

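/* Memory-read callback over ELF's PT_LOAD segments: fill *BUFFER with at
   least MINREAD bytes of the image at VADDR, either by pointing directly
   into the mmap'd file or by reading from its file descriptor.  MINREAD of
   zero means read a NUL-terminated string; NDX of -1 means clean up a
   buffer handed out by an earlier call.  */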
bool
dwfl_elf_phdr_memory_callback (Dwfl *dwfl, int ndx,
                               void **buffer, size_t *buffer_available,
                               GElf_Addr vaddr,
                               size_t minread,
                               void *arg)
{
  Elf *elf = arg;

  if (ndx == -1)
    {
      /* Called for cleanup.  */
      if (elf->map_address == NULL)
        free (*buffer);
      *buffer = NULL;
      *buffer_available = 0;
      return false;
    }

  const GElf_Off align = dwfl->segment_align ?: 1;
  GElf_Phdr phdr;

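  /* Find the first PT_LOAD segment whose aligned end extends past VADDR.  */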
  do
    if (unlikely (gelf_getphdr (elf, ndx++, &phdr) == NULL))
      return true;
  while (phdr.p_type != PT_LOAD
         || ((phdr.p_vaddr + phdr.p_memsz + align - 1) & -align) <= vaddr);

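  /* START is VADDR's position as a file offset; END is the aligned end of
     this segment's file image.  */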
  GElf_Off start = vaddr - phdr.p_vaddr + phdr.p_offset;
  GElf_Off end = (phdr.p_offset + phdr.p_filesz + align - 1) & -align;

  /* Use following contiguous segments to get towards SIZE.  */
  inline bool more (size_t size)
  {
    while (end <= start || end - start < size)
      {
        if (phdr.p_filesz < phdr.p_memsz)
          /* This segment is truncated, so no following one helps us.  */
          return false;

        if (unlikely (gelf_getphdr (elf, ndx++, &phdr) == NULL))
          return false;

        if (phdr.p_type == PT_LOAD)
          {
            if (phdr.p_offset > end)
              /* It's discontiguous!  */
              return false;

            end = (phdr.p_offset + phdr.p_filesz + align - 1) & -align;
          }
      }
    return true;
  }

  /* We need at least this much.  */
  if (! more (minread))
    return false;

  /* See how much more we can get of what the caller wants.  */
  (void) more (*buffer_available);

  /* If it's already on hand anyway, use as much as there is.  */
  if (elf->map_address != NULL)
    (void) more (elf->maximum_size - start);

  if (unlikely (end - start > elf->maximum_size))
    end = start + elf->maximum_size;

  if (elf->map_address != NULL)
    {
      void *contents = elf->map_address + elf->start_offset + start;
      size_t size = end - start;

      if (minread == 0)		/* String mode.  */
        {
          const void *eos = memchr (contents, '\0', size);
          if (unlikely (eos == NULL) || unlikely (eos == contents))
            return false;
          size = eos + 1 - contents;
        }

      if (*buffer == NULL)
        {
          *buffer = contents;
          *buffer_available = size;
        }
      else
        {
          *buffer_available = MIN (size, *buffer_available);
          memcpy (*buffer, contents, *buffer_available);
        }
    }
  else
    {
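      /* No mmap'd image: read from the file descriptor, allocating a
         buffer here if the caller did not supply one.  */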
      void *into = *buffer;
      if (*buffer == NULL)
        {
          *buffer_available = MIN (minread ?: 512,
                                   MAX (4096, MIN (end - start,
                                                   *buffer_available)));
          into = malloc (*buffer_available);
          if (unlikely (into == NULL))
            {
              __libdwfl_seterrno (DWFL_E_NOMEM);
              return false;
            }
        }

      ssize_t nread = pread_retry (elf->fildes, into, *buffer_available, start);
      if (nread < (ssize_t) minread)
        {
          if (into != *buffer)
            free (into);
          if (nread < 0)
            __libdwfl_seterrno (DWFL_E_ERRNO);
          return false;
        }

      if (minread == 0)		/* String mode.  */
        {
          const void *eos = memchr (into, '\0', nread);
          if (unlikely (eos == NULL) || unlikely (eos == into))
            {
              if (*buffer == NULL)
                free (into);
              return false;
            }
          nread = eos + 1 - into;
        }

      if (*buffer == NULL)
        *buffer = into;
      *buffer_available = nread;
    }

  return true;
}

int
dwfl_core_file_report (Dwfl *dwfl, Elf *elf, const GElf_Ehdr *ehdr)
{
  GElf_Phdr notes_phdr;

  /* First report each PT_LOAD segment.  */
  int ndx = dwfl_report_core_segments (dwfl, elf, ehdr, &notes_phdr);
  if (unlikely (ndx <= 0))
    return ndx;

  /* Now sniff segment contents for modules.  */
  ndx = 0;
  do
    {
      int seg = dwfl_segment_report_module (dwfl, ndx, NULL,
                                            &dwfl_elf_phdr_memory_callback, elf,
                                            core_file_read_eagerly, elf);
      if (unlikely (seg < 0))
        return seg;
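      /* dwfl_segment_report_module may have consumed several segments;
         continue after the last one it used, or just advance by one.  */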
      ndx = seg > ndx ? seg : ndx + 1;
    }
  while (ndx < ehdr->e_phnum);

  /* Next, we should follow the chain from DT_DEBUG.  */

  const void *auxv = NULL;
  size_t auxv_size = 0;
  if (likely (notes_phdr.p_type == PT_NOTE))
    {
      /* PT_NOTE -> NT_AUXV -> AT_PHDR -> PT_DYNAMIC -> DT_DEBUG */

      Elf_Data *notes = elf_getdata_rawchunk (elf,
                                              notes_phdr.p_offset,
                                              notes_phdr.p_filesz,
                                              ELF_T_NHDR);
      if (likely (notes != NULL))
        {
          size_t pos = 0;
          GElf_Nhdr nhdr;
          size_t name_pos;
          size_t desc_pos;
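          /* Scan for the NT_AUXV note in the "CORE" namespace; its
             descriptor is the auxiliary vector saved in the dump.  */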
          while ((pos = gelf_getnote (notes, pos, &nhdr,
                                      &name_pos, &desc_pos)) > 0)
            if (nhdr.n_type == NT_AUXV
                && nhdr.n_namesz == sizeof "CORE"
                && !memcmp (notes->d_buf + name_pos, "CORE", sizeof "CORE"))
              {
                auxv = notes->d_buf + desc_pos;
                auxv_size = nhdr.n_descsz;
                break;
              }
        }
    }

  /* Now we have NT_AUXV contents.  From here on this processing could be
     used for a live process with auxv read from /proc.  */

  return dwfl_link_map_report (dwfl, auxv, auxv_size,
                               dwfl_elf_phdr_memory_callback, elf);
}
INTDEF (dwfl_core_file_report)
