/*
 *	fs/proc/vmcore.c Interface for accessing the crash
 *			 dump from the system's previous life.
 *	Heavily borrowed from fs/proc/kcore.c
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * the vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing the kernel ELF core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;

/* Total size of the vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error.
 * The called function has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (oldmem_pfn_is_ram)
		return -EBUSY;
	oldmem_pfn_is_ram = fn;
	return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
	oldmem_pfn_is_ram = NULL;
	wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);

static int pfn_is_ram(unsigned long pfn)
{
	int (*fn)(unsigned long pfn);
	/* pfn is ram unless fn() checks pagetype */
	int ret = 1;

	/*
	 * Ask hypervisor if the pfn is really ram.
	 * A ballooned page contains no data and reading from such a page
	 * will cause high load in the hypervisor.
	 */
	fn = oldmem_pfn_is_ram;
	if (fn)
		ret = fn(pfn);

	return ret;
}

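/*
 * A hypervisor backend hooks into the check above roughly as follows.
 * This is a hedged, illustrative sketch (example_pfn_is_backed() is a
 * hypothetical helper, not an in-tree function):
 *
 *	static int example_oldmem_pfn_is_ram(unsigned long pfn)
 *	{
 *		return example_pfn_is_backed(pfn) ? 1 : 0;
 *	}
 *
 *	rc = register_oldmem_pfn_is_ram(&example_oldmem_pfn_is_ram);
 *
 * and it must call unregister_oldmem_pfn_is_ram() before going away.
 */
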
/* Reads a page from the oldmem device at the given offset. */
static ssize_t read_from_oldmem(char *buf, size_t count,
				u64 *ppos, int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);
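	/*
	 * Worked example with 4 KiB pages: *ppos == 0x1803 gives pfn == 1
	 * and offset == 0x803, so the first iteration copies at most
	 * PAGE_SIZE - 0x803 bytes, then continues from pfn 2 at offset 0.
	 */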

	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (pfn_is_ram(pfn) == 0) {
			tmp = 0;
			if (!userbuf)
				memset(buf, 0, nr_bytes);
			else if (clear_user(buf, nr_bytes))
				tmp = -EFAULT;
		} else {
			tmp = copy_oldmem_page(pfn, buf, nr_bytes,
					       offset, userbuf);
		}
		if (tmp < 0)
			return tmp;

		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}

/*
 * Architectures may override this function to allocate the ELF header
 * in the 2nd kernel.
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	return 0;
}

/*
 * Architectures may override this function to free the ELF header.
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}

/*
 * Architectures may override this function to read from the ELF header.
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0);
}

/*
 * Architectures may override this function to read from ELF note segments.
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0);
}

/*
 * Architectures may override this function to map oldmem.
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t prot)
{
	return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Architectures that support memory encryption override this.
 */
ssize_t __weak
copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
			   unsigned long offset, int userbuf)
{
	return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
}

/*
 * Copy to either kernel or user space.
 */
static int copy_to(void *target, void *src, size_t size, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *) target, src, size))
			return -EFAULT;
	} else {
		memcpy(target, src, size);
	}
	return 0;
}

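/*
 * The vmcore file is presented as three virtually contiguous, page-aligned
 * regions:
 *
 *	+----------------------+  offset 0
 *	| ELF header + phdrs   |  elfcorebuf / elfcorebuf_sz
 *	+----------------------+
 *	| merged note segment  |  elfnotes_buf / elfnotes_sz
 *	+----------------------+
 *	| old memory chunks    |  vmcore_list, one entry per PT_LOAD range
 *	+----------------------+
 *
 * __read_vmcore() below walks these regions in order.
 */
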
/* Read from the ELF header and then the crash dump. Returns the number of
 * bytes read on success, or a negative value on error.
 */
static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
			     int userbuf)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
		if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	/* Read ELF note segment */
	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz;
		if (copy_to(buffer, kaddr, tsz, userbuf))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
			if (tmp < 0)
				return tmp;
			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (buflen == 0)
				return acc;
		}
	}

	return acc;
}

static ssize_t read_vmcore(struct file *file, char __user *buffer,
			   size_t buflen, loff_t *fpos)
{
	return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
}

/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static int mmap_vmcore_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#ifdef CONFIG_S390
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t index = vmf->pgoff;
	struct page *page;
	loff_t offset;
	char *buf;
	int rc;

	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return VM_FAULT_OOM;
	if (!PageUptodate(page)) {
		offset = (loff_t) index << PAGE_CACHE_SHIFT;
		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
		rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
		if (rc < 0) {
			unlock_page(page);
			page_cache_release(page);
			return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
		}
		SetPageUptodate(page);
	}
	unlock_page(page);
	vmf->page = page;
	return 0;
#else
	return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
	.fault = mmap_vmcore_fault,
};

/**
 * alloc_elfnotes_buf - allocate buffer for ELF note segment in
 * vmalloc memory
 *
 * @notes_sz: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *alloc_elfnotes_buf(size_t notes_sz)
{
#ifdef CONFIG_MMU
	return vmalloc_user(notes_sz);
#else
	return vzalloc(notes_sz);
#endif
}

/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */
#ifdef CONFIG_MMU
/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range() replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
				    unsigned long from, unsigned long pfn,
				    unsigned long size, pgprot_t prot)
{
	unsigned long map_size;
	unsigned long pos_start, pos_end, pos;
	unsigned long zeropage_pfn = my_zero_pfn(0);
	size_t len = 0;

	pos_start = pfn;
	pos_end = pfn + (size >> PAGE_SHIFT);

	for (pos = pos_start; pos < pos_end; ++pos) {
		if (!pfn_is_ram(pos)) {
			/*
			 * We hit a page which is not ram. Remap the continuous
			 * region between pos_start and pos-1 and replace
			 * the non-ram page at pos with the zero page.
			 */
			if (pos > pos_start) {
				/* Remap continuous region */
				map_size = (pos - pos_start) << PAGE_SHIFT;
				if (remap_oldmem_pfn_range(vma, from + len,
							   pos_start, map_size,
							   prot))
					goto fail;
				len += map_size;
			}
			/* Remap the zero page */
			if (remap_oldmem_pfn_range(vma, from + len,
						   zeropage_pfn,
						   PAGE_SIZE, prot))
				goto fail;
			len += PAGE_SIZE;
			pos_start = pos + 1;
		}
	}
	if (pos > pos_start) {
		/* Remap the rest */
		map_size = (pos - pos_start) << PAGE_SHIFT;
		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
					   map_size, prot))
			goto fail;
	}
	return 0;
fail:
	do_munmap(vma->vm_mm, from, len);
	return -EAGAIN;
}

static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
				   unsigned long from, unsigned long pfn,
				   unsigned long size, pgprot_t prot)
{
	/*
	 * Check if oldmem_pfn_is_ram was registered to avoid
	 * looping over all pages without a reason.
	 */
	if (oldmem_pfn_is_ram)
		return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
	else
		return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
}

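/*
 * Userspace dump tools typically mmap() /proc/vmcore rather than read it.
 * A hedged sketch of the consumer side (error handling omitted, length and
 * offset chosen by the tool):
 *
 *	int fd = open("/proc/vmcore", O_RDONLY);
 *	void *p = mmap(NULL, length, PROT_READ, MAP_PRIVATE, fd, offset);
 *
 * mmap_vmcore() below services such calls against the file layout
 * described above.
 */
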
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	u64 start, end, len, tsz;
	struct vmcore *m;

	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
	end = start + size;

	if (size > vmcore_size || end > vmcore_size)
		return -EINVAL;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;

	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_ops = &vmcore_mmap_ops;

	len = 0;

	if (start < elfcorebuf_sz) {
		u64 pfn;

		tsz = min(elfcorebuf_sz - (size_t)start, size);
		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
				    vma->vm_page_prot))
			return -EAGAIN;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	if (start < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
		kaddr = elfnotes_buf + start - elfcorebuf_sz;
		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
						kaddr, tsz))
			goto fail;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (start < m->offset + m->size) {
			u64 paddr = 0;

			tsz = min_t(size_t, m->offset + m->size - start, size);
			paddr = m->paddr + start - m->offset;
			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
						    paddr >> PAGE_SHIFT, tsz,
						    vma->vm_page_prot))
				goto fail;
			size -= tsz;
			start += tsz;
			len += tsz;

			if (size == 0)
				return 0;
		}
	}

	return 0;
fail:
	do_munmap(vma->vm_mm, vma->vm_start, len);
	return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif

static const struct file_operations proc_vmcore_operations = {
	.read		= read_vmcore,
	.llseek		= default_llseek,
	.mmap		= mmap_vmcore,
};

static struct vmcore* __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

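/*
 * Total file size = ELF headers + merged note segment + sum of all memory
 * chunks on @vc_list; each component is page-aligned by the time this is
 * called.
 */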
static u64 __init get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
				  struct list_head *vc_list)
{
	u64 size;
	struct vmcore *m;

	size = elfsz + elfnotesegsz;
	list_for_each_entry(m, vc_list, list) {
		size += m->size;
	}
	return size;
}

/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
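		/*
		 * Each note is an Elf64_Nhdr followed by its name and
		 * descriptor, each padded to 4-byte alignment; the
		 * "(x + 3) & ~3" terms below compute that rounding.
		 */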
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf64_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0)
			pr_warn("Warning: Zero PT_NOTE entries found\n");
	}

	return 0;
}

/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and the sum of the real sizes of their ELF note segment
 * headers and data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for the size of the unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each of them has the actual ELF note segment size in its
 * p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf64_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf64 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segments of the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of
 * the real ELF note segment headers and data.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each of them has the actual ELF note segment size in its
 * p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

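/*
 * Schematically, merging rewrites the header buffer from
 *
 *	Ehdr | NOTE | NOTE | ... | LOAD phdrs
 * to
 *	Ehdr | merged NOTE | remaining phdrs
 *
 * with the merged entry's p_offset pointing at the page-aligned note
 * buffer that follows the (also page-aligned) header region in the file.
 */
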
/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	rc = update_note_header_size_elf64(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = alloc_elfnotes_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf64_Ehdr) +
		(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}

/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf32_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0)
			pr_warn("Warning: Zero PT_NOTE entries found\n");
	}

	return 0;
}

/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and the sum of the real sizes of their ELF note segment
 * headers and data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for the size of the unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each of them has the actual ELF note segment size in its
 * p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf32_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf32 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segments of the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of
 * the real ELF note segment headers and data.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each of them has the actual ELF note segment size in its
 * p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	rc = update_note_header_size_elf32(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = alloc_elfnotes_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf32_Ehdr) +
		(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}

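/*
 * Worked example for the offset rewrite below (4 KiB pages): a PT_LOAD
 * entry whose p_offset (treated here as the physical start address of the
 * segment) is 0x1000080 with p_memsz 0x100 becomes the page-aligned chunk
 * [0x1000000, 0x1001000) of old memory, and its rewritten p_offset is
 * vmcore_off + 0x80.
 */
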
/* Add memory chunks represented by program headers to the vmcore list. Also
 * update the offset fields of the exported program headers accordingly. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to the vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to the vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
					   struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

static void free_elfcorebuf(void)
{
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
	vfree(elfnotes_buf);
	elfnotes_buf = NULL;
}

static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read ELF header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
	    (ehdr.e_type != ET_CORE) ||
	    !vmcore_elf64_check_arch(&ehdr) ||
	    ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
	    ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr.e_version != EV_CURRENT ||
	    ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
	    ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
	    ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read ELF header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
	    (ehdr.e_type != ET_CORE) ||
	    !elf_check_arch(&ehdr) ||
	    ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
	    ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr.e_version != EV_CURRENT ||
	    ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
	    ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
	    ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc = 0;

	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Determine vmcore size. */
	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);

	return 0;
}

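/*
 * Initialization order, as implemented below: let the architecture
 * allocate the ELF header in the 2nd kernel if it needs to, parse and
 * re-layout the crash headers, free the now-unneeded oldmem copy of the
 * header, then expose the result as /proc/vmcore.
 */
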
/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* Allow architectures to allocate ELF header in 2nd kernel */
	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
	if (rc)
		return rc;
	/*
	 * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
	 * then capture the dump.
	 */
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}
	elfcorehdr_free(elfcorehdr_addr);
	elfcorehdr_addr = ELFCORE_ADDR_ERR;

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
fs_initcall(vmcore_init);

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
	struct list_head *pos, *next;

	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* clear the vmcore list. */
	list_for_each_safe(pos, next, &vmcore_list) {
		struct vmcore *m;

		m = list_entry(pos, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();
}