// SPDX-License-Identifier: GPL-2.0-only
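/*
 * Dump the Memory Tagging Extension (MTE) allocation tags of a
 * coredumping task's tagged VMAs into the ELF core file as extra
 * segments.
 */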

#include <linux/coredump.h>
#include <linux/elfcore.h>
#include <linux/kernel.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mte.h>

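/*
 * Walk the VMAs of @tsk, running the loop body only for those with
 * VM_MTE set. If the CPU does not implement MTE, system_supports_mte()
 * is false and the walk is skipped entirely.
 */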
#define for_each_mte_vma(tsk, vma)					\
	if (system_supports_mte())					\
		for (vma = tsk->mm->mmap; vma; vma = vma->vm_next)	\
			if (vma->vm_flags & VM_MTE)

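/*
 * Number of bytes of tag storage that dumping @vma adds to the core
 * file: MTE_PAGE_TAG_STORAGE bytes per page of the VMA, or zero if the
 * VMA is marked VM_DONTDUMP.
 */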
static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_DONTDUMP)
		return 0;

	return vma_pages(vma) * MTE_PAGE_TAG_STORAGE;
}

/*
 * Derived from dump_user_range(); start/end must be page-aligned.
 * Returns 1 on success or 0 on failure, following dump_emit()'s
 * convention.
 */
static int mte_dump_tag_range(struct coredump_params *cprm,
			      unsigned long start, unsigned long end)
{
	int ret = 1;
	unsigned long addr;
	void *tags = NULL;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page = get_dump_page(addr);

		/*
		 * get_dump_page() returns NULL when encountering an empty
		 * page table entry that would otherwise have been filled with
		 * the zero page. Skip the equivalent tag dump which would
		 * have been all zeros.
		 */
		if (!page) {
			dump_skip(cprm, MTE_PAGE_TAG_STORAGE);
			continue;
		}

		/*
		 * Pages mapped in user space as !pte_access_permitted() (e.g.
		 * PROT_EXEC only) may not have the PG_mte_tagged flag set.
		 */
		if (!test_bit(PG_mte_tagged, &page->flags)) {
			put_page(page);
			dump_skip(cprm, MTE_PAGE_TAG_STORAGE);
			continue;
		}

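		/*
		 * Allocate the scratch tag buffer lazily, on first use, so
		 * that ranges consisting entirely of skipped pages never
		 * allocate at all. The buffer is reused for every page and
		 * freed once after the loop.
		 */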
		if (!tags) {
			tags = mte_allocate_tag_storage();
			if (!tags) {
				put_page(page);
				ret = 0;
				break;
			}
		}

		mte_save_page_tags(page_address(page), tags);
		put_page(page);
		if (!dump_emit(cprm, tags, MTE_PAGE_TAG_STORAGE)) {
			ret = 0;
			break;
		}
	}

	if (tags)
		mte_free_tag_storage(tags);

	return ret;
}

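/*
 * The elf_core_extra_* functions below implement the hooks the generic
 * ELF coredump code uses to append extra program headers and segment
 * data after the standard ones; here they add one segment of tag data
 * per MTE VMA.
 */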
Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm)
{
	struct vm_area_struct *vma;
	int vma_count = 0;

	for_each_mte_vma(current, vma)
		vma_count++;

	return vma_count;
}

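/*
 * Emit one PT_AARCH64_MEMTAG_MTE program header per tagged VMA. The
 * p_offset/p_filesz fields locate that VMA's tag data in the core file;
 * @offset must be the file position at which elf_core_write_extra_data()
 * will start writing it.
 */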
int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
{
	struct vm_area_struct *vma;

	for_each_mte_vma(current, vma) {
		struct elf_phdr phdr;

		phdr.p_type = PT_AARCH64_MEMTAG_MTE;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = mte_vma_tag_dump_size(vma);
		phdr.p_memsz = vma->vm_end - vma->vm_start;
		offset += phdr.p_filesz;
		phdr.p_flags = 0;
		phdr.p_align = 0;

		if (!dump_emit(cprm, &phdr, sizeof(phdr)))
			return 0;
	}

	return 1;
}

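/*
 * Total size of the tag data that elf_core_write_extra_data() will
 * emit. This must equal the sum of the p_filesz values written above so
 * that the segment offsets in the file stay consistent.
 */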
size_t elf_core_extra_data_size(struct coredump_params *cprm)
{
	struct vm_area_struct *vma;
	size_t data_size = 0;

	for_each_mte_vma(current, vma)
		data_size += mte_vma_tag_dump_size(vma);

	return data_size;
}

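/*
 * Write the tag data itself. VM_DONTDUMP VMAs are skipped here to
 * mirror mte_vma_tag_dump_size() returning 0 for them, keeping the
 * emitted data in step with the program headers written above.
 */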
int elf_core_write_extra_data(struct coredump_params *cprm)
{
	struct vm_area_struct *vma;

	for_each_mte_vma(current, vma) {
		if (vma->vm_flags & VM_DONTDUMP)
			continue;

		if (!mte_dump_tag_range(cprm, vma->vm_start, vma->vm_end))
			return 0;
	}

	return 1;
}