// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "crazy_linker_elf_loader.h"

#include <limits.h>  // For PAGE_SIZE and PAGE_MASK

#include "crazy_linker_debug.h"
#include "linker_phdr.h"

#define PAGE_START(x) ((x) & PAGE_MASK)
#define PAGE_OFFSET(x) ((x) & ~PAGE_MASK)
#define PAGE_END(x) PAGE_START((x) + (PAGE_SIZE - 1))
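
// Worked example, assuming PAGE_SIZE == 4096 (so PAGE_MASK == ~4095):
//   PAGE_START(0x1234)  -> 0x1000  (round down to the page boundary)
//   PAGE_OFFSET(0x1234) -> 0x234   (offset within the page)
//   PAGE_END(0x1234)    -> 0x2000  (round up to the next page boundary;
//                                   an already-aligned value is unchanged)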

namespace crazy {

#define MAYBE_MAP_FLAG(x, from, to) (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x)                 \
  (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
   MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
   MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
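
// For example, a segment with p_flags == (PF_R | PF_X) yields
// PROT_READ | PROT_EXEC, i.e. its pages are mapped read-only and executable.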

ElfLoader::ElfLoader()
    : fd_(),
      path_(NULL),
      phdr_num_(0),
      phdr_mmap_(NULL),
      phdr_table_(NULL),
      phdr_size_(0),
      file_offset_(0),
      wanted_load_address_(0),
      load_start_(NULL),
      load_size_(0),
      load_bias_(0),
      loaded_phdr_(NULL) {}

ElfLoader::~ElfLoader() {
  if (phdr_mmap_) {
    // Deallocate the temporary program header copy.
    munmap(phdr_mmap_, phdr_size_);
  }
}
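
// Load the library at |lib_path| into the process. |file_offset| is the
// page-aligned offset of the ELF image within the file, and
// |wanted_address|, when not 0, is the page-aligned address to load the
// segments at. Returns false and sets |error| on failure. A minimal usage
// sketch (the library path is hypothetical):
//
//   crazy::ElfLoader loader;
//   crazy::Error error;
//   if (!loader.LoadAt("/path/to/libfoo.so", 0, 0, &error)) {
//     // Loading failed; |error| describes the reason.
//   }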
bool ElfLoader::LoadAt(const char* lib_path,
                       off_t file_offset,
                       uintptr_t wanted_address,
                       Error* error) {

  LOG("%s: lib_path='%s', file_offset=%p, load_address=%p\n",
      __FUNCTION__,
      lib_path,
      file_offset,
      wanted_address);

  // Check that the load address is properly page-aligned.
  if (wanted_address != PAGE_START(wanted_address)) {
    error->Format("Load address is not page aligned (%08x)", wanted_address);
    return false;
  }
  wanted_load_address_ = reinterpret_cast<void*>(wanted_address);

  // Check that the file offset is also properly page-aligned.
  // PAGE_START() can't be used here due to the compiler complaining about
  // comparing signed (off_t) and unsigned (size_t) values.
  if ((file_offset & static_cast<off_t>(PAGE_SIZE - 1)) != 0) {
    error->Format("File offset is not page aligned (%08x)", file_offset);
    return false;
  }
  file_offset_ = file_offset;

  // Open the file.
  if (!fd_.OpenReadOnly(lib_path)) {
    error->Format("Can't open file: %s", strerror(errno));
    return false;
  }

  if (file_offset && fd_.SeekTo(file_offset) < 0) {
    error->Format(
        "Can't seek to file offset %08x: %s", file_offset, strerror(errno));
    return false;
  }

  path_ = lib_path;

  if (!ReadElfHeader(error) || !ReadProgramHeader(error) ||
      !ReserveAddressSpace(error)) {
    return false;
  }

  if (!LoadSegments(error) || !FindPhdr(error)) {
    // An error occurred. Clean up the address space by unmapping the
    // range that was reserved by ReserveAddressSpace().
    if (load_start_ && load_size_)
      munmap(load_start_, load_size_);

    return false;
  }

  return true;
}

bool ElfLoader::ReadElfHeader(Error* error) {
  int ret = fd_.Read(&header_, sizeof(header_));
  if (ret < 0) {
    error->Format("Can't read file: %s", strerror(errno));
    return false;
  }
  if (ret != static_cast<int>(sizeof(header_))) {
    error->Set("File too small to be ELF");
    return false;
  }

  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    error->Set("Bad ELF magic");
    return false;
  }

  if (header_.e_ident[EI_CLASS] != ELF::kElfClass) {
    error->Format("Not a %d-bit class: %d",
                  ELF::kElfBits,
                  header_.e_ident[EI_CLASS]);
    return false;
  }

  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    error->Format("Not little-endian class: %d", header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    error->Format("Not a shared library type: %d", header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    error->Format("Unexpected ELF version: %d", header_.e_version);
    return false;
  }

  if (header_.e_machine != ELF_MACHINE) {
    error->Format("Unexpected ELF machine type: %d", header_.e_machine);
    return false;
  }

  return true;
}

// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfLoader::ReadProgramHeader(Error* error) {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, only accept program header tables smaller than 64 KB.
  if (phdr_num_ < 1 || phdr_num_ > 65536 / sizeof(ELF::Phdr)) {
    error->Format("Invalid program header count: %d", phdr_num_);
    return false;
  }

  ELF::Addr page_min = PAGE_START(header_.e_phoff);
  ELF::Addr page_max =
      PAGE_END(header_.e_phoff + (phdr_num_ * sizeof(ELF::Phdr)));
  ELF::Addr page_offset = PAGE_OFFSET(header_.e_phoff);
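
  // Worked example, assuming a 32-bit ELF file (32-byte program headers),
  // PAGE_SIZE == 4096, e_phoff == 0x34 and e_phnum == 8: the table spans
  // file offsets [0x34, 0x134), so page_min == 0, page_max == 0x1000,
  // page_offset == 0x34, and a single page is mapped below.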

  phdr_size_ = page_max - page_min;

  void* mmap_result = fd_.Map(
      NULL, phdr_size_, PROT_READ, MAP_PRIVATE, page_min + file_offset_);
  if (mmap_result == MAP_FAILED) {
    error->Format("Phdr mmap failed: %s", strerror(errno));
    return false;
  }

  phdr_mmap_ = mmap_result;
  phdr_table_ = reinterpret_cast<ELF::Phdr*>(
      reinterpret_cast<char*>(mmap_result) + page_offset);
  return true;
}

// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
//
// This will use the wanted_load_address_ value, if it is not 0, as a
// fixed mapping address.
bool ElfLoader::ReserveAddressSpace(Error* error) {
  ELF::Addr min_vaddr;
  load_size_ =
      phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr, NULL);
  if (load_size_ == 0) {
    error->Set("No loadable segments");
    return false;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;

  // Support loading at a fixed address.
  if (wanted_load_address_) {
    addr = static_cast<uint8_t*>(wanted_load_address_);
    mmap_flags |= MAP_FIXED;
  }

  LOG("%s: address=%p size=%p\n", __FUNCTION__, addr, load_size_);
  void* start = mmap(addr, load_size_, PROT_NONE, mmap_flags, -1, 0);
  if (start == MAP_FAILED) {
    error->Format("Could not reserve %d bytes of address space", load_size_);
    return false;
  }

  load_start_ = start;
  load_bias_ = reinterpret_cast<ELF::Addr>(start) - min_vaddr;
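  // For example, if the first loadable segment has min_vaddr == 0x1000 and
  // mmap() returned 0x40000000, load_bias_ is 0x3ffff000; adding it to any
  // p_vaddr from the program header table yields the runtime address.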
  return true;
}

// Returns the address of the program header table as it appears in the loaded
// segments in memory. This is in contrast with 'phdr_table_' which
// is temporary and will be released before the library is relocated.
bool ElfLoader::FindPhdr(Error* error) {
  const ELF::Phdr* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const ELF::Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr, error);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const ELF::Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        ELF::Addr elf_addr = load_bias_ + phdr->p_vaddr;
        const ELF::Ehdr* ehdr = (const ELF::Ehdr*)(void*)elf_addr;
        ELF::Addr offset = ehdr->e_phoff;
        return CheckPhdr((ELF::Addr)ehdr + offset, error);
      }
      break;
    }
  }

  error->Set("Can't find loaded program header");
  return false;
}

// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfLoader::CheckPhdr(ELF::Addr loaded, Error* error) {
  const ELF::Phdr* phdr_limit = phdr_table_ + phdr_num_;
  ELF::Addr loaded_end = loaded + (phdr_num_ * sizeof(ELF::Phdr));
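  // The range [loaded, loaded_end) must fall entirely inside the
  // file-backed portion [seg_start, seg_start + p_filesz) of a single
  // PT_LOAD segment for the header to be considered valid.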
  for (ELF::Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    ELF::Addr seg_start = phdr->p_vaddr + load_bias_;
    ELF::Addr seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const ELF::Phdr*>(loaded);
      return true;
    }
  }
  error->Format("Loaded program header %x not in loadable segment", loaded);
  return false;
}

// Map all loadable segments in the process's address space.
// This assumes the address space range for the library was already
// reserved by a call to ReserveAddressSpace().
bool ElfLoader::LoadSegments(Error* error) {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ELF::Phdr* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    ELF::Addr seg_start = phdr->p_vaddr + load_bias_;
    ELF::Addr seg_end = seg_start + phdr->p_memsz;

    ELF::Addr seg_page_start = PAGE_START(seg_start);
    ELF::Addr seg_page_end = PAGE_END(seg_end);

    ELF::Addr seg_file_end = seg_start + phdr->p_filesz;

    // File offsets.
    ELF::Addr file_start = phdr->p_offset;
    ELF::Addr file_end = file_start + phdr->p_filesz;

    ELF::Addr file_page_start = PAGE_START(file_start);
    ELF::Addr file_length = file_end - file_page_start;
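
    // Worked example, assuming PAGE_SIZE == 4096 and a segment with
    // p_vaddr == 0x1000, p_offset == 0x1000, p_filesz == 0x4234 and
    // p_memsz == 0x6234: with load_bias_ == 0x40000000, the file-backed
    // map below covers [0x40001000, 0x40005234), and the anonymous map
    // further down zero-fills [0x40006000, 0x40008000) for the .bss part.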

    LOG("%s: file_offset=%p file_length=%p start_address=%p end_address=%p\n",
        __FUNCTION__,
        file_offset_ + file_page_start,
        file_length,
        seg_page_start,
        seg_page_start + PAGE_END(file_length));

    if (file_length != 0) {
      void* seg_addr = fd_.Map((void*)seg_page_start,
                               file_length,
                               PFLAGS_TO_PROT(phdr->p_flags),
                               MAP_FIXED | MAP_PRIVATE,
                               file_page_start + file_offset_);
      if (seg_addr == MAP_FAILED) {
        error->Format("Could not map segment %d: %s", i, strerror(errno));
        return false;
      }
    }
    // If the segment is writable and does not end on a page boundary,
    // zero-fill it up to the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset((void*)seg_file_end, 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      void* zeromap = mmap((void*)seg_file_end,
                           seg_page_end - seg_file_end,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        error->Format("Could not zero-fill gap: %s", strerror(errno));
        return false;
      }
    }
  }
  return true;
}

}  // namespace crazy