// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/nacl/loader/nonsfi/elf_loader.h"

#include <elf.h>
#include <link.h>
#include <sys/mman.h>

#include <algorithm>
#include <cstring>
#include <string>

#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/string_number_conversions.h"
#include "native_client/src/include/portability.h"
#include "native_client/src/shared/platform/nacl_host_desc.h"
#include "native_client/src/trusted/desc/nacl_desc_base.h"
#include "native_client/src/trusted/desc/nacl_desc_effector_trusted_mem.h"
#include "native_client/src/trusted/service_runtime/include/bits/mman.h"

// Extracted from native_client/src/trusted/service_runtime/nacl_config.h.
#if NACL_ARCH(NACL_BUILD_ARCH) == NACL_x86
# if NACL_BUILD_SUBARCH == 64
#  define NACL_ELF_E_MACHINE EM_X86_64
# elif NACL_BUILD_SUBARCH == 32
#  define NACL_ELF_E_MACHINE EM_386
# else
#  error Unknown platform.
# endif
#elif NACL_ARCH(NACL_BUILD_ARCH) == NACL_arm
# define NACL_ELF_E_MACHINE EM_ARM
#elif NACL_ARCH(NACL_BUILD_ARCH) == NACL_mips
# define NACL_ELF_E_MACHINE EM_MIPS
#else
# error Unknown platform.
#endif

namespace nacl {
namespace nonsfi {
namespace {

// Page size for non-SFI Mode.
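// The page size must be a power of two so that the mask-based helpers below
// (GetPageStart, GetPageOffset and GetPageEnd) work with simple bit
// operations.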
const ElfW(Addr) kNonSfiPageSize = 4096;
const ElfW(Addr) kNonSfiPageMask = kNonSfiPageSize - 1;

NaClErrorCode ValidateElfHeader(const ElfW(Ehdr)& ehdr) {
  if (std::memcmp(ehdr.e_ident, ELFMAG, SELFMAG)) {
    LOG(ERROR) << "Bad elf magic";
    return LOAD_BAD_ELF_MAGIC;
  }

#if NACL_BUILD_SUBARCH == 32
  if (ehdr.e_ident[EI_CLASS] != ELFCLASS32) {
    LOG(ERROR) << "Bad elf class";
    return LOAD_NOT_32_BIT;
  }
#elif NACL_BUILD_SUBARCH == 64
  if (ehdr.e_ident[EI_CLASS] != ELFCLASS64) {
    LOG(ERROR) << "Bad elf class";
    return LOAD_NOT_64_BIT;
  }
#else
# error Unknown platform.
#endif

  if (ehdr.e_type != ET_DYN) {
    LOG(ERROR) << "Not a relocatable ELF object (not ET_DYN)";
    return LOAD_NOT_EXEC;
  }

  if (ehdr.e_machine != NACL_ELF_E_MACHINE) {
    LOG(ERROR) << "Bad machine: "
               << base::HexEncode(&ehdr.e_machine, sizeof(ehdr.e_machine));
    return LOAD_BAD_MACHINE;
  }

  if (ehdr.e_version != EV_CURRENT) {
    // Note: a version mismatch is only logged here; it is not treated as a
    // fatal load error.
    LOG(ERROR) << "Bad elf version: "
               << base::HexEncode(&ehdr.e_version, sizeof(ehdr.e_version));
  }

  return LOAD_OK;
}

// Returns the start address of the page containing 'addr', for non-SFI mode.
ElfW(Addr) GetPageStart(ElfW(Addr) addr) {
  return addr & ~kNonSfiPageMask;
}

// Returns the offset of 'addr' within its memory page. In other words, this
// equals 'addr' - GetPageStart(addr).
ElfW(Addr) GetPageOffset(ElfW(Addr) addr) {
  return addr & kNonSfiPageMask;
}

// Returns 'addr' rounded up to the next page boundary, unless 'addr' is
// already at the start of a page. This equals:
//   addr == GetPageStart(addr) ? addr : GetPageStart(addr) + kNonSfiPageSize
ElfW(Addr) GetPageEnd(ElfW(Addr) addr) {
  return GetPageStart(addr + kNonSfiPageSize - 1);
}

// Converts the pflags (in phdr) to mmap's prot flags.
int PFlagsToProt(int pflags) {
  return ((pflags & PF_X) ? PROT_EXEC : 0) |
         ((pflags & PF_R) ? PROT_READ : 0) |
         ((pflags & PF_W) ? PROT_WRITE : 0);
}

// Converts the pflags (in phdr) to NaCl ABI's prot flags.
int PFlagsToNaClProt(int pflags) {
  return ((pflags & PF_X) ? NACL_ABI_PROT_EXEC : 0) |
         ((pflags & PF_R) ? NACL_ABI_PROT_READ : 0) |
         ((pflags & PF_W) ? NACL_ABI_PROT_WRITE : 0);
}

// Returns the total size needed to load the PT_LOAD segments described by the
// given phdrs, rounded out to page boundaries, or 0 on error.
ElfW(Addr) GetLoadSize(const ElfW(Phdr)* phdrs, int phnum) {
  ElfW(Addr) begin = ~static_cast<ElfW(Addr)>(0);
  ElfW(Addr) end = 0;

  for (int i = 0; i < phnum; ++i) {
    const ElfW(Phdr)& phdr = phdrs[i];
    if (phdr.p_type != PT_LOAD) {
      // Do nothing for non-PT_LOAD headers.
      continue;
    }

    begin = std::min(begin, phdr.p_vaddr);
    end = std::max(end, phdr.p_vaddr + phdr.p_memsz);
  }

  if (begin > end) {
    // Either the end address overflowed, or no PT_LOAD segment was found.
    return 0;
  }

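  // Round the range out to page boundaries; the result is the size of the
  // contiguous reservation needed for all PT_LOAD segments.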
  return GetPageEnd(end) - GetPageStart(begin);
}

// Reserves a contiguous region of memory large enough for the given phdrs,
// and stores the load bias in |load_bias|.
NaClErrorCode ReserveMemory(const ElfW(Phdr)* phdrs,
                            int phnum,
                            ElfW(Addr)* load_bias) {
  ElfW(Addr) size = GetLoadSize(phdrs, phnum);
  if (size == 0) {
    LOG(ERROR) << "ReserveMemory failed to calculate size";
    return LOAD_UNLOADABLE;
  }

  // Make sure that the given program headers represent a PIE binary.
  for (int i = 0; i < phnum; ++i) {
    if (phdrs[i].p_type == PT_LOAD) {
      // Here, phdrs[i] is the first loadable segment.
      if (phdrs[i].p_vaddr != 0) {
        // The binary is not PIE (i.e. it needs to be loaded at a fixed
        // address). We do not support such a case.
        LOG(ERROR)
            << "ReserveMemory: Non-PIE binary loading is not supported.";
        return LOAD_UNLOADABLE;
      }
      break;
    }
  }

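  // Reserve the whole range with an inaccessible (PROT_NONE) anonymous
  // mapping. LoadSegments() later maps the individual segments over it with
  // fixed-address mappings.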
  void* start = mmap(0, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (start == MAP_FAILED) {
    LOG(ERROR) << "ReserveMemory: failed to mmap.";
    return LOAD_NO_MEMORY;
  }

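  // The first PT_LOAD segment has p_vaddr == 0 (checked above), so the base
  // address of the reservation is itself the load bias.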
  *load_bias = reinterpret_cast<ElfW(Addr)>(start);
  return LOAD_OK;
}

NaClErrorCode LoadSegments(
    const ElfW(Phdr)* phdrs, int phnum, ElfW(Addr) load_bias,
    struct NaClDesc* descriptor) {
  for (int i = 0; i < phnum; ++i) {
    const ElfW(Phdr)& phdr = phdrs[i];
    if (phdr.p_type != PT_LOAD) {
      // Not a load target.
      continue;
    }

    // Addresses in memory.
    ElfW(Addr) seg_start = phdr.p_vaddr + load_bias;
    ElfW(Addr) seg_end = seg_start + phdr.p_memsz;
    ElfW(Addr) seg_page_start = GetPageStart(seg_start);
    ElfW(Addr) seg_page_end = GetPageEnd(seg_end);
    ElfW(Addr) seg_file_end = seg_start + phdr.p_filesz;

    // Offsets within the file content.
    ElfW(Addr) file_start = phdr.p_offset;
    ElfW(Addr) file_end = file_start + phdr.p_filesz;
    ElfW(Addr) file_page_start = GetPageStart(file_start);

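    // Map the file-backed part of the segment at its fixed address within the
    // reserved region, through the NaClDesc Map() virtual table entry. On
    // failure, Map() returns a negative errno encoded in the pointer, which
    // NaClPtrIsNegErrno() detects below.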
    uintptr_t seg_addr = (*NACL_VTBL(NaClDesc, descriptor)->Map)(
        descriptor,
        NaClDescEffectorTrustedMem(),
        reinterpret_cast<void *>(seg_page_start),
        file_end - file_page_start,
        PFlagsToNaClProt(phdr.p_flags),
        NACL_ABI_MAP_PRIVATE | NACL_ABI_MAP_FIXED,
        file_page_start);
    if (NaClPtrIsNegErrno(&seg_addr)) {
      LOG(ERROR) << "LoadSegments: [" << i << "] mmap failed, " << seg_addr;
      return LOAD_NO_MEMORY;
    }

    // Handle the BSS: zero-fill the area between the end of the file-backed
    // data and the next page boundary, if necessary (i.e. if the segment does
    // not end on a page boundary).
    ElfW(Addr) seg_file_end_offset = GetPageOffset(seg_file_end);
    if ((phdr.p_flags & PF_W) && seg_file_end_offset > 0) {
      memset(reinterpret_cast<void *>(seg_file_end), 0,
             kNonSfiPageSize - seg_file_end_offset);
    }

    // Hereafter, seg_file_end is the first page boundary after the file
    // content. If seg_page_end is larger, everything between the two must be
    // zero-filled memory. This is done with a private anonymous mmap of all
    // the extra pages.
    seg_file_end = GetPageEnd(seg_file_end);
    if (seg_page_end > seg_file_end) {
      void* zeromap = mmap(reinterpret_cast<void *>(seg_file_end),
                           seg_page_end - seg_file_end,
                           PFlagsToProt(phdr.p_flags),
                           MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE,
                           -1, 0);
      if (zeromap == MAP_FAILED) {
        LOG(ERROR) << "LoadSegments: [" << i << "] Failed to zeromap.";
        return LOAD_NO_MEMORY;
      }
    }
  }
  return LOAD_OK;
}

}  // namespace

struct ElfImage::Data {
  // Limit of elf program headers allowed.
  enum {
    MAX_PROGRAM_HEADERS = 128
  };

  ElfW(Ehdr) ehdr;
  ElfW(Phdr) phdrs[MAX_PROGRAM_HEADERS];
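  // Bias added to link-time addresses (p_vaddr, e_entry) to obtain the actual
  // load addresses. Filled in by ReserveMemory() during Load().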
  ElfW(Addr) load_bias;
};

ElfImage::ElfImage() {
}

ElfImage::~ElfImage() {
}

uintptr_t ElfImage::entry_point() const {
  if (!data_) {
    LOG(DFATAL) << "entry_point must be called after Read().";
    return 0;
  }
  return data_->ehdr.e_entry + data_->load_bias;
}

NaClErrorCode ElfImage::Read(struct NaClDesc* descriptor) {
  DCHECK(!data_);

  ::scoped_ptr<Data> data(new Data);

  // Read the ELF header.
  ssize_t read_ret = (*NACL_VTBL(NaClDesc, descriptor)->PRead)(
      descriptor, &data->ehdr, sizeof(data->ehdr), 0);
  if (NaClSSizeIsNegErrno(&read_ret) ||
      static_cast<size_t>(read_ret) != sizeof(data->ehdr)) {
    LOG(ERROR) << "Could not load elf headers.";
    return LOAD_READ_ERROR;
  }

  NaClErrorCode error_code = ValidateElfHeader(data->ehdr);
  if (error_code != LOAD_OK)
    return error_code;

  // Read the program headers.
  if (data->ehdr.e_phnum > Data::MAX_PROGRAM_HEADERS) {
    LOG(ERROR) << "Too many program headers";
    return LOAD_TOO_MANY_PROG_HDRS;
  }

  if (data->ehdr.e_phentsize != sizeof(data->phdrs[0])) {
    LOG(ERROR) << "Bad program headers size\n"
               << "  ehdr_.e_phentsize = " << data->ehdr.e_phentsize << "\n"
               << "  sizeof phdrs[0] = " << sizeof(data->phdrs[0]);
    return LOAD_BAD_PHENTSIZE;
  }

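  // e_phnum is bounded by MAX_PROGRAM_HEADERS and e_phentsize equals
  // sizeof(phdrs[0]) (both checked above), so read_size cannot overflow and
  // the headers fit into the phdrs array.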
  size_t read_size = data->ehdr.e_phnum * data->ehdr.e_phentsize;
  read_ret = (*NACL_VTBL(NaClDesc, descriptor)->PRead)(
      descriptor, data->phdrs, read_size, data->ehdr.e_phoff);

  if (NaClSSizeIsNegErrno(&read_ret) ||
      static_cast<size_t>(read_ret) != read_size) {
    LOG(ERROR) << "Cannot load prog headers";
    return LOAD_READ_ERROR;
  }

  data_.swap(data);
  return LOAD_OK;
}

NaClErrorCode ElfImage::Load(struct NaClDesc* descriptor) {
  if (!data_) {
    LOG(DFATAL) << "ElfImage::Load() must be called after Read()";
    return LOAD_INTERNAL;
  }

  NaClErrorCode error =
      ReserveMemory(data_->phdrs, data_->ehdr.e_phnum, &data_->load_bias);
  if (error != LOAD_OK) {
    LOG(ERROR) << "ElfImage::Load: Failed to allocate memory";
    return error;
  }

  error = LoadSegments(
      data_->phdrs, data_->ehdr.e_phnum, data_->load_bias, descriptor);
  if (error != LOAD_OK) {
    LOG(ERROR) << "ElfImage::Load: Failed to load segments";
    return error;
  }

  return LOAD_OK;
}

}  // namespace nonsfi
}  // namespace nacl