//===-- sanitizer_win.cc --------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements windows-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"
#if SANITIZER_WINDOWS

#define WIN32_LEAN_AND_MEAN
#define NOGDI
#include <windows.h>
#include <dbghelp.h>
#include <io.h>
#include <psapi.h>
#include <stdlib.h>

#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_stacktrace.h"

namespace __sanitizer {

#include "sanitizer_syscall_generic.inc"

// --------------------- sanitizer_common.h
uptr GetPageSize() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwPageSize;
}

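// Note: dwAllocationGranularity is the granularity at which VirtualAlloc
// reserves address space; on mainstream Windows versions it is 64 KiB, which
// is why the mapping routines below align reservations to at least that much.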
uptr GetMmapGranularity() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwAllocationGranularity;
}

uptr GetMaxVirtualAddress() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return (uptr)si.lpMaximumApplicationAddress;
}

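// Note: GetFileAttributesA succeeds for directories as well as regular files,
// so this reports "exists" for either.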
bool FileExists(const char *filename) {
  return ::GetFileAttributesA(filename) != INVALID_FILE_ATTRIBUTES;
}

uptr internal_getpid() {
  return GetProcessId(GetCurrentProcess());
}

// In contrast to POSIX, on Windows GetCurrentThreadId()
// returns a system-unique identifier.
uptr GetTid() {
  return GetCurrentThreadId();
}

uptr GetThreadSelf() {
  return GetTid();
}

#if !SANITIZER_GO
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom) {
  CHECK(stack_top);
  CHECK(stack_bottom);
  MEMORY_BASIC_INFORMATION mbi;
  CHECK_NE(VirtualQuery(&mbi /* on stack */, &mbi, sizeof(mbi)), 0);
  // FIXME: is it possible for the stack to not be a single allocation?
  // Are these values what ASan expects to get (reserved, not committed;
  // including stack guard page) ?
  *stack_top = (uptr)mbi.BaseAddress + mbi.RegionSize;
  *stack_bottom = (uptr)mbi.AllocationBase;
}
#endif  // #if !SANITIZER_GO

void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (rv == 0)
    ReportMmapFailureAndDie(size, mem_type, "allocate",
                            GetLastError(), raw_report);
  return rv;
}

void UnmapOrDie(void *addr, uptr size) {
  if (!size || !addr)
    return;

  MEMORY_BASIC_INFORMATION mbi;
  CHECK(VirtualQuery(addr, &mbi, sizeof(mbi)));

  // MEM_RELEASE can only be used to unmap whole regions previously mapped with
  // VirtualAlloc. So we first try MEM_RELEASE since it is better, and if that
  // fails try MEM_DECOMMIT.
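  // Note: MEM_RELEASE only works when 'size' is 0 and 'addr' is the base
  // address returned by the original VirtualAlloc; MEM_DECOMMIT decommits the
  // pages but leaves the address range reserved.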
  if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
    if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) {
      Report("ERROR: %s failed to "
             "deallocate 0x%zx (%zd) bytes at address %p (error code: %d)\n",
             SanitizerToolName, size, size, addr, GetLastError());
      CHECK("unable to unmap" && 0);
    }
  }
}

// We want to map a chunk of address space aligned to 'alignment'.
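// VirtualAlloc has no way to request a specific alignment directly, so the
// slow path below over-reserves size + alignment bytes, picks the first
// aligned address inside that reservation, releases it, and re-reserves
// exactly at the aligned address. Another thread may grab the address in
// between, hence the bounded retry loop.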
void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  // Windows will align our allocations to at least 64K.
  alignment = Max(alignment, GetMmapGranularity());

  uptr mapped_addr =
      (uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (!mapped_addr)
    ReportMmapFailureAndDie(size, mem_type, "allocate aligned", GetLastError());

  // If we got it right on the first try, return. Otherwise, unmap it and go to
  // the slow path.
  if (IsAligned(mapped_addr, alignment))
    return (void*)mapped_addr;
  if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
    ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());

  // If we didn't get an aligned address, overallocate, find an aligned address,
  // unmap, and try to allocate at that aligned address.
  int retries = 0;
  const int kMaxRetries = 10;
  for (; retries < kMaxRetries &&
         (mapped_addr == 0 || !IsAligned(mapped_addr, alignment));
       retries++) {
    // Overallocate size + alignment bytes.
    mapped_addr =
        (uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
    if (!mapped_addr)
      ReportMmapFailureAndDie(size, mem_type, "allocate aligned",
                              GetLastError());

    // Find the aligned address.
    uptr aligned_addr = RoundUpTo(mapped_addr, alignment);

    // Free the overallocation.
    if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
      ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());

    // Attempt to allocate exactly the number of bytes we need at the aligned
    // address. This may fail for a number of reasons, in which case we continue
    // the loop.
    mapped_addr = (uptr)VirtualAlloc((void *)aligned_addr, size,
                                     MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  }

  // Fail if we can't make this work quickly.
  if (retries == kMaxRetries && mapped_addr == 0)
    ReportMmapFailureAndDie(size, mem_type, "allocate aligned", GetLastError());

  return (void *)mapped_addr;
}

void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
  // FIXME: is this really "NoReserve"? On Win32 this does not matter much,
  // but on Win64 it does.
  (void)name;  // unsupported
#if SANITIZER_WINDOWS64
  // On Win64, using MEM_COMMIT here would result in error
  // 1455: ERROR_COMMITMENT_LIMIT.
  // We use an exception handler to commit pages on demand instead.
  void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE, PAGE_READWRITE);
#else
  void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE | MEM_COMMIT,
                         PAGE_READWRITE);
#endif
  if (p == 0)
    Report("ERROR: %s failed to "
           "allocate %p (%zd) bytes at %p (error code: %d)\n",
           SanitizerToolName, size, size, fixed_addr, GetLastError());
  return p;
}

// Memory space mapped by 'MmapFixedOrDie' must have been reserved by
// 'MmapFixedNoAccess'.
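// Note: passing MEM_COMMIT without MEM_RESERVE is documented to fail unless
// the entire target range has already been reserved, so calling this on
// unreserved address space will not succeed.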
void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
  void *p = VirtualAlloc((LPVOID)fixed_addr, size,
                         MEM_COMMIT, PAGE_READWRITE);
  if (p == 0) {
    char mem_type[30];
    internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
                      fixed_addr);
    ReportMmapFailureAndDie(size, mem_type, "allocate", GetLastError());
  }
  return p;
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  // FIXME: make this really NoReserve?
  return MmapOrDie(size, mem_type);
}

void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  (void)name;  // unsupported
  void *res = VirtualAlloc((LPVOID)fixed_addr, size,
                           MEM_RESERVE, PAGE_NOACCESS);
  if (res == 0)
    Report("WARNING: %s failed to "
           "mprotect %p (%zd) bytes at %p (error code: %d)\n",
           SanitizerToolName, size, size, fixed_addr, GetLastError());
  return res;
}

void *MmapNoAccess(uptr size) {
  // FIXME: unsupported.
  return nullptr;
}

bool MprotectNoAccess(uptr addr, uptr size) {
  DWORD old_protection;
  return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection);
}

void FlushUnneededShadowMemory(uptr addr, uptr size) {
  // This is almost useless on 32-bit platforms.
  // FIXME: add a madvise analog when we move to 64-bit.
}

void NoHugePagesInRegion(uptr addr, uptr size) {
  // FIXME: probably similar to FlushUnneededShadowMemory.
}

void DontDumpShadowMemory(uptr addr, uptr length) {
  // This is almost useless on 32-bit platforms.
  // FIXME: add a madvise analog when we move to 64-bit.
}

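// Note: this only examines the single region returned by VirtualQuery for
// 'range_start', so it implicitly assumes the whole range lies within one
// region.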
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
  MEMORY_BASIC_INFORMATION mbi;
  CHECK(VirtualQuery((void *)range_start, &mbi, sizeof(mbi)));
  return mbi.Protect == PAGE_NOACCESS &&
         (uptr)mbi.BaseAddress + mbi.RegionSize >= range_end;
}

void *MapFileToMemory(const char *file_name, uptr *buff_size) {
  UNIMPLEMENTED();
}

void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) {
  UNIMPLEMENTED();
}

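// 32767 is the documented maximum length of a user-defined environment
// variable value on Windows.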
static const int kMaxEnvNameLength = 128;
static const DWORD kMaxEnvValueLength = 32767;

namespace {

struct EnvVariable {
  char name[kMaxEnvNameLength];
  char value[kMaxEnvValueLength];
};

}  // namespace

static const int kEnvVariables = 5;
static EnvVariable env_vars[kEnvVariables];
static int num_env_vars;

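// The cache below is filled lazily on first lookup and never invalidated, so
// changes made to the environment afterwards (e.g. via SetEnvironmentVariable)
// are not observed here.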
const char *GetEnv(const char *name) {
  // Note: this implementation caches the values of the environment variables
  // and limits their quantity.
  for (int i = 0; i < num_env_vars; i++) {
    if (0 == internal_strcmp(name, env_vars[i].name))
      return env_vars[i].value;
  }
  CHECK_LT(num_env_vars, kEnvVariables);
  DWORD rv = GetEnvironmentVariableA(name, env_vars[num_env_vars].value,
                                     kMaxEnvValueLength);
  if (rv > 0 && rv < kMaxEnvValueLength) {
    CHECK_LT(internal_strlen(name), kMaxEnvNameLength);
    internal_strncpy(env_vars[num_env_vars].name, name, kMaxEnvNameLength);
    num_env_vars++;
    return env_vars[num_env_vars - 1].value;
  }
  return 0;
}

const char *GetPwd() {
  UNIMPLEMENTED();
}

u32 GetUid() {
  UNIMPLEMENTED();
}

namespace {
struct ModuleInfo {
  const char *filepath;
  uptr base_address;
  uptr end_address;
};

#ifndef SANITIZER_GO
int CompareModulesBase(const void *pl, const void *pr) {
  const ModuleInfo *l = (ModuleInfo *)pl, *r = (ModuleInfo *)pr;
  if (l->base_address < r->base_address)
    return -1;
  return l->base_address > r->base_address;
}
#endif
}  // namespace

#ifndef SANITIZER_GO
void DumpProcessMap() {
  Report("Dumping process modules:\n");
  ListOfModules modules;
  modules.init();
  uptr num_modules = modules.size();

  InternalScopedBuffer<ModuleInfo> module_infos(num_modules);
  for (size_t i = 0; i < num_modules; ++i) {
    module_infos[i].filepath = modules[i].full_name();
    module_infos[i].base_address = modules[i].base_address();
    module_infos[i].end_address = modules[i].ranges().front()->end;
  }
  qsort(module_infos.data(), num_modules, sizeof(ModuleInfo),
        CompareModulesBase);

  for (size_t i = 0; i < num_modules; ++i) {
    const ModuleInfo &mi = module_infos[i];
    if (mi.end_address != 0) {
      Printf("\t%p-%p %s\n", mi.base_address, mi.end_address,
             mi.filepath[0] ? mi.filepath : "[no name]");
    } else if (mi.filepath[0]) {
      Printf("\t??\?-??? %s\n", mi.filepath);
    } else {
      Printf("\t???\n");
    }
  }
}
#endif

void DisableCoreDumperIfNecessary() {
  // Do nothing.
}

void ReExec() {
  UNIMPLEMENTED();
}

void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
#if !SANITIZER_GO
  CovPrepareForSandboxing(args);
#endif
}

bool StackSizeIsUnlimited() {
  UNIMPLEMENTED();
}

void SetStackSizeLimitInBytes(uptr limit) {
  UNIMPLEMENTED();
}

bool AddressSpaceIsUnlimited() {
  UNIMPLEMENTED();
}

void SetAddressSpaceUnlimited() {
  UNIMPLEMENTED();
}

bool IsPathSeparator(const char c) {
  return c == '\\' || c == '/';
}

bool IsAbsolutePath(const char *path) {
  UNIMPLEMENTED();
}

void SleepForSeconds(int seconds) {
  Sleep(seconds * 1000);
}

void SleepForMillis(int millis) {
  Sleep(millis);
}

u64 NanoTime() {
  return 0;
}

void Abort() {
  if (::IsDebuggerPresent())
    __debugbreak();
  internal__exit(3);
}

#ifndef SANITIZER_GO
// Read the file to extract the ImageBase field from the PE header. If ASLR is
// disabled and this virtual address is available, the loader will typically
// load the image at this address. Therefore, we call it the preferred base. Any
// addresses in the DWARF typically assume that the object has been loaded at
// this address.
static uptr GetPreferredBase(const char *modname) {
  fd_t fd = OpenFile(modname, RdOnly, nullptr);
  if (fd == kInvalidFd)
    return 0;
  FileCloser closer(fd);

  // Read just the DOS header.
  IMAGE_DOS_HEADER dos_header;
  uptr bytes_read;
  if (!ReadFromFile(fd, &dos_header, sizeof(dos_header), &bytes_read) ||
      bytes_read != sizeof(dos_header))
    return 0;

  // The file should start with the right signature.
  if (dos_header.e_magic != IMAGE_DOS_SIGNATURE)
    return 0;

  // The layout at e_lfanew is:
  // "PE\0\0"
  // IMAGE_FILE_HEADER
  // IMAGE_OPTIONAL_HEADER
  // Seek to e_lfanew and read all that data.
  char buf[4 + sizeof(IMAGE_FILE_HEADER) + sizeof(IMAGE_OPTIONAL_HEADER)];
  if (::SetFilePointer(fd, dos_header.e_lfanew, nullptr, FILE_BEGIN) ==
      INVALID_SET_FILE_POINTER)
    return 0;
  if (!ReadFromFile(fd, &buf[0], sizeof(buf), &bytes_read) ||
      bytes_read != sizeof(buf))
    return 0;

  // Check for "PE\0\0" before the PE header.
  char *pe_sig = &buf[0];
  if (internal_memcmp(pe_sig, "PE\0\0", 4) != 0)
    return 0;

  // Skip over IMAGE_FILE_HEADER. We could do more validation here if we wanted.
  IMAGE_OPTIONAL_HEADER *pe_header =
      (IMAGE_OPTIONAL_HEADER *)(pe_sig + 4 + sizeof(IMAGE_FILE_HEADER));

  // Check for more magic in the PE header.
  if (pe_header->Magic != IMAGE_NT_OPTIONAL_HDR_MAGIC)
    return 0;

  // Finally, return the ImageBase.
  return (uptr)pe_header->ImageBase;
}

void ListOfModules::init() {
  clear();
  HANDLE cur_process = GetCurrentProcess();

  // Query the list of modules. Start by assuming there are no more than 256
  // modules and retry if that's not sufficient.
  HMODULE *hmodules = 0;
  uptr modules_buffer_size = sizeof(HMODULE) * 256;
  DWORD bytes_required;
  while (!hmodules) {
    hmodules = (HMODULE *)MmapOrDie(modules_buffer_size, __FUNCTION__);
    CHECK(EnumProcessModules(cur_process, hmodules, modules_buffer_size,
                             &bytes_required));
    if (bytes_required > modules_buffer_size) {
      // Either there turned out to be more than 256 hmodules, or new hmodules
      // could have been loaded since the last try. Retry.
      UnmapOrDie(hmodules, modules_buffer_size);
      hmodules = 0;
      modules_buffer_size = bytes_required;
    }
  }

  // |num_modules| is the number of modules actually present.
  size_t num_modules = bytes_required / sizeof(HMODULE);
  for (size_t i = 0; i < num_modules; ++i) {
    HMODULE handle = hmodules[i];
    MODULEINFO mi;
    if (!GetModuleInformation(cur_process, handle, &mi, sizeof(mi)))
      continue;

    // Get the UTF-16 path and convert to UTF-8.
    wchar_t modname_utf16[kMaxPathLength];
    int modname_utf16_len =
        GetModuleFileNameW(handle, modname_utf16, kMaxPathLength);
    if (modname_utf16_len == 0)
      modname_utf16[0] = '\0';
    char module_name[kMaxPathLength];
    int module_name_len =
        ::WideCharToMultiByte(CP_UTF8, 0, modname_utf16, modname_utf16_len + 1,
                              &module_name[0], kMaxPathLength, NULL, NULL);
    module_name[module_name_len] = '\0';

    uptr base_address = (uptr)mi.lpBaseOfDll;
    uptr end_address = (uptr)mi.lpBaseOfDll + mi.SizeOfImage;

    // Adjust the base address of the module so that we get a VA instead of an
    // RVA when computing the module offset. This helps llvm-symbolizer find the
    // right DWARF CU. In the common case that the image is loaded at its
    // preferred address, we will now print normal virtual addresses.
    uptr preferred_base = GetPreferredBase(&module_name[0]);
    uptr adjusted_base = base_address - preferred_base;

    LoadedModule cur_module;
    cur_module.set(module_name, adjusted_base);
    // We add the whole module as one single address range.
    cur_module.addAddressRange(base_address, end_address, /*executable*/ true);
    modules_.push_back(cur_module);
  }
  UnmapOrDie(hmodules, modules_buffer_size);
}

// We can't use atexit() directly at __asan_init time as the CRT is not fully
// initialized at this point. Place the functions into a vector and use
// atexit() as soon as it is ready for use (i.e. after .CRT$XIC initializers).
InternalMmapVectorNoCtor<void (*)(void)> atexit_functions;

int Atexit(void (*function)(void)) {
  atexit_functions.push_back(function);
  return 0;
}

static int RunAtexit() {
  int ret = 0;
  for (uptr i = 0; i < atexit_functions.size(); ++i) {
    ret |= atexit(atexit_functions[i]);
  }
  return ret;
}

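// Function pointers placed in the .CRT$XI* sections are run by the CRT during
// startup in section-name order, so .CRT$XID below executes after the
// .CRT$XIC initializers mentioned above.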
#pragma section(".CRT$XID", long, read)  // NOLINT
__declspec(allocate(".CRT$XID")) int (*__run_atexit)() = RunAtexit;
#endif

// ------------------ sanitizer_libc.h
fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *last_error) {
  // FIXME: Use the wide variants to handle Unicode filenames.
  fd_t res;
  if (mode == RdOnly) {
    res = CreateFileA(filename, GENERIC_READ,
                      FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
                      nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr);
  } else if (mode == WrOnly) {
    res = CreateFileA(filename, GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS,
                      FILE_ATTRIBUTE_NORMAL, nullptr);
  } else {
    UNIMPLEMENTED();
  }
  CHECK(res != kStdoutFd || kStdoutFd == kInvalidFd);
  CHECK(res != kStderrFd || kStderrFd == kInvalidFd);
  if (res == kInvalidFd && last_error)
    *last_error = GetLastError();
  return res;
}

void CloseFile(fd_t fd) {
  CloseHandle(fd);
}

bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,
                  error_t *error_p) {
  CHECK(fd != kInvalidFd);

  // bytes_read can't be passed directly to ReadFile:
  // uptr is unsigned long long on 64-bit Windows.
  unsigned long num_read_long;

  bool success = ::ReadFile(fd, buff, buff_size, &num_read_long, nullptr);
  if (!success && error_p)
    *error_p = GetLastError();
  if (bytes_read)
    *bytes_read = num_read_long;
  return success;
}

bool SupportsColoredOutput(fd_t fd) {
  // FIXME: support colored output.
  return false;
}

bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,
                 error_t *error_p) {
  CHECK(fd != kInvalidFd);

  // Handle null optional parameters.
  error_t dummy_error;
  error_p = error_p ? error_p : &dummy_error;
  uptr dummy_bytes_written;
  bytes_written = bytes_written ? bytes_written : &dummy_bytes_written;

  // Initialize output parameters in case we fail.
  *error_p = 0;
  *bytes_written = 0;

  // Map the conventional Unix fds 1 and 2 to Windows handles. They might be
  // closed, in which case this will fail.
  if (fd == kStdoutFd || fd == kStderrFd) {
    fd = GetStdHandle(fd == kStdoutFd ? STD_OUTPUT_HANDLE : STD_ERROR_HANDLE);
    if (fd == 0) {
      *error_p = ERROR_INVALID_HANDLE;
      return false;
    }
  }

  DWORD bytes_written_32;
  if (!WriteFile(fd, buff, buff_size, &bytes_written_32, 0)) {
    *error_p = GetLastError();
    return false;
  } else {
    *bytes_written = bytes_written_32;
    return true;
  }
}

bool RenameFile(const char *oldpath, const char *newpath, error_t *error_p) {
  UNIMPLEMENTED();
}

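// Sleep(0) relinquishes the remainder of the current time slice to any ready
// thread of equal priority, which is the closest Windows analog of
// sched_yield().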
uptr internal_sched_yield() {
  Sleep(0);
  return 0;
}

void internal__exit(int exitcode) {
  ExitProcess(exitcode);
}

uptr internal_ftruncate(fd_t fd, uptr size) {
  UNIMPLEMENTED();
}

uptr GetRSS() {
  return 0;
}

void *internal_start_thread(void (*func)(void *arg), void *arg) { return 0; }
void internal_join_thread(void *th) { }

// ---------------------- BlockingMutex ---------------- {{{1
const uptr LOCK_UNINITIALIZED = 0;
const uptr LOCK_READY = (uptr)-1;

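// The underlying lock is a CRITICAL_SECTION placed in opaque_storage_;
// owner_ doubles as an initialization marker (LOCK_UNINITIALIZED / LOCK_READY)
// and, while the mutex is held, as the id of the owning thread.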
BlockingMutex::BlockingMutex(LinkerInitialized li) {
  // FIXME: see comments in BlockingMutex::Lock() for the details.
  CHECK(li == LINKER_INITIALIZED || owner_ == LOCK_UNINITIALIZED);

  CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_));
  InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
  owner_ = LOCK_READY;
}

BlockingMutex::BlockingMutex() {
  CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_));
  InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
  owner_ = LOCK_READY;
}

void BlockingMutex::Lock() {
  if (owner_ == LOCK_UNINITIALIZED) {
    // FIXME: hm, global BlockingMutex objects are not initialized?!?
    // This might be a side effect of the clang+cl+link Frankenbuild...
    new(this) BlockingMutex((LinkerInitialized)(LINKER_INITIALIZED + 1));

    // FIXME: If it turns out the linker doesn't invoke our
    // constructors, we should probably manually Lock/Unlock all the global
    // locks while we're starting in one thread to avoid double-init races.
  }
  EnterCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
  CHECK_EQ(owner_, LOCK_READY);
  owner_ = GetThreadSelf();
}

void BlockingMutex::Unlock() {
  CHECK_EQ(owner_, GetThreadSelf());
  owner_ = LOCK_READY;
  LeaveCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
}

void BlockingMutex::CheckLocked() {
  CHECK_EQ(owner_, GetThreadSelf());
}

uptr GetTlsSize() {
  return 0;
}

void InitTlsSize() {
}

void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size) {
#ifdef SANITIZER_GO
  *stk_addr = 0;
  *stk_size = 0;
  *tls_addr = 0;
  *tls_size = 0;
#else
  uptr stack_top, stack_bottom;
  GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
  *stk_addr = stack_bottom;
  *stk_size = stack_top - stack_bottom;
  *tls_addr = 0;
  *tls_size = 0;
#endif
}

#if !SANITIZER_GO
void BufferedStackTrace::SlowUnwindStack(uptr pc, u32 max_depth) {
  CHECK_GE(max_depth, 2);
  // FIXME: CaptureStackBackTrace might be too slow for us.
  // FIXME: Compare with StackWalk64.
  // FIXME: Look at LLVMUnhandledExceptionFilter in Signals.inc
  size = CaptureStackBackTrace(2, Min(max_depth, kStackTraceMax),
                               (void**)trace, 0);
  if (size == 0)
    return;

  // Skip the RTL frames by searching for the PC in the stacktrace.
  uptr pc_location = LocatePcInTrace(pc);
  PopStackFrames(pc_location);
}

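// Note: SymFunctionTableAccess64 and SymGetModuleBase64 assume dbghelp's
// symbol handler has already been initialized (SymInitialize), and dbghelp is
// not thread-safe; this path relies on that being handled elsewhere in the
// runtime.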
void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
                                                    u32 max_depth) {
  CONTEXT ctx = *(CONTEXT *)context;
  STACKFRAME64 stack_frame;
  memset(&stack_frame, 0, sizeof(stack_frame));
  size = 0;
#if defined(_WIN64)
  int machine_type = IMAGE_FILE_MACHINE_AMD64;
  stack_frame.AddrPC.Offset = ctx.Rip;
  stack_frame.AddrFrame.Offset = ctx.Rbp;
  stack_frame.AddrStack.Offset = ctx.Rsp;
#else
  int machine_type = IMAGE_FILE_MACHINE_I386;
  stack_frame.AddrPC.Offset = ctx.Eip;
  stack_frame.AddrFrame.Offset = ctx.Ebp;
  stack_frame.AddrStack.Offset = ctx.Esp;
#endif
  stack_frame.AddrPC.Mode = AddrModeFlat;
  stack_frame.AddrFrame.Mode = AddrModeFlat;
  stack_frame.AddrStack.Mode = AddrModeFlat;
  while (StackWalk64(machine_type, GetCurrentProcess(), GetCurrentThread(),
                     &stack_frame, &ctx, NULL, &SymFunctionTableAccess64,
                     &SymGetModuleBase64, NULL) &&
         size < Min(max_depth, kStackTraceMax)) {
    trace_buffer[size++] = (uptr)stack_frame.AddrPC.Offset;
  }
}
#endif  // #if !SANITIZER_GO

void ReportFile::Write(const char *buffer, uptr length) {
  SpinMutexLock l(mu);
  ReopenIfNecessary();
  if (!WriteToFile(fd, buffer, length)) {
    // stderr may be closed, but we may be able to print to the debugger
    // instead. This is the case when launching a program from Visual Studio,
    // and the following routine should write to its console.
    OutputDebugStringA(buffer);
  }
}

void SetAlternateSignalStack() {
  // FIXME: Decide what to do on Windows.
}

void UnsetAlternateSignalStack() {
  // FIXME: Decide what to do on Windows.
}

void InstallDeadlySignalHandlers(SignalHandlerType handler) {
  (void)handler;
  // FIXME: Decide what to do on Windows.
}

bool IsHandledDeadlySignal(int signum) {
  // FIXME: Decide what to do on Windows.
  return false;
}

bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  SYSTEM_INFO si;
  GetNativeSystemInfo(&si);
  uptr page_size = si.dwPageSize;
  uptr page_mask = ~(page_size - 1);

  for (uptr page = beg & page_mask, end = (beg + size - 1) & page_mask;
       page <= end;) {
    MEMORY_BASIC_INFORMATION info;
    if (VirtualQuery((LPCVOID)page, &info, sizeof(info)) != sizeof(info))
      return false;

    if (info.Protect == 0 || info.Protect == PAGE_NOACCESS ||
        info.Protect == PAGE_EXECUTE)
      return false;

    if (info.RegionSize == 0)
      return false;

    page += info.RegionSize;
  }

  return true;
}

SignalContext SignalContext::Create(void *siginfo, void *context) {
  EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
  CONTEXT *context_record = (CONTEXT *)context;

  uptr pc = (uptr)exception_record->ExceptionAddress;
#ifdef _WIN64
  uptr bp = (uptr)context_record->Rbp;
  uptr sp = (uptr)context_record->Rsp;
#else
  uptr bp = (uptr)context_record->Ebp;
  uptr sp = (uptr)context_record->Esp;
#endif
  uptr access_addr = exception_record->ExceptionInformation[1];

  // The contents of this array are documented at
  // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363082(v=vs.85).aspx
  // The first element indicates read as 0, write as 1, or execute as 8. The
  // second element is the faulting address.
  WriteFlag write_flag = SignalContext::UNKNOWN;
  switch (exception_record->ExceptionInformation[0]) {
  case 0: write_flag = SignalContext::READ; break;
  case 1: write_flag = SignalContext::WRITE; break;
  case 8: write_flag = SignalContext::UNKNOWN; break;
  }
  bool is_memory_access = write_flag != SignalContext::UNKNOWN;
  return SignalContext(context, access_addr, pc, sp, bp, is_memory_access,
                       write_flag);
}

uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
  // FIXME: Actually implement this function.
  CHECK_GT(buf_len, 0);
  buf[0] = 0;
  return 0;
}

uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}

void CheckVMASize() {
  // Do nothing.
}

void MaybeReexec() {
  // No need to re-exec on Windows.
}

char **GetArgv() {
  // FIXME: Actually implement this function.
  return 0;
}

pid_t StartSubprocess(const char *program, const char *const argv[],
                      fd_t stdin_fd, fd_t stdout_fd, fd_t stderr_fd) {
  // FIXME: implement on this platform.
  // Should be implemented based on
  // SymbolizerProcess::StartSymbolizerSubprocess
  // from lib/sanitizer_common/sanitizer_symbolizer_win.cc.
  return -1;
}

bool IsProcessRunning(pid_t pid) {
  // FIXME: implement on this platform.
  return false;
}

int WaitForProcess(pid_t pid) { return -1; }

}  // namespace __sanitizer

#endif  // SANITIZER_WINDOWS