1 //===-- hwasan_report.cpp -------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of HWAddressSanitizer.
10 //
11 // Error reporting.
12 //===----------------------------------------------------------------------===//
13
14 #include "hwasan_report.h"
15
16 #include <dlfcn.h>
17
18 #include "hwasan.h"
19 #include "hwasan_allocator.h"
20 #include "hwasan_globals.h"
21 #include "hwasan_mapping.h"
22 #include "hwasan_thread.h"
23 #include "hwasan_thread_list.h"
24 #include "sanitizer_common/sanitizer_allocator_internal.h"
25 #include "sanitizer_common/sanitizer_common.h"
26 #include "sanitizer_common/sanitizer_flags.h"
27 #include "sanitizer_common/sanitizer_mutex.h"
28 #include "sanitizer_common/sanitizer_report_decorator.h"
29 #include "sanitizer_common/sanitizer_stackdepot.h"
30 #include "sanitizer_common/sanitizer_stacktrace_printer.h"
31 #include "sanitizer_common/sanitizer_symbolizer.h"
32
33 using namespace __sanitizer;
34
35 namespace __hwasan {
36
// RAII wrapper around one error report. While an instance is alive, report
// output produced through AppendToErrorMessageBuffer() is accumulated into
// error_message_ so the full text can be handed to the user callback and,
// for fatal reports, to SetAbortMessage(). Also holds the global
// ScopedErrorReportLock so reports from different threads do not interleave.
class ScopedReport {
 public:
  // |fatal| selects whether the destructor terminates the process via Die().
  // error_message_ starts as a single '\0' so MaybeAppendToErrorMessage()
  // always has a trailing terminator to overwrite.
  ScopedReport(bool fatal = false) : error_message_(1), fatal(fatal) {
    BlockingMutexLock lock(&error_message_lock_);
    // Aggregate message text only for fatal reports; for non-fatal ones
    // error_message_ stays empty (just the '\0').
    error_message_ptr_ = fatal ? &error_message_ : nullptr;
    ++hwasan_report_count;
  }

  ~ScopedReport() {
    void (*report_cb)(const char *);
    {
      // Snapshot the callback and stop aggregation under the lock, but invoke
      // the user callback outside of it.
      BlockingMutexLock lock(&error_message_lock_);
      report_cb = error_report_callback_;
      error_message_ptr_ = nullptr;
    }
    if (report_cb)
      report_cb(error_message_.data());
    if (fatal)
      SetAbortMessage(error_message_.data());
    if (common_flags()->print_module_map >= 2 ||
        (fatal && common_flags()->print_module_map))
      DumpProcessMap();
    if (fatal)
      Die();
  }

  // Append |msg| to the active report's message buffer, if one is active.
  // Called from the report-printing path via AppendToErrorMessageBuffer().
  static void MaybeAppendToErrorMessage(const char *msg) {
    BlockingMutexLock lock(&error_message_lock_);
    if (!error_message_ptr_)
      return;
    uptr len = internal_strlen(msg);
    uptr old_size = error_message_ptr_->size();
    error_message_ptr_->resize(old_size + len);
    // overwrite old trailing '\0', keep new trailing '\0' untouched.
    internal_memcpy(&(*error_message_ptr_)[old_size - 1], msg, len);
  }

  // Install the user callback that receives the full text of each report.
  static void SetErrorReportCallback(void (*callback)(const char *)) {
    BlockingMutexLock lock(&error_message_lock_);
    error_report_callback_ = callback;
  }

 private:
  ScopedErrorReportLock error_report_lock_;
  InternalMmapVector<char> error_message_;
  bool fatal;

  // Points at error_message_ of the active fatal ScopedReport, or null.
  // Guarded by error_message_lock_.
  static InternalMmapVector<char> *error_message_ptr_;
  static BlockingMutex error_message_lock_;
  static void (*error_report_callback_)(const char *);
};

InternalMmapVector<char> *ScopedReport::error_message_ptr_;
BlockingMutex ScopedReport::error_message_lock_;
void (*ScopedReport::error_report_callback_)(const char *);
92
// If there is an active ScopedReport, append to its error message.
// No-op (the text is dropped) when no report is being aggregated.
void AppendToErrorMessageBuffer(const char *buffer) {
  ScopedReport::MaybeAppendToErrorMessage(buffer);
}
97
GetStackTraceFromId(u32 id)98 static StackTrace GetStackTraceFromId(u32 id) {
99 CHECK(id);
100 StackTrace res = StackDepotGet(id);
101 CHECK(res.trace);
102 return res;
103 }
104
// A RAII object that holds a copy of the current thread stack ring buffer.
// The actual stack buffer may change while we are iterating over it (for
// example, Printf may call syslog() which can itself be built with hwasan).
class SavedStackAllocations {
 public:
  SavedStackAllocations(StackAllocationsRingBuffer *rb) {
    uptr size = rb->size() * sizeof(uptr);
    // Map storage aligned to 2*size (matching the original buffer's layout
    // requirements) and copy-construct the ring buffer into it in place.
    void *storage =
        MmapAlignedOrDieOnFatalError(size, size * 2, "saved stack allocations");
    new (&rb_) StackAllocationsRingBuffer(*rb, storage);
  }

  ~SavedStackAllocations() {
    StackAllocationsRingBuffer *rb = get();
    UnmapOrDie(rb->StartOfStorage(), rb->size() * sizeof(uptr));
  }

  // Returns the copied ring buffer. rb_ is the storage for the object
  // placement-new'ed in the constructor.
  StackAllocationsRingBuffer *get() {
    return (StackAllocationsRingBuffer *)&rb_;
  }

 private:
  uptr rb_;
};
129
130 class Decorator: public __sanitizer::SanitizerCommonDecorator {
131 public:
Decorator()132 Decorator() : SanitizerCommonDecorator() { }
Access()133 const char *Access() { return Blue(); }
Allocation() const134 const char *Allocation() const { return Magenta(); }
Origin() const135 const char *Origin() const { return Magenta(); }
Name() const136 const char *Name() const { return Green(); }
Location()137 const char *Location() { return Green(); }
Thread()138 const char *Thread() { return Green(); }
139 };
140
// Search |rb| (a thread's ring buffer of heap allocation records) for an
// entry whose tagged [addr, addr + requested_size) range contains
// |tagged_addr|. On success, copies the record into |har|, stores its ring
// position in |ring_index| and returns true. |num_matching_addrs| and
// |num_matching_addrs_4b| are developer statistics (see comments below);
// they are counted over the entries scanned before a match (or all entries
// if none matches).
static bool FindHeapAllocation(HeapAllocationsRingBuffer *rb, uptr tagged_addr,
                               HeapAllocationRecord *har, uptr *ring_index,
                               uptr *num_matching_addrs,
                               uptr *num_matching_addrs_4b) {
  if (!rb) return false;

  *num_matching_addrs = 0;
  *num_matching_addrs_4b = 0;
  for (uptr i = 0, size = rb->size(); i < size; i++) {
    auto h = (*rb)[i];
    // Full-tag comparison: both address bits and the pointer tag must match.
    if (h.tagged_addr <= tagged_addr &&
        h.tagged_addr + h.requested_size > tagged_addr) {
      *har = h;
      *ring_index = i;
      return true;
    }

    // Measure the number of heap ring buffer entries that would have matched
    // if we had only one entry per address (e.g. if the ring buffer data was
    // stored at the address itself). This will help us tune the allocator
    // implementation for MTE.
    if (UntagAddr(h.tagged_addr) <= UntagAddr(tagged_addr) &&
        UntagAddr(h.tagged_addr) + h.requested_size > UntagAddr(tagged_addr)) {
      ++*num_matching_addrs;
    }

    // Measure the number of heap ring buffer entries that would have matched
    // if we only had 4 tag bits, which is the case for MTE.
    auto untag_4b = [](uptr p) {
      return p & ((1ULL << 60) - 1);
    };
    if (untag_4b(h.tagged_addr) <= untag_4b(tagged_addr) &&
        untag_4b(h.tagged_addr) + h.requested_size > untag_4b(tagged_addr)) {
      ++*num_matching_addrs_4b;
    }
  }
  return false;
}
179
// Print stack objects that may be referenced by the faulting access at
// |untagged_addr| with pointer tag |addr_tag|, using the saved stack-frame
// ring buffer |sa|. Each ring entry packs a frame's PC and FP bits; with
// frame debug info we list matching locals, otherwise we dump the raw
// records for offline analysis.
static void PrintStackAllocations(StackAllocationsRingBuffer *sa,
                                  tag_t addr_tag, uptr untagged_addr) {
  uptr frames = Min((uptr)flags()->stack_history_size, sa->size());
  bool found_local = false;
  for (uptr i = 0; i < frames; i++) {
    const uptr *record_addr = &(*sa)[i];
    uptr record = *record_addr;
    // Records are appended in order; a zero record means the end of history.
    if (!record)
      break;
    // The frame's base tag is derived from the address of its ring slot.
    tag_t base_tag =
        reinterpret_cast<uptr>(record_addr) >> kRecordAddrBaseTagShift;
    // FP and PC are packed into a single record word; FP's low bits are
    // guaranteed zero and are not stored.
    uptr fp = (record >> kRecordFPShift) << kRecordFPLShift;
    uptr pc_mask = (1ULL << kRecordFPShift) - 1;
    uptr pc = record & pc_mask;
    FrameInfo frame;
    if (Symbolizer::GetOrInit()->SymbolizeFrame(pc, &frame)) {
      for (LocalInfo &local : frame.locals) {
        if (!local.has_frame_offset || !local.has_size || !local.has_tag_offset)
          continue;
        // A local matches if its (base_tag ^ tag_offset) equals the tag of
        // the faulting pointer.
        tag_t obj_tag = base_tag ^ local.tag_offset;
        if (obj_tag != addr_tag)
          continue;
        // Calculate the offset from the object address to the faulting
        // address. Because we only store bits 4-19 of FP (bits 0-3 are
        // guaranteed to be zero), the calculation is performed mod 2^20 and may
        // harmlessly underflow if the address mod 2^20 is below the object
        // address.
        uptr obj_offset =
            (untagged_addr - fp - local.frame_offset) & (kRecordFPModulus - 1);
        if (obj_offset >= local.size)
          continue;
        if (!found_local) {
          Printf("Potentially referenced stack objects:\n");
          found_local = true;
        }
        Printf("  %s in %s %s:%d\n", local.name, local.function_name,
               local.decl_file, local.decl_line);
      }
      frame.Clear();
    }
  }

  if (found_local)
    return;

  // We didn't find any locals. Most likely we don't have symbols, so dump
  // the information that we have for offline analysis.
  InternalScopedString frame_desc(GetPageSizeCached() * 2);
  Printf("Previously allocated frames:\n");
  for (uptr i = 0; i < frames; i++) {
    const uptr *record_addr = &(*sa)[i];
    uptr record = *record_addr;
    if (!record)
      break;
    uptr pc_mask = (1ULL << 48) - 1;
    uptr pc = record & pc_mask;
    frame_desc.append("  record_addr:0x%zx record:0x%zx",
                      reinterpret_cast<uptr>(record_addr), record);
    if (SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc)) {
      RenderFrame(&frame_desc, " %F %L\n", 0, frame->info.address, &frame->info,
                  common_flags()->symbolize_vs_style,
                  common_flags()->strip_path_prefix);
      frame->ClearAll();
    }
    // Print (and reset) after each record to limit buffer growth.
    Printf("%s", frame_desc.data());
    frame_desc.clear();
  }
}
248
249 // Returns true if tag == *tag_ptr, reading tags from short granules if
250 // necessary. This may return a false positive if tags 1-15 are used as a
251 // regular tag rather than a short granule marker.
TagsEqual(tag_t tag,tag_t * tag_ptr)252 static bool TagsEqual(tag_t tag, tag_t *tag_ptr) {
253 if (tag == *tag_ptr)
254 return true;
255 if (*tag_ptr == 0 || *tag_ptr > kShadowAlignment - 1)
256 return false;
257 uptr mem = ShadowToMem(reinterpret_cast<uptr>(tag_ptr));
258 tag_t inline_tag = *reinterpret_cast<tag_t *>(mem + kShadowAlignment - 1);
259 return tag == inline_tag;
260 }
261
// HWASan globals store the size of the global in the descriptor. In cases where
// we don't have a binary with symbols, we can't grab the size of the global
// from the debug info - but we might be able to retrieve it from the
// descriptor. Returns zero if the lookup failed.
static uptr GetGlobalSizeFromDescriptor(uptr ptr) {
  // Find the ELF object that this global resides in.
  Dl_info info;
  if (dladdr(reinterpret_cast<void *>(ptr), &info) == 0)
    return 0;
  auto *ehdr = reinterpret_cast<const ElfW(Ehdr) *>(info.dli_fbase);
  auto *phdr_begin = reinterpret_cast<const ElfW(Phdr) *>(
      reinterpret_cast<const u8 *>(ehdr) + ehdr->e_phoff);

  // Get the load bias. This is normally the same as the dli_fbase address on
  // position-independent code, but can be different on non-PIE executables,
  // binaries using LLD's partitioning feature, or binaries compiled with a
  // linker script.
  ElfW(Addr) load_bias = 0;
  for (const auto &phdr :
       ArrayRef<const ElfW(Phdr)>(phdr_begin, phdr_begin + ehdr->e_phnum)) {
    // The first PT_LOAD segment with p_offset == 0 maps the ELF header, so
    // its p_vaddr gives us the bias.
    if (phdr.p_type != PT_LOAD || phdr.p_offset != 0)
      continue;
    load_bias = reinterpret_cast<ElfW(Addr)>(ehdr) - phdr.p_vaddr;
    break;
  }

  // Walk all globals in this ELF object, looking for the one we're interested
  // in. Once we find it, we can stop iterating and return the size of the
  // global we're interested in.
  for (const hwasan_global &global :
       HwasanGlobalsFor(load_bias, phdr_begin, ehdr->e_phnum))
    if (global.addr() <= ptr && ptr < global.addr() + global.size())
      return global.size();

  return 0;
}
298
PrintAddressDescription(uptr tagged_addr,uptr access_size,StackAllocationsRingBuffer * current_stack_allocations)299 void PrintAddressDescription(
300 uptr tagged_addr, uptr access_size,
301 StackAllocationsRingBuffer *current_stack_allocations) {
302 Decorator d;
303 int num_descriptions_printed = 0;
304 uptr untagged_addr = UntagAddr(tagged_addr);
305
306 // Print some very basic information about the address, if it's a heap.
307 HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
308 if (uptr beg = chunk.Beg()) {
309 uptr size = chunk.ActualSize();
310 Printf("%s[%p,%p) is a %s %s heap chunk; "
311 "size: %zd offset: %zd\n%s",
312 d.Location(),
313 beg, beg + size,
314 chunk.FromSmallHeap() ? "small" : "large",
315 chunk.IsAllocated() ? "allocated" : "unallocated",
316 size, untagged_addr - beg,
317 d.Default());
318 }
319
320 // Check if this looks like a heap buffer overflow by scanning
321 // the shadow left and right and looking for the first adjacent
322 // object with a different memory tag. If that tag matches addr_tag,
323 // check the allocator if it has a live chunk there.
324 tag_t addr_tag = GetTagFromPointer(tagged_addr);
325 tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
326 tag_t *candidate = nullptr, *left = tag_ptr, *right = tag_ptr;
327 for (int i = 0; i < 1000; i++) {
328 if (TagsEqual(addr_tag, left)) {
329 candidate = left;
330 break;
331 }
332 --left;
333 if (TagsEqual(addr_tag, right)) {
334 candidate = right;
335 break;
336 }
337 ++right;
338 }
339
340 if (candidate) {
341 uptr mem = ShadowToMem(reinterpret_cast<uptr>(candidate));
342 HwasanChunkView chunk = FindHeapChunkByAddress(mem);
343 if (chunk.IsAllocated()) {
344 Printf("%s", d.Location());
345 Printf("%p is located %zd bytes to the %s of %zd-byte region [%p,%p)\n",
346 untagged_addr,
347 candidate == left ? untagged_addr - chunk.End()
348 : chunk.Beg() - untagged_addr,
349 candidate == left ? "right" : "left", chunk.UsedSize(),
350 chunk.Beg(), chunk.End());
351 Printf("%s", d.Allocation());
352 Printf("allocated here:\n");
353 Printf("%s", d.Default());
354 GetStackTraceFromId(chunk.GetAllocStackId()).Print();
355 num_descriptions_printed++;
356 } else {
357 // Check whether the address points into a loaded library. If so, this is
358 // most likely a global variable.
359 const char *module_name;
360 uptr module_address;
361 Symbolizer *sym = Symbolizer::GetOrInit();
362 if (sym->GetModuleNameAndOffsetForPC(mem, &module_name,
363 &module_address)) {
364 DataInfo info;
365 if (sym->SymbolizeData(mem, &info) && info.start) {
366 Printf(
367 "%p is located %zd bytes to the %s of %zd-byte global variable "
368 "%s [%p,%p) in %s\n",
369 untagged_addr,
370 candidate == left ? untagged_addr - (info.start + info.size)
371 : info.start - untagged_addr,
372 candidate == left ? "right" : "left", info.size, info.name,
373 info.start, info.start + info.size, module_name);
374 } else {
375 uptr size = GetGlobalSizeFromDescriptor(mem);
376 if (size == 0)
377 // We couldn't find the size of the global from the descriptors.
378 Printf(
379 "%p is located to the %s of a global variable in (%s+0x%x)\n",
380 untagged_addr, candidate == left ? "right" : "left",
381 module_name, module_address);
382 else
383 Printf(
384 "%p is located to the %s of a %zd-byte global variable in "
385 "(%s+0x%x)\n",
386 untagged_addr, candidate == left ? "right" : "left", size,
387 module_name, module_address);
388 }
389 num_descriptions_printed++;
390 }
391 }
392 }
393
394 hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
395 // Scan all threads' ring buffers to find if it's a heap-use-after-free.
396 HeapAllocationRecord har;
397 uptr ring_index, num_matching_addrs, num_matching_addrs_4b;
398 if (FindHeapAllocation(t->heap_allocations(), tagged_addr, &har,
399 &ring_index, &num_matching_addrs,
400 &num_matching_addrs_4b)) {
401 Printf("%s", d.Location());
402 Printf("%p is located %zd bytes inside of %zd-byte region [%p,%p)\n",
403 untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
404 har.requested_size, UntagAddr(har.tagged_addr),
405 UntagAddr(har.tagged_addr) + har.requested_size);
406 Printf("%s", d.Allocation());
407 Printf("freed by thread T%zd here:\n", t->unique_id());
408 Printf("%s", d.Default());
409 GetStackTraceFromId(har.free_context_id).Print();
410
411 Printf("%s", d.Allocation());
412 Printf("previously allocated here:\n", t);
413 Printf("%s", d.Default());
414 GetStackTraceFromId(har.alloc_context_id).Print();
415
416 // Print a developer note: the index of this heap object
417 // in the thread's deallocation ring buffer.
418 Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ring_index + 1,
419 flags()->heap_history_size);
420 Printf("hwasan_dev_note_num_matching_addrs: %zd\n", num_matching_addrs);
421 Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n",
422 num_matching_addrs_4b);
423
424 t->Announce();
425 num_descriptions_printed++;
426 }
427
428 // Very basic check for stack memory.
429 if (t->AddrIsInStack(untagged_addr)) {
430 Printf("%s", d.Location());
431 Printf("Address %p is located in stack of thread T%zd\n", untagged_addr,
432 t->unique_id());
433 Printf("%s", d.Default());
434 t->Announce();
435
436 auto *sa = (t == GetCurrentThread() && current_stack_allocations)
437 ? current_stack_allocations
438 : t->stack_allocations();
439 PrintStackAllocations(sa, addr_tag, untagged_addr);
440 num_descriptions_printed++;
441 }
442 });
443
444 // Print the remaining threads, as an extra information, 1 line per thread.
445 hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });
446
447 if (!num_descriptions_printed)
448 // We exhausted our possibilities. Bail out.
449 Printf("HWAddressSanitizer can not describe address in more detail.\n");
450 }
451
// Intentionally empty: HWASan has no statistics to report here.
void ReportStats() {}
453
PrintTagInfoAroundAddr(tag_t * tag_ptr,uptr num_rows,void (* print_tag)(InternalScopedString & s,tag_t * tag))454 static void PrintTagInfoAroundAddr(tag_t *tag_ptr, uptr num_rows,
455 void (*print_tag)(InternalScopedString &s,
456 tag_t *tag)) {
457 const uptr row_len = 16; // better be power of two.
458 tag_t *center_row_beg = reinterpret_cast<tag_t *>(
459 RoundDownTo(reinterpret_cast<uptr>(tag_ptr), row_len));
460 tag_t *beg_row = center_row_beg - row_len * (num_rows / 2);
461 tag_t *end_row = center_row_beg + row_len * ((num_rows + 1) / 2);
462 InternalScopedString s(GetPageSizeCached() * 8);
463 for (tag_t *row = beg_row; row < end_row; row += row_len) {
464 s.append("%s", row == center_row_beg ? "=>" : " ");
465 s.append("%p:", row);
466 for (uptr i = 0; i < row_len; i++) {
467 s.append("%s", row + i == tag_ptr ? "[" : " ");
468 print_tag(s, &row[i]);
469 s.append("%s", row + i == tag_ptr ? "]" : " ");
470 }
471 s.append("\n");
472 }
473 Printf("%s", s.data());
474 }
475
// Print two views of the shadow around |tag_ptr|: the raw memory tags, and
// the tags stored inside short granules (for shadow values 1..15, the real
// tag lives in the granule's last byte).
static void PrintTagsAroundAddr(tag_t *tag_ptr) {
  Printf(
      "Memory tags around the buggy address (one tag corresponds to %zd "
      "bytes):\n", kShadowAlignment);
  PrintTagInfoAroundAddr(tag_ptr, 17, [](InternalScopedString &s, tag_t *tag) {
    s.append("%02x", *tag);
  });

  Printf(
      "Tags for short granules around the buggy address (one tag corresponds "
      "to %zd bytes):\n",
      kShadowAlignment);
  PrintTagInfoAroundAddr(tag_ptr, 3, [](InternalScopedString &s, tag_t *tag) {
    // Shadow values 1..kShadowAlignment mark short granules; fetch the tag
    // byte from the granule itself. Everything else has no short-granule tag.
    if (*tag >= 1 && *tag <= kShadowAlignment) {
      uptr granule_addr = ShadowToMem(reinterpret_cast<uptr>(tag));
      s.append("%02x",
               *reinterpret_cast<u8 *>(granule_addr + kShadowAlignment - 1));
    } else {
      s.append("..");
    }
  });
  Printf(
      "See "
      "https://clang.llvm.org/docs/"
      "HardwareAssistedAddressSanitizerDesign.html#short-granules for a "
      "description of short granule tags\n");
}
503
// Report a free() of a pointer that does not match a live allocation
// (wrong tag, double free, or not a heap pointer).
void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
  // Fatality follows the halt_on_error flag for this error type.
  ScopedReport R(flags()->halt_on_error);

  uptr untagged_addr = UntagAddr(tagged_addr);
  tag_t ptr_tag = GetTagFromPointer(tagged_addr);
  tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
  tag_t mem_tag = *tag_ptr;
  Decorator d;
  Printf("%s", d.Error());
  uptr pc = stack->size ? stack->trace[0] : 0;
  const char *bug_type = "invalid-free";
  Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
         untagged_addr, pc);
  Printf("%s", d.Access());
  Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag, mem_tag);
  Printf("%s", d.Default());

  stack->Print();

  // access_size 0: this is a free, not a sized memory access.
  PrintAddressDescription(tagged_addr, 0, nullptr);

  PrintTagsAroundAddr(tag_ptr);

  ReportErrorSummary(bug_type, stack);
}
529
// Report that the magic bytes in a heap object's tail (the slack between
// |orig_size| and the granule boundary) were overwritten, detected at
// free/delete time. |expected| holds the magic bytes that should have been
// found there.
void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
                           const u8 *expected) {
  uptr tail_size = kShadowAlignment - (orig_size % kShadowAlignment);
  ScopedReport R(flags()->halt_on_error);
  Decorator d;
  uptr untagged_addr = UntagAddr(tagged_addr);
  Printf("%s", d.Error());
  const char *bug_type = "allocation-tail-overwritten";
  Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName,
         bug_type, untagged_addr, untagged_addr + orig_size, orig_size);
  Printf("\n%s", d.Default());
  stack->Print();
  HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
  if (chunk.Beg()) {
    Printf("%s", d.Allocation());
    Printf("allocated here:\n");
    Printf("%s", d.Default());
    GetStackTraceFromId(chunk.GetAllocStackId()).Print();
  }

  // Three aligned lines: the actual tail bytes, the expected magic, and a
  // "^^" marker under each mismatching byte. Leading granule bytes that are
  // not part of the tail are shown as "..".
  InternalScopedString s(GetPageSizeCached() * 8);
  CHECK_GT(tail_size, 0U);
  CHECK_LT(tail_size, kShadowAlignment);
  u8 *tail = reinterpret_cast<u8*>(untagged_addr + orig_size);
  s.append("Tail contains: ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
    s.append(".. ");
  for (uptr i = 0; i < tail_size; i++)
    s.append("%02x ", tail[i]);
  s.append("\n");
  s.append("Expected:      ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
    s.append(".. ");
  for (uptr i = 0; i < tail_size; i++)
    s.append("%02x ", expected[i]);
  s.append("\n");
  s.append("               ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
    s.append("   ");
  for (uptr i = 0; i < tail_size; i++)
    s.append("%s ", expected[i] != tail[i] ? "^^" : "  ");

  s.append("\nThis error occurs when a buffer overflow overwrites memory\n"
    "to the right of a heap object, but within the %zd-byte granule, e.g.\n"
    "   char *x = new char[20];\n"
    "   x[25] = 42;\n"
    "%s does not detect such bugs in uninstrumented code at the time of write,"
    "\nbut can detect them at the time of free/delete.\n"
    "To disable this feature set HWASAN_OPTIONS=free_checks_tail_magic=0\n",
    kShadowAlignment, SanitizerToolName);
  Printf("%s", s.data());
  GetCurrentThread()->Announce();

  tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
  PrintTagsAroundAddr(tag_ptr);

  ReportErrorSummary(bug_type, stack);
}
588
// Report the main HWASan error: a memory access whose pointer tag did not
// match the memory tag. |registers_frame|, if non-null, points at the
// register dump saved by the tag-mismatch trampoline (see ReportRegisters).
void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
                       bool is_store, bool fatal, uptr *registers_frame) {
  ScopedReport R(fatal);
  // Copy the stack ring buffer up front: printing below may clobber it.
  SavedStackAllocations current_stack_allocations(
      GetCurrentThread()->stack_allocations());

  Decorator d;
  Printf("%s", d.Error());
  uptr untagged_addr = UntagAddr(tagged_addr);
  // TODO: when possible, try to print heap-use-after-free, etc.
  const char *bug_type = "tag-mismatch";
  uptr pc = stack->size ? stack->trace[0] : 0;
  Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
         untagged_addr, pc);

  Thread *t = GetCurrentThread();

  // Find the first faulting granule within the access range; the access may
  // start in valid memory and only cross into a mismatched granule.
  sptr offset =
      __hwasan_test_shadow(reinterpret_cast<void *>(tagged_addr), access_size);
  CHECK(offset >= 0 && offset < static_cast<sptr>(access_size));
  tag_t ptr_tag = GetTagFromPointer(tagged_addr);
  tag_t *tag_ptr =
      reinterpret_cast<tag_t *>(MemToShadow(untagged_addr + offset));
  tag_t mem_tag = *tag_ptr;

  Printf("%s", d.Access());
  Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread T%zd\n",
         is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
         mem_tag, t->unique_id());
  if (offset != 0)
    Printf("Invalid access starting at offset [%zu, %zu)\n", offset,
           Min(access_size, static_cast<uptr>(offset) + (1 << kShadowScale)));
  Printf("%s", d.Default());

  stack->Print();

  PrintAddressDescription(tagged_addr, access_size,
                          current_stack_allocations.get());
  t->Announce();

  PrintTagsAroundAddr(tag_ptr);

  if (registers_frame)
    ReportRegisters(registers_frame, pc);

  ReportErrorSummary(bug_type, stack);
}
636
// See the frame breakdown defined in __hwasan_tag_mismatch (from
// hwasan_tag_mismatch_aarch64.S).
// Dumps the AArch64 general-purpose registers (x0-x30) saved in |frame| at
// the point of the faulting access at |pc|.
void ReportRegisters(uptr *frame, uptr pc) {
  Printf("Registers where the failure occurred (pc %p):\n", pc);

  // We explicitly print a single line (4 registers/line) each iteration to
  // reduce the amount of logcat error messages printed. Each Printf() will
  // result in a new logcat line, irrespective of whether a newline is present,
  // and so we wish to reduce the number of Printf() calls we have to make.
  Printf("    x0  %016llx  x1  %016llx  x2  %016llx  x3  %016llx\n",
       frame[0], frame[1], frame[2], frame[3]);
  Printf("    x4  %016llx  x5  %016llx  x6  %016llx  x7  %016llx\n",
       frame[4], frame[5], frame[6], frame[7]);
  Printf("    x8  %016llx  x9  %016llx  x10 %016llx  x11 %016llx\n",
       frame[8], frame[9], frame[10], frame[11]);
  Printf("    x12 %016llx  x13 %016llx  x14 %016llx  x15 %016llx\n",
       frame[12], frame[13], frame[14], frame[15]);
  Printf("    x16 %016llx  x17 %016llx  x18 %016llx  x19 %016llx\n",
       frame[16], frame[17], frame[18], frame[19]);
  Printf("    x20 %016llx  x21 %016llx  x22 %016llx  x23 %016llx\n",
       frame[20], frame[21], frame[22], frame[23]);
  Printf("    x24 %016llx  x25 %016llx  x26 %016llx  x27 %016llx\n",
       frame[24], frame[25], frame[26], frame[27]);
  Printf("    x28 %016llx  x29 %016llx  x30 %016llx\n",
       frame[28], frame[29], frame[30]);
}
663
664 } // namespace __hwasan
665
// Public C interface: register a callback that receives the full text of
// every HWASan error report.
void __hwasan_set_error_report_callback(void (*callback)(const char *)) {
  __hwasan::ScopedReport::SetErrorReportCallback(callback);
}
669