//===-- asan_thread.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Thread-related code.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_poisoning.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "asan_mapping.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan/lsan_common.h"

namespace __asan {

// AsanThreadContext implementation.

void AsanThreadContext::OnCreated(void *arg) {
  CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg);
  if (args->stack)
    stack_id = StackDepotPut(*args->stack);
  thread = args->thread;
  thread->set_context(this);
}

void AsanThreadContext::OnFinished() {
  // Drop the link to the AsanThread object.
  thread = nullptr;
}

// MIPS requires aligned address
static ALIGNED(16) char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ThreadRegistry *asan_thread_registry;

static BlockingMutex mu_for_thread_context(LINKER_INITIALIZED);
static LowLevelAllocator allocator_for_thread_context;

static ThreadContextBase *GetAsanThreadContext(u32 tid) {
  BlockingMutexLock lock(&mu_for_thread_context);
  return new(allocator_for_thread_context) AsanThreadContext(tid);
}

ThreadRegistry &asanThreadRegistry() {
  static bool initialized;
  // Don't worry about thread safety - this is expected to be called while
  // there is still only a single thread.
  if (!initialized) {
    // Never reuse ASan threads: we store pointer to AsanThreadContext
    // in TSD and can't reliably tell when no more TSD destructors will
    // be called. It would be wrong to reuse AsanThreadContext for another
    // thread before all TSD destructors will be called for it.
    asan_thread_registry = new(thread_registry_placeholder) ThreadRegistry(
        GetAsanThreadContext, kMaxNumberOfThreads, kMaxNumberOfThreads);
    initialized = true;
  }
  return *asan_thread_registry;
}

AsanThreadContext *GetThreadContextByTidLocked(u32 tid) {
  return static_cast<AsanThreadContext *>(
      asanThreadRegistry().GetThreadLocked(tid));
}

// AsanThread implementation.

AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg,
                               u32 parent_tid, StackTrace *stack,
                               bool detached) {
  uptr PageSize = GetPageSizeCached();
  uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
  AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__);
  thread->start_routine_ = start_routine;
  thread->arg_ = arg;
  AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
  asanThreadRegistry().CreateThread(*reinterpret_cast<uptr *>(thread), detached,
                                    parent_tid, &args);

  return thread;
}
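
// Illustrative sketch (not part of this file): the pthread_create interceptor
// in asan_interceptors.cpp drives this API roughly as follows. The local
// names and trampoline shape here are assumptions for illustration; the
// exact code varies across sanitizer versions.
//
//   // Parent thread, inside the intercepted pthread_create:
//   AsanThread *t = AsanThread::Create(start_routine, arg, parent_tid,
//                                      &stack, detached);
//   // Child thread, inside the start-routine trampoline:
//   SetCurrentThread(t);
//   t->ThreadStart(GetTid(), &is_registered_flag);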

void AsanThread::TSDDtor(void *tsd) {
  AsanThreadContext *context = (AsanThreadContext*)tsd;
  VReport(1, "T%d TSDDtor\n", context->tid);
  if (context->thread)
    context->thread->Destroy();
}

void AsanThread::Destroy() {
  int tid = this->tid();
  VReport(1, "T%d exited\n", tid);

  malloc_storage().CommitBack();
  if (common_flags()->use_sigaltstack) UnsetAlternateSignalStack();
  asanThreadRegistry().FinishThread(tid);
  FlushToDeadThreadStats(&stats_);
  // We also clear the shadow on thread destruction because
  // some code may still be executing in later TSD destructors
  // and we don't want it to have any poisoned stack.
  ClearShadowForThreadStackAndTLS();
  DeleteFakeStack(tid);
  uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
  UnmapOrDie(this, size);
  DTLS_Destroy();
}

void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
                                  uptr size) {
  if (atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: starting fiber switch while in fiber switch\n");
    Die();
  }

  next_stack_bottom_ = bottom;
  next_stack_top_ = bottom + size;
  atomic_store(&stack_switching_, 1, memory_order_release);

  FakeStack *current_fake_stack = fake_stack_;
  if (fake_stack_save)
    *fake_stack_save = fake_stack_;
  fake_stack_ = nullptr;
  SetTLSFakeStack(nullptr);
  // If fake_stack_save is null, the fiber is being destroyed; release its
  // fake stack.
  if (!fake_stack_save && current_fake_stack)
    current_fake_stack->Destroy(this->tid());
}

void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save,
                                   uptr *bottom_old,
                                   uptr *size_old) {
  if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: finishing a fiber switch that has not started\n");
    Die();
  }

  if (fake_stack_save) {
    SetTLSFakeStack(fake_stack_save);
    fake_stack_ = fake_stack_save;
  }

  if (bottom_old)
    *bottom_old = stack_bottom_;
  if (size_old)
    *size_old = stack_top_ - stack_bottom_;
  stack_bottom_ = next_stack_bottom_;
  stack_top_ = next_stack_top_;
  atomic_store(&stack_switching_, 0, memory_order_release);
  next_stack_top_ = 0;
  next_stack_bottom_ = 0;
}
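
// Illustrative sketch (not part of this file) of the public fiber-switch
// protocol these two methods implement; the entry points are the
// __sanitizer_start_switch_fiber / __sanitizer_finish_switch_fiber functions
// defined at the bottom of this file. The swapcontext-based coroutine shown
// here is an assumption for illustration.
//
//   void *fake_stack_save = nullptr;
//   __sanitizer_start_switch_fiber(&fake_stack_save,
//                                  target_stack_bottom, target_stack_size);
//   swapcontext(&current_ctx, &target_ctx);  // the actual stack switch
//   // First thing after landing on the destination stack:
//   __sanitizer_finish_switch_fiber(fake_stack_save, &old_bottom, &old_size);
//
// Pass nullptr instead of &fake_stack_save when the current fiber is about
// to be destroyed, so its fake stack is released (see StartSwitchFiber
// above).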

inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
  if (!atomic_load(&stack_switching_, memory_order_acquire)) {
    // Make sure the stack bounds are fully initialized.
    if (stack_bottom_ >= stack_top_) return {0, 0};
    return {stack_bottom_, stack_top_};
  }
  char local;
  const uptr cur_stack = (uptr)&local;
  // Note: need to check next stack first, because FinishSwitchFiber
  // may be in process of overwriting stack_top_/bottom_. But in such case
  // we are already on the next stack.
  if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_)
    return {next_stack_bottom_, next_stack_top_};
  return {stack_bottom_, stack_top_};
}

uptr AsanThread::stack_top() {
  return GetStackBounds().top;
}

uptr AsanThread::stack_bottom() {
  return GetStackBounds().bottom;
}

uptr AsanThread::stack_size() {
  const auto bounds = GetStackBounds();
  return bounds.top - bounds.bottom;
}

// We want to create the FakeStack lazily on the first use, but not before
// the stack size is known; the procedure also has to be async-signal safe.
FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
  uptr stack_size = this->stack_size();
  if (stack_size == 0)  // stack_size is not yet available, don't use FakeStack.
    return nullptr;
  uptr old_val = 0;
  // fake_stack_ has 3 states:
  // 0   -- not initialized
  // 1   -- being initialized
  // ptr -- initialized
  // This CAS checks if the state was 0 and if so changes it to state 1,
  // if that was successful, it initializes the pointer.
  if (atomic_compare_exchange_strong(
          reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
          memory_order_relaxed)) {
    uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size));
    CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
    stack_size_log =
        Min(stack_size_log, static_cast<uptr>(flags()->max_uar_stack_size_log));
    stack_size_log =
        Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log));
    fake_stack_ = FakeStack::Create(stack_size_log);
    DCHECK_EQ(GetCurrentThread(), this);
    SetTLSFakeStack(fake_stack_);
    return fake_stack_;
  }
  return nullptr;
}
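
// A note on the CAS above, offered as an assumption based on the function's
// async-signal-safe contract: since fake_stack_ is per-thread, the competing
// writer is a signal handler interrupting this same thread mid-init; a caller
// that loses the race gets nullptr and falls back to the real stack.
//
// Worked example of the size clamping, taking the documented flag defaults
// (min_uar_stack_size_log = 16, max_uar_stack_size_log = 20) as an
// assumption: for an 8 MiB thread stack,
// Log2(RoundUpToPowerOfTwo(8 << 20)) == 23, which is clamped down to 20
// before being handed to FakeStack::Create().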

void AsanThread::Init(const InitOptions *options) {
  DCHECK_NE(tid(), ThreadRegistry::kUnknownTid);
  next_stack_top_ = next_stack_bottom_ = 0;
  atomic_store(&stack_switching_, false, memory_order_release);
  CHECK_EQ(this->stack_size(), 0U);
  SetThreadStackAndTls(options);
  if (stack_top_ != stack_bottom_) {
    CHECK_GT(this->stack_size(), 0U);
    CHECK(AddrIsInMem(stack_bottom_));
    CHECK(AddrIsInMem(stack_top_ - 1));
  }
  ClearShadowForThreadStackAndTLS();
  fake_stack_ = nullptr;
  if (__asan_option_detect_stack_use_after_return &&
      tid() == GetCurrentTidOrInvalid()) {
    // AsyncSignalSafeLazyInitFakeStack makes use of threadlocals and must be
    // called from the context of the thread it is initializing, not its
    // parent. Most platforms call AsanThread::Init on the newly-spawned
    // thread, but Fuchsia calls it from the parent thread; the tid() check
    // above skips this call in that case, and the new thread initializes the
    // fake stack itself the first time it tries to access it.
    AsyncSignalSafeLazyInitFakeStack();
  }
  int local = 0;
  VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
          (void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
          &local);
}

// Fuchsia and RTEMS don't use ThreadStart.
// asan_fuchsia.cpp/asan_rtems.cpp define CreateMainThread and
// SetThreadStackAndTls.
#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS

thread_return_t AsanThread::ThreadStart(
    tid_t os_id, atomic_uintptr_t *signal_thread_is_registered) {
  Init();
  asanThreadRegistry().StartThread(tid(), os_id, ThreadType::Regular, nullptr);
  if (signal_thread_is_registered)
    atomic_store(signal_thread_is_registered, 1, memory_order_release);

  if (common_flags()->use_sigaltstack) SetAlternateSignalStack();

  if (!start_routine_) {
    // start_routine_ == 0 if we're on the main thread or on one of the
    // OS X libdispatch worker threads. But nobody is supposed to call
    // ThreadStart() for the worker threads.
    CHECK_EQ(tid(), 0);
    return 0;
  }

  thread_return_t res = start_routine_(arg_);

  // On POSIX systems we defer this to the TSD destructor. LSan will consider
  // the thread's memory as non-live from the moment we call Destroy(), even
  // though that memory might contain pointers to heap objects which will be
  // cleaned up by a user-defined TSD destructor. Thus, calling Destroy() before
  // the TSD destructors have run might cause false positives in LSan.
  if (!SANITIZER_POSIX)
    this->Destroy();

  return res;
}

AsanThread *CreateMainThread() {
  AsanThread *main_thread = AsanThread::Create(
      /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0,
      /* stack */ nullptr, /* detached */ true);
  SetCurrentThread(main_thread);
  main_thread->ThreadStart(internal_getpid(),
                           /* signal_thread_is_registered */ nullptr);
  return main_thread;
}

// This implementation doesn't use the argument, which is just passed down
// from the caller of Init (see above). It's only there to support
// OS-specific implementations that need more information passed through.
void AsanThread::SetThreadStackAndTls(const InitOptions *options) {
  DCHECK_EQ(options, nullptr);
  uptr tls_size = 0;
  uptr stack_size = 0;
  GetThreadStackAndTls(tid() == 0, &stack_bottom_, &stack_size, &tls_begin_,
                       &tls_size);
  stack_top_ = stack_bottom_ + stack_size;
  tls_end_ = tls_begin_ + tls_size;
  dtls_ = DTLS_Get();

  if (stack_top_ != stack_bottom_) {
    int local;
    CHECK(AddrIsInStack((uptr)&local));
  }
}

#endif  // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS

void AsanThread::ClearShadowForThreadStackAndTLS() {
  if (stack_top_ != stack_bottom_)
    PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0);
  if (tls_begin_ != tls_end_) {
    uptr tls_begin_aligned = RoundDownTo(tls_begin_, SHADOW_GRANULARITY);
    uptr tls_end_aligned = RoundUpTo(tls_end_, SHADOW_GRANULARITY);
    FastPoisonShadowPartialRightRedzone(tls_begin_aligned,
                                        tls_end_ - tls_begin_aligned,
                                        tls_end_aligned - tls_end_, 0);
  }
}
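
// Worked example for the TLS case above, with SHADOW_GRANULARITY == 8 (the
// usual value) and hypothetical bounds tls_begin_ = 0x1004 and
// tls_end_ = 0x1013: tls_begin_aligned = 0x1000 and tls_end_aligned = 0x1018,
// so the call receives an addressable size of 0x1013 - 0x1000 = 0x13 bytes
// plus a 0x1018 - 0x1013 = 5-byte tail in the last shadow granule, and clears
// the corresponding shadow with the fill value 0.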

// An instrumented function's stack frame starts with three words describing
// it: word 0 holds kCurrentStackFrameMagic, word 1 a pointer to the frame
// description string, and word 2 the function's PC. This is the layout the
// ptr[0..2] / bottom[1..2] accesses below rely on.
bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
                                           StackFrameAccess *access) {
  if (stack_top_ == stack_bottom_)
    return false;

  uptr bottom = 0;
  if (AddrIsInStack(addr)) {
    bottom = stack_bottom();
  } else if (has_fake_stack()) {
    bottom = fake_stack()->AddrIsInFakeStack(addr);
    CHECK(bottom);
    access->offset = addr - bottom;
    access->frame_pc = ((uptr*)bottom)[2];
    access->frame_descr = (const char *)((uptr*)bottom)[1];
    return true;
  }
  uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8);  // align addr.
  uptr mem_ptr = RoundDownTo(aligned_addr, SHADOW_GRANULARITY);
  u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
  u8 *shadow_bottom = (u8*)MemToShadow(bottom);

  // Walk the shadow backwards until we reach the left redzone of the frame
  // that contains addr...
  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr != kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
    mem_ptr -= SHADOW_GRANULARITY;
  }

  // ...then skip over the redzone itself.
  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr == kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
    mem_ptr -= SHADOW_GRANULARITY;
  }

  if (shadow_ptr < shadow_bottom) {
    return false;
  }

  // mem_ptr now points one granule below the frame base.
  uptr* ptr = (uptr*)(mem_ptr + SHADOW_GRANULARITY);
  CHECK(ptr[0] == kCurrentStackFrameMagic);
  access->offset = addr - (uptr)ptr;
  access->frame_pc = ptr[2];
  access->frame_descr = (const char*)ptr[1];
  return true;
}

uptr AsanThread::GetStackVariableShadowStart(uptr addr) {
  uptr bottom = 0;
  if (AddrIsInStack(addr)) {
    bottom = stack_bottom();
  } else if (has_fake_stack()) {
    bottom = fake_stack()->AddrIsInFakeStack(addr);
    if (bottom == 0) {
      return 0;
    }
  } else {
    return 0;
  }

  uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8);  // align addr.
  u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
  u8 *shadow_bottom = (u8*)MemToShadow(bottom);

  while (shadow_ptr >= shadow_bottom &&
         (*shadow_ptr != kAsanStackLeftRedzoneMagic &&
          *shadow_ptr != kAsanStackMidRedzoneMagic &&
          *shadow_ptr != kAsanStackRightRedzoneMagic))
    shadow_ptr--;

  return (uptr)shadow_ptr + 1;
}

bool AsanThread::AddrIsInStack(uptr addr) {
  const auto bounds = GetStackBounds();
  return addr >= bounds.bottom && addr < bounds.top;
}

static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
                                       void *addr) {
  AsanThreadContext *tctx = static_cast<AsanThreadContext*>(tctx_base);
  AsanThread *t = tctx->thread;
  if (!t) return false;
  if (t->AddrIsInStack((uptr)addr)) return true;
  if (t->has_fake_stack() && t->fake_stack()->AddrIsInFakeStack((uptr)addr))
    return true;
  return false;
}

AsanThread *GetCurrentThread() {
  if (SANITIZER_RTEMS && !asan_inited)
    return nullptr;

  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (!context) {
    if (SANITIZER_ANDROID) {
      // On Android, libc constructor is called _after_ asan_init, and cleans up
      // TSD. Try to figure out if this is still the main thread by the stack
      // address. We are not entirely sure that we have correct main thread
      // limits, so only do this magic on Android, and only if the found thread
      // is the main thread.
      AsanThreadContext *tctx = GetThreadContextByTidLocked(0);
      if (tctx && ThreadStackContainsAddress(tctx, &context)) {
        SetCurrentThread(tctx->thread);
        return tctx->thread;
      }
    }
    return nullptr;
  }
  return context->thread;
}

void SetCurrentThread(AsanThread *t) {
  CHECK(t->context());
  VReport(2, "SetCurrentThread: %p for thread %p\n", t->context(),
          (void *)GetThreadSelf());
  // Make sure we do not reset the current AsanThread.
  CHECK_EQ(0, AsanTSDGet());
  AsanTSDSet(t->context());
  CHECK_EQ(t->context(), AsanTSDGet());
}

u32 GetCurrentTidOrInvalid() {
  AsanThread *t = GetCurrentThread();
  return t ? t->tid() : kInvalidTid;
}

AsanThread *FindThreadByStackAddress(uptr addr) {
  asanThreadRegistry().CheckLocked();
  AsanThreadContext *tctx = static_cast<AsanThreadContext *>(
      asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress,
                                                   (void *)addr));
  return tctx ? tctx->thread : nullptr;
}

void EnsureMainThreadIDIsCorrect() {
  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (context && (context->tid == 0))
    context->os_id = GetTid();
}

__asan::AsanThread *GetAsanThreadByOsIDLocked(tid_t os_id) {
  __asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
      __asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
  if (!context) return nullptr;
  return context->thread;
}
} // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (!t) return false;
  *stack_begin = t->stack_bottom();
  *stack_end = t->stack_top();
  *tls_begin = t->tls_begin();
  *tls_end = t->tls_end();
  // ASan doesn't keep allocator caches in TLS, so these are unused.
  *cache_begin = 0;
  *cache_end = 0;
  *dtls = t->dtls();
  return true;
}
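
// Illustrative sketch (hypothetical caller, not part of this file) of how a
// leak-check pass in lsan_common is expected to consume these ranges as root
// regions; the local names and elided arguments are assumptions:
//
//   uptr sb, se, tb, te, cb, ce;
//   DTLS *dtls;
//   if (__lsan::GetThreadRangesLocked(os_id, &sb, &se, &tb, &te, &cb, &ce,
//                                     &dtls)) {
//     ScanRangeForPointers(sb, se, ...);  // the thread stack is a root region
//     ScanRangeForPointers(tb, te, ...);  // so is its static TLS block
//   }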

void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {}

void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
                            void *arg) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (t && t->has_fake_stack())
    t->fake_stack()->ForEachFakeFrame(callback, arg);
}

void LockThreadRegistry() {
  __asan::asanThreadRegistry().Lock();
}

void UnlockThreadRegistry() {
  __asan::asanThreadRegistry().Unlock();
}

ThreadRegistry *GetThreadRegistryLocked() {
  __asan::asanThreadRegistry().CheckLocked();
  return &__asan::asanThreadRegistry();
}

void EnsureMainThreadIDIsCorrect() {
  __asan::EnsureMainThreadIDIsCorrect();
}
} // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom,
                                    uptr size) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    VReport(1, "__sanitizer_start_switch_fiber called from unknown thread\n");
    return;
  }
  t->StartSwitchFiber((FakeStack**)fakestacksave, (uptr)bottom, size);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_finish_switch_fiber(void* fakestack,
                                     const void **bottom_old,
                                     uptr *size_old) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    VReport(1, "__sanitizer_finish_switch_fiber called from unknown thread\n");
    return;
  }
  t->FinishSwitchFiber((FakeStack*)fakestack,
                       (uptr*)bottom_old,
                       (uptr*)size_old);
}
}