// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_LOCK_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_LOCK_H_

#include <atomic>
#include <type_traits>

#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/pkey.h"
#include "base/allocator/partition_allocator/spinning_mutex.h"
#include "build/build_config.h"

namespace partition_alloc::internal {

class PA_LOCKABLE Lock {
 public:
  inline constexpr Lock();
  void Acquire() PA_EXCLUSIVE_LOCK_FUNCTION() {
#if BUILDFLAG(PA_DCHECK_IS_ON)
#if BUILDFLAG(ENABLE_PKEYS)
    LiftPkeyRestrictionsScope lift_pkey_restrictions;
#endif

    // When PartitionAlloc is used as malloc(), it can easily become
    // reentrant. For instance, a DCHECK() triggers in external code (such as
    // base::Lock). DCHECK() error message formatting allocates, which calls
    // into PartitionAlloc, and we get reentrancy, in this case infinite
    // recursion.
    //
    // To avoid that, crash quickly when the code becomes reentrant.
    base::PlatformThreadRef current_thread = base::PlatformThread::CurrentRef();
    if (!lock_.Try()) {
      // The lock wasn't free when we tried to acquire it. This can be because
      // either another thread or *this* thread was holding it.
      //
      // If it's this thread holding it, then it cannot have become free in
      // the meantime, and the current value of |owning_thread_ref_| is valid,
      // as it was set by this thread. Assuming that writes to
      // |owning_thread_ref_| are atomic, if it's us, we are trying to
      // recursively acquire a non-recursive lock.
      //
      // Note that we don't rely on a DCHECK() in base::Lock(), as it would
      // itself allocate. Without this check, a reentrancy issue would hang
      // rather than crash on Linux.
      if (PA_UNLIKELY(owning_thread_ref_.load(std::memory_order_acquire) ==
                      current_thread)) {
        // Trying to acquire the lock while this thread already holds it:
        // reentrancy issue.
        PA_IMMEDIATE_CRASH();
      }
      lock_.Acquire();
    }
    owning_thread_ref_.store(current_thread, std::memory_order_release);
#else
    lock_.Acquire();
#endif
  }

  void Release() PA_UNLOCK_FUNCTION() {
#if BUILDFLAG(PA_DCHECK_IS_ON)
    owning_thread_ref_.store(base::PlatformThreadRef(),
                             std::memory_order_release);
#endif
    lock_.Release();
  }
  void AssertAcquired() const PA_ASSERT_EXCLUSIVE_LOCK() {
    lock_.AssertAcquired();
#if BUILDFLAG(PA_DCHECK_IS_ON)
#if BUILDFLAG(ENABLE_PKEYS)
    LiftPkeyRestrictionsScope lift_pkey_restrictions;
#endif
    PA_DCHECK(owning_thread_ref_.load(std::memory_order_acquire) ==
              base::PlatformThread::CurrentRef());
#endif
  }

  // Resets the lock, which must be held by the caller, back to its initial
  // unlocked state without a matching Release(); useful when a held lock must
  // be made usable again, e.g. in a child process after fork(), where the
  // state inherited from the parent may be stale.
  void Reinit() PA_UNLOCK_FUNCTION() {
    lock_.AssertAcquired();
#if BUILDFLAG(PA_DCHECK_IS_ON)
    owning_thread_ref_.store(base::PlatformThreadRef(),
                             std::memory_order_release);
#endif
    lock_.Reinit();
  }

 private:
  SpinningMutex lock_;

#if BUILDFLAG(PA_DCHECK_IS_ON)
  // Should in theory be protected by |lock_|, but we need to read it to detect
  // recursive lock acquisition (and thus, the allocator becoming reentrant).
  std::atomic<base::PlatformThreadRef> owning_thread_ref_ =
      base::PlatformThreadRef();
#endif
};
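
// Illustrative usage of Lock (a sketch, not part of the original header; the
// variable and the critical section below are hypothetical):
//
//   Lock lock;  // Fine as a global: constexpr constructor, trivially
//               // destructible (see the static_assert below).
//
//   lock.Acquire();
//   // ... mutate state guarded by |lock| ...
//   lock.Release();
//
// With PA_DCHECK_IS_ON, a second Acquire() on the same thread without an
// intervening Release() terminates via PA_IMMEDIATE_CRASH() instead of
// spinning forever on the non-recursive SpinningMutex.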

class PA_SCOPED_LOCKABLE ScopedGuard {
 public:
  explicit ScopedGuard(Lock& lock) PA_EXCLUSIVE_LOCK_FUNCTION(lock)
      : lock_(lock) {
    lock_.Acquire();
  }
  ~ScopedGuard() PA_UNLOCK_FUNCTION() { lock_.Release(); }

 private:
  Lock& lock_;
};
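
// Illustrative usage of ScopedGuard (a sketch; DoWork() and |lock| are
// hypothetical):
//
//   void DoWork(Lock& lock) {
//     ScopedGuard guard(lock);  // Acquire() in the constructor.
//     // ... critical section ...
//   }  // Release() in the destructor, on every exit path, including early
//      // returns.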

class PA_SCOPED_LOCKABLE ScopedUnlockGuard {
 public:
  explicit ScopedUnlockGuard(Lock& lock) PA_UNLOCK_FUNCTION(lock)
      : lock_(lock) {
    lock_.Release();
  }
  ~ScopedUnlockGuard() PA_EXCLUSIVE_LOCK_FUNCTION() { lock_.Acquire(); }

 private:
  Lock& lock_;
};
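
// Illustrative usage of ScopedUnlockGuard, the inverse of ScopedGuard: it
// temporarily drops a lock that is already held (a sketch; the names below
// are hypothetical):
//
//   ScopedGuard guard(lock);
//   // ... critical section ...
//   {
//     ScopedUnlockGuard unlock(lock);  // Release() in the constructor.
//     CallThatMustNotRunUnderLock();
//   }  // Acquire() in the destructor; |lock| is held again past this point.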

constexpr Lock::Lock() = default;

// We want PartitionRoot not to have a global destructor, so this class must
// not have one either.
static_assert(std::is_trivially_destructible<Lock>::value, "");

}  // namespace partition_alloc::internal

namespace base {
namespace internal {

using PartitionLock = ::partition_alloc::internal::Lock;
using PartitionAutoLock = ::partition_alloc::internal::ScopedGuard;

}  // namespace internal
}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_LOCK_H_