// Copyright 2016 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "aemu/base/Compiler.h"

namespace android {

// In QEMU2, each virtual CPU runs on its own host thread, but all these
// threads are synchronized through a global mutex, which allows the virtual
// device code to not care about them.
//
// However, if you have to call, from any other thread, a low-level QEMU
// function that operates on virtual devices (e.g. some Android pipe-related
// functions), you must acquire the global mutex before doing so, and release
// it afterwards.
//
// This header provides a convenience interface class you can use to do
// just that, i.e.:
//
// 1) To operate on the lock, call VmLock::get() to retrieve the
//    current VmLock instance, then invoke its lock() and unlock()
//    methods.
//
// 2) Glue code should call VmLock::set() to inject its own implementation
//    into the process. The default implementation doesn't do anything.

class VmLock {
    DISALLOW_COPY_ASSIGN_AND_MOVE(VmLock);
public:
    VmLock() = default;
    virtual ~VmLock();

    // Lock the VM global mutex.
    virtual void lock() {}

    // Unlock the VM global mutex.
    virtual void unlock() {}

    // Returns true iff the lock is held by the current thread, false
    // otherwise. Note that a correct implementation doesn't only depend
    // on the number of times that VmLock::lock() and VmLock::unlock()
    // were called, but also on other QEMU threads that act on the
    // global lock.
    virtual bool isLockedBySelf() const { return true; }

    // Return the current VmLock instance. Cannot return nullptr.
    // NOT thread-safe, but we don't expect multiple threads to call this
    // concurrently at init time, and the worst that can happen is to leak
    // a single instance.
    static VmLock* get();

    // Returns whether or not there is a VmLock instance.
    // Does not instantiate one.
    static bool hasInstance();

    // Set a new VmLock instance. Returns the old value, which cannot be
    // nullptr and can be deleted by the caller. If |vmLock| is nullptr, a
    // new default instance is created. NOTE: not thread-safe with regards
    // to get().
    static VmLock* set(VmLock* vmLock);
};

// Convenience class to perform scoped VM locking.
class ScopedVmLock {
    DISALLOW_COPY_ASSIGN_AND_MOVE(ScopedVmLock);
public:
    ScopedVmLock(VmLock* vmLock = VmLock::get()) : mVmLock(vmLock) {
        mVmLock->lock();
    }

    ~ScopedVmLock() {
        mVmLock->unlock();
    }

private:
    VmLock* const mVmLock;
};
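// Illustrative sketch (not part of this header's API): a caller on a
// non-QEMU thread can wrap a virtual-device operation in a ScopedVmLock
// so the global mutex is held for the duration of the scope. The function
// and parameter names below are hypothetical.
//
//   void touchDeviceStateFromAuxThread(void* hwPipe) {
//       android::ScopedVmLock lock;  // calls VmLock::get()->lock()
//       // ... operate on virtual device / pipe state while locked ...
//   }  // lock released here via unlock() in ~ScopedVmLock()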
// Convenience class to perform scoped VM locking (but does not try
// to lock twice).
class RecursiveScopedVmLock {
    DISALLOW_COPY_ASSIGN_AND_MOVE(RecursiveScopedVmLock);
public:
    RecursiveScopedVmLock(VmLock* vmLock = VmLock::get()) {
        if (vmLock->isLockedBySelf()) {
            mVmLock = nullptr;
        } else {
            mVmLock = vmLock;
            vmLock->lock();
        }
    }

    ~RecursiveScopedVmLock() {
        if (mVmLock) {
            mVmLock->unlock();
        }
    }

private:
    VmLock* mVmLock;
};

// Convenience class to perform scoped VM locking (but does not try
// to lock twice), which no-ops if there is no VmLock instance.
class RecursiveScopedVmLockIfInstance {
    DISALLOW_COPY_ASSIGN_AND_MOVE(RecursiveScopedVmLockIfInstance);
public:
    RecursiveScopedVmLockIfInstance() {
        if (!VmLock::hasInstance()) return;

        VmLock* vmLock = VmLock::get();

        if (vmLock->isLockedBySelf()) {
            mVmLock = nullptr;
        } else {
            mVmLock = vmLock;
            vmLock->lock();
        }
    }

    ~RecursiveScopedVmLockIfInstance() {
        if (mVmLock) {
            mVmLock->unlock();
        }
    }

private:
    VmLock* mVmLock = nullptr;
};

// Another convenience class for code that may run either under the lock
// or not, but needs to ensure that some part of it runs without the
// VmLock held.
class ScopedVmUnlock {
    DISALLOW_COPY_ASSIGN_AND_MOVE(ScopedVmUnlock);
public:
    ScopedVmUnlock(VmLock* vmLock = VmLock::get()) {
        if (vmLock->isLockedBySelf()) {
            mVmLock = vmLock;
            vmLock->unlock();
        } else {
            mVmLock = nullptr;
        }
    }

    ~ScopedVmUnlock() {
        if (mVmLock) {
            mVmLock->lock();
        }
    }

private:
    VmLock* mVmLock;
};

}  // namespace android