#pragma once

/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Memory layout for locks of all types.

// The vsoc::layout namespace indicates that these are shared memory structure
// definitions. The #include's given below are strictly limited, as are the
// types that can be referenced below.

// For _mm_pause()
#if defined(__SSE2__)
#include <x86intrin.h>
#define _pause() _mm_pause()
#elif defined(__arm__) || defined(__aarch64__)
#include <arm_acle.h>
#define _pause() __yield()
#endif

#include <atomic>
#include <cstdint>

#include "common/vsoc/shm/base.h"

// Host userspace, guest userspace, and the guest kernel must all agree on
// the relationship between std::atomic and atomic_t. That's hard to do without
// examining assembly, and we can't really examine atomic_t outside of the
// kernel tree, but we can at least assert that the host and the guest
// agree on a size.
static_assert(sizeof(std::atomic<uint32_t>) == 4, "std::atomic size mismatch");

namespace vsoc {

class RegionView;

namespace layout {

/**
 * Lock that causes threads to busy loop rather than sleeping.
 * This lock should never be used when the amount of work in the critical
 * section cannot be bounded.
 */
class SpinLock {
 public:
  static constexpr size_t layout_size = 4;

  /**
   * Acquire the spinlock on the queue. This will effectively block all
   * readers and writers.
   */
  void Lock() {
    while (1) {
      uint32_t expected = 0;
      if (lock_.compare_exchange_strong(expected, Sides::OurSide)) {
        return;
      }
      _pause();
    }
  }

  /**
   * Drop the lock iff it is currently held by this side. Used by
   * recovery code that cleans up regions in the event of a reboot
   * (guest side) or a service restart (host side).
   *
   * The caller must ensure that no other threads on its side
   * (e.g. guest/host) are using the window.
   */
  bool Recover() {
    uint32_t expected = Sides::OurSide;
    return lock_.compare_exchange_strong(expected, 0);
  }

  /**
   * Release the spinlock.
   */
  void Unlock() {
    lock_ = 0;
  }

 protected:
  std::atomic<uint32_t> lock_;
};
ASSERT_SHM_COMPATIBLE(SpinLock);

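// Illustrative sketch, not part of the original header and not a shared
// memory layout type: a minimal RAII guard pairing SpinLock::Lock() with
// SpinLock::Unlock(), so the lock is released on every path out of a
// bounded critical section. The guard name is an assumption introduced
// here for demonstration only.
class SpinLockGuard {
 public:
  explicit SpinLockGuard(SpinLock* lock) : lock_(lock) { lock_->Lock(); }
  ~SpinLockGuard() { lock_->Unlock(); }
  SpinLockGuard(const SpinLockGuard&) = delete;
  SpinLockGuard& operator=(const SpinLockGuard&) = delete;

 private:
  SpinLock* lock_;  // Not owned; must outlive the guard.
};
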
/**
 * This is a generic synchronization primitive that provides space for the
 * owner of the lock to write platform-specific information.
 */
class WaitingLockBase {
 public:
  static constexpr size_t layout_size = 40;

 protected:
  // Common code to handle locking.
  // Must be called with the kernel's thread id.
  // Returns true if the lock was acquired. In this case the value in
  // expected_value is undefined.
  // Returns false if locking failed. The value discovered in the lock word
  // is returned in expected_value, and should probably be used in a
  // conditional sleep (see the illustrative sketch after this class).
  bool TryLock(uint32_t tid, uint32_t* expected_value);

  // Common code to handle unlocking.
  // Must be called with the kernel's thread id.
  // Returns the sides that should be signalled, or 0.
  Sides UnlockCommon(uint32_t tid);

  // Common code to recover single-sided locks.
  bool RecoverSingleSided();

  // Non-zero values in this word indicate that the lock is in use.
  // This is 32 bits for compatibility with futex().
  std::atomic<uint32_t> lock_uint32_;

  // Pad so we line up with glibc's pthread_mutex_t and can share the same
  // queue. These fields may be redefined at any point in the future. They
  // should not be used.
 private:
// These fields are known to be unused and are provided for compatibility
// with glibc's locks.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-private-field"
  uint32_t reserved_1_;
  char reserved_2_[16];
  // Provide scratch space for the owner of the lock. The content of this space
  // is undefined when the lock is acquired. The owner may write to and read
  // from it while it holds the lock, but must relinquish control before
  // releasing the lock.
  //
  // This is intended to support Linux robust futexes. See the documentation
  // in the kernel tree:
  //   Documentation/robust-futex-ABI.txt
 public:
  int64_t owner_scratch_[2];
#pragma clang diagnostic pop
};
ASSERT_SHM_COMPATIBLE(WaitingLockBase);

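// Illustrative sketch, not part of the original header: how a per-side
// subclass might pair TryLock() with a futex-style conditional sleep. The
// gettid() and syscall(SYS_futex, ...) calls are assumptions for
// illustration; the real Lock() implementations live outside this header.
//
//   void Lock() {
//     uint32_t tid = gettid();
//     uint32_t expected;
//     while (!TryLock(tid, &expected)) {
//       // Sleep only while the lock word still holds the observed value;
//       // lock_uint32_ is 32 bits precisely so futex() can wait on it.
//       syscall(SYS_futex, reinterpret_cast<uint32_t*>(&lock_uint32_),
//               FUTEX_WAIT, expected, nullptr, nullptr, 0);
//     }
//   }
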
/**
 * GuestLocks can be acquired and released only on the guest. They reside
 * in the shared memory window because multiple guest processes may need
 * to coordinate activities in certain shared memory regions.
 *
 * Representing this as a concrete type allows for some optimizations when
 * signalling on the lock.
 */
class GuestLock : public WaitingLockBase {
 public:
  static constexpr size_t layout_size = WaitingLockBase::layout_size;

#ifndef CUTTLEFISH_HOST
  void Lock();
  void Unlock();
  /**
   * Drop the lock iff it is currently held. Used by
   * recovery code that cleans up regions in the event of a reboot.
   *
   * The caller must ensure that no other threads on its side
   * (e.g. guest/host) are using the window.
   */
  bool Recover();
#endif
};
ASSERT_SHM_COMPATIBLE(GuestLock);

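// Illustrative sketch, not part of the original header: guest-side code
// guarding a region with a GuestLock embedded in that region's layout.
// `MyRegionLayout` and its `guest_lock` field are hypothetical names used
// only for demonstration.
//
//   #ifndef CUTTLEFISH_HOST
//   void UpdateGuestState(MyRegionLayout* layout) {
//     layout->guest_lock.Lock();
//     // ... mutate state coordinated among guest processes ...
//     layout->guest_lock.Unlock();
//   }
//   #endif
//
// HostLock below mirrors this pattern when CUTTLEFISH_HOST is defined.
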
/**
 * HostLocks can be acquired and released only on the host. They reside
 * in the shared memory window because multiple host processes may need
 * to coordinate activities in certain shared memory regions.
 *
 * Representing this as a concrete type allows for some optimizations when
 * signalling on the lock.
 */
class HostLock : public WaitingLockBase {
 public:
  static constexpr size_t layout_size = WaitingLockBase::layout_size;

#ifdef CUTTLEFISH_HOST
  void Lock();
  void Unlock();
  /**
   * Drop the lock iff it is currently held. Used by
   * recovery code that cleans up regions in the event of a daemon
   * restart.
   *
   * The caller must ensure that no other threads on its side
   * (e.g. guest/host) are using the window.
   */
  bool Recover();
#endif
};
ASSERT_SHM_COMPATIBLE(HostLock);

/**
 * GuestAndHostLocks can be acquired and released on either side of the
 * shared memory window. The locks attempt to enforce fairness by using
 * a round-trip signal:
 *
 *   When a guest releases a lock, this code sends a signal to wake the host,
 *   but not other guest waiters.
 *
 *   The wake handler on the host wakes up any local waiters and then reposts
 *   the signal to the guest.
 *
 *   When the guest receives the signal from the host, it then wakes up
 *   any waiters.
 *
 * A similar scenario applies when the host releases a lock with guest waiters.
 *
 * Signalling across the shared memory window twice has non-trivial cost.
 * There are some optimizations in the code to prevent the full round-trip
 * if the process releasing the lock can confirm that there are no waiters on
 * the other side.
 *
 * Representing this as a concrete type allows for some optimizations when
 * signalling on the lock.
 */
class GuestAndHostLock : public WaitingLockBase {
 public:
  static constexpr size_t layout_size = WaitingLockBase::layout_size;

  void Lock(RegionView*);
  void Unlock(RegionView*);
  /**
   * Drop the lock iff it is currently held by this side. Used by
   * recovery code that cleans up regions in the event of a reboot
   * (guest side) or a service restart (host side).
   *
   * The caller must ensure that no other threads on its side
   * (e.g. guest/host) are using the window.
   */
  bool Recover(RegionView*);
};
ASSERT_SHM_COMPATIBLE(GuestAndHostLock);

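// Illustrative sketch, not part of the original header and not a layout
// type: a cross-side critical section passes the RegionView that owns the
// window so Lock()/Unlock() can signal waiters on both sides. The function
// and parameter names are assumptions for demonstration.
inline void IncrementSharedCounter(RegionView* region, GuestAndHostLock* lock,
                                   uint32_t* counter) {
  lock->Lock(region);
  ++*counter;  // Plain access is safe here: both sides serialize on the lock.
  lock->Unlock(region);
}
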
}  // namespace layout
}  // namespace vsoc