1 // Copyright 2016 The SwiftShader Authors. All Rights Reserved.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //    http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 #ifndef sw_MutexLock_hpp
16 #define sw_MutexLock_hpp
17 
18 #include "Thread.hpp"
19 
20 #if defined(__linux__)
21 // Use a pthread mutex on Linux. Since many processes may use SwiftShader
22 // at the same time it's best to just have the scheduler overhead.
23 #include <pthread.h>
24 
25 namespace sw
26 {
27 	class MutexLock
28 	{
29 	public:
MutexLock()30 		MutexLock()
31 		{
32 			pthread_mutex_init(&mutex, NULL);
33 		}
34 
~MutexLock()35 		~MutexLock()
36 		{
37 			pthread_mutex_destroy(&mutex);
38 		}
39 
attemptLock()40 		bool attemptLock()
41 		{
42 			return pthread_mutex_trylock(&mutex) == 0;
43 		}
44 
lock()45 		void lock()
46 		{
47 			pthread_mutex_lock(&mutex);
48 		}
49 
unlock()50 		void unlock()
51 		{
52 			pthread_mutex_unlock(&mutex);
53 		}
54 
55 	private:
56 		pthread_mutex_t mutex;
57 	};
58 }
59 
60 #else   // !__linux__
61 
62 #include <atomic>
63 
64 namespace sw
65 {
66 	class BackoffLock
67 	{
68 	public:
BackoffLock()69 		BackoffLock()
70 		{
71 			mutex = 0;
72 		}
73 
attemptLock()74 		bool attemptLock()
75 		{
76 			if(!isLocked())
77 			{
78 				if(mutex.exchange(true) == false)
79 				{
80 					return true;
81 				}
82 			}
83 
84 			return false;
85 		}
86 
lock()87 		void lock()
88 		{
89 			int backoff = 1;
90 
91 			while(!attemptLock())
92 			{
93 				if(backoff <= 64)
94 				{
95 					for(int i = 0; i < backoff; i++)
96 					{
97 						nop();
98 						nop();
99 						nop();
100 						nop();
101 						nop();
102 
103 						nop();
104 						nop();
105 						nop();
106 						nop();
107 						nop();
108 
109 						nop();
110 						nop();
111 						nop();
112 						nop();
113 						nop();
114 
115 						nop();
116 						nop();
117 						nop();
118 						nop();
119 						nop();
120 
121 						nop();
122 						nop();
123 						nop();
124 						nop();
125 						nop();
126 
127 						nop();
128 						nop();
129 						nop();
130 						nop();
131 						nop();
132 
133 						nop();
134 						nop();
135 						nop();
136 						nop();
137 						nop();
138 					}
139 
140 					backoff *= 2;
141 				}
142 				else
143 				{
144 					Thread::yield();
145 
146 					backoff = 1;
147 				}
148 			};
149 		}
150 
unlock()151 		void unlock()
152 		{
153 			mutex.store(false, std::memory_order_release);
154 		}
155 
isLocked()156 		bool isLocked()
157 		{
158 			return mutex.load(std::memory_order_acquire);
159 		}
160 
161 	private:
162 		struct
163 		{
164 			// Ensure that the mutex variable is on its own 64-byte cache line to avoid false sharing
165 			// Padding must be public to avoid compiler warnings
166 			volatile int padding1[16];
167 			std::atomic<bool> mutex;
168 			volatile int padding2[15];
169 		};
170 	};
171 
172 	using MutexLock = BackoffLock;
173 }
174 
175 #endif   // !__linux__
176 
177 class LockGuard
178 {
179 public:
LockGuard(sw::MutexLock & mutex)180 	explicit LockGuard(sw::MutexLock &mutex) : mutex(&mutex)
181 	{
182 		mutex.lock();
183 	}
184 
LockGuard(sw::MutexLock * mutex)185 	explicit LockGuard(sw::MutexLock *mutex) : mutex(mutex)
186 	{
187 		if (mutex) mutex->lock();
188 	}
189 
~LockGuard()190 	~LockGuard()
191 	{
192 		if (mutex) mutex->unlock();
193 	}
194 
195 protected:
196 	sw::MutexLock *mutex;
197 };
198 
199 #endif   // sw_MutexLock_hpp
200