/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cody@lunarg.com>
 * Author: Mike Stroyan <mike@LunarG.com>
 */

#ifndef THREADING_H
#define THREADING_H
#include <condition_variable>
#include <mutex>
#include <unordered_map>
#include <vector>
#include "vk_layer_config.h"
#include "vk_layer_logging.h"

#if defined(__LP64__) || defined(_WIN64) || defined(__x86_64__) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || \
    defined(__aarch64__) || defined(__powerpc64__)
// If pointers are 64-bit, then there can be separate counters for each
// NONDISPATCHABLE_HANDLE type. Otherwise they are all typedefs of uint64_t.
#define DISTINCT_NONDISPATCHABLE_HANDLES
#endif
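
// For example, with 32-bit pointers VkBuffer and VkImage are both uint64_t, so
// counter<VkBuffer> and counter<VkImage> would name the same instantiation; in
// that configuration layer_data below keeps a single counter<uint64_t> instead.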

// Threading checker error codes
enum THREADING_CHECKER_ERROR {
    THREADING_CHECKER_NONE,                // Used for INFO & other non-error messages
    THREADING_CHECKER_MULTIPLE_THREADS,    // Object used simultaneously by multiple threads
    THREADING_CHECKER_SINGLE_THREAD_REUSE, // Object used simultaneously by recursion in a single thread
};

struct object_use_data {
    loader_platform_thread_id thread;
    int reader_count;
    int writer_count;
};
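
// Invariants maintained by counter<T> below: 'thread' records the most recent
// user of the object; an entry exists only while reader_count + writer_count
// is nonzero and is erased once both drop to zero, which is how waiting
// threads detect that the object has become free.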

struct layer_data;

namespace threading {
volatile bool vulkan_in_use = false;
volatile bool vulkan_multi_threaded = false;
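// Note: volatile provides no atomicity or ordering guarantees in C++, so this
// multi-thread detection is a best-effort heuristic rather than an exact one.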
// Starts a check of whether the application is using Vulkan from multiple threads.
inline bool startMultiThread() {
    if (vulkan_multi_threaded) {
        return true;
    }
    if (vulkan_in_use) {
        vulkan_multi_threaded = true;
        return true;
    }
    vulkan_in_use = true;
    return false;
}

// Finishes a check of whether the application is using Vulkan from multiple threads.
inline void finishMultiThread() { vulkan_in_use = false; }
} // namespace threading
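
// Illustrative use (a sketch, not part of this header): a caller would bracket
// work with these checks,
//     bool threaded = threading::startMultiThread();
//     // ... perform the Vulkan call ...
//     if (!threaded) threading::finishMultiThread();
// so that vulkan_in_use is cleared only on the path that set it.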
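// counter<T> tracks, per handle of type T, which threads are currently reading
// or writing it. startWrite/startRead report a THREADING error on a
// cross-thread collision and, when the error callback requests that the call
// be skipped, instead block on counter_condition until the object is free.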
template <typename T> class counter {
  public:
    const char *typeName;
    VkDebugReportObjectTypeEXT objectType;
    std::unordered_map<T, object_use_data> uses;
    std::mutex counter_lock;
    std::condition_variable counter_condition;
    void startWrite(debug_report_data *report_data, T object) {
        bool skipCall = false;
        loader_platform_thread_id tid = loader_platform_get_thread_id();
        std::unique_lock<std::mutex> lock(counter_lock);
        if (uses.find(object) == uses.end()) {
            // There is no current use of the object. Record writer thread.
            struct object_use_data *use_data = &uses[object];
            use_data->reader_count = 0;
            use_data->writer_count = 1;
            use_data->thread = tid;
        } else {
            struct object_use_data *use_data = &uses[object];
            if (use_data->reader_count == 0) {
                // There are no readers. Two writers just collided.
                if (use_data->thread != tid) {
                    skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
                                        /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
                                        "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld",
                                        typeName, use_data->thread, tid);
                    if (skipCall) {
                        // Wait for thread-safe access to object instead of skipping call.
                        while (uses.find(object) != uses.end()) {
                            counter_condition.wait(lock);
                        }
                        // There is now no current use of the object. Record writer thread.
                        struct object_use_data *use_data = &uses[object];
                        use_data->thread = tid;
                        use_data->reader_count = 0;
                        use_data->writer_count = 1;
                    } else {
                        // Continue with an unsafe use of the object.
                        use_data->thread = tid;
                        use_data->writer_count += 1;
                    }
                } else {
                    // This is either safe multiple use in one call, or recursive use.
                    // There is no way to make recursion safe. Just forge ahead.
                    use_data->writer_count += 1;
                }
            } else {
                // There are readers. This writer collided with them.
                if (use_data->thread != tid) {
                    skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
                                        /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
                                        "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld",
                                        typeName, use_data->thread, tid);
                    if (skipCall) {
                        // Wait for thread-safe access to object instead of skipping call.
                        while (uses.find(object) != uses.end()) {
                            counter_condition.wait(lock);
                        }
                        // There is now no current use of the object. Record writer thread.
                        struct object_use_data *use_data = &uses[object];
                        use_data->thread = tid;
                        use_data->reader_count = 0;
                        use_data->writer_count = 1;
                    } else {
                        // Continue with an unsafe use of the object.
                        use_data->thread = tid;
                        use_data->writer_count += 1;
                    }
                } else {
                    // This is either safe multiple use in one call, or recursive use.
                    // There is no way to make recursion safe. Just forge ahead.
                    use_data->writer_count += 1;
                }
            }
        }
    }

    void finishWrite(T object) {
        // Object is no longer in use
        std::unique_lock<std::mutex> lock(counter_lock);
        uses[object].writer_count -= 1;
        if ((uses[object].reader_count == 0) && (uses[object].writer_count == 0)) {
            uses.erase(object);
        }
        // Notify any waiting threads that this object may be safe to use
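        // (the lock is released first so a woken waiter can immediately
        // re-acquire counter_lock instead of blocking on it again)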
        lock.unlock();
        counter_condition.notify_all();
    }

    void startRead(debug_report_data *report_data, T object) {
        bool skipCall = false;
        loader_platform_thread_id tid = loader_platform_get_thread_id();
        std::unique_lock<std::mutex> lock(counter_lock);
        if (uses.find(object) == uses.end()) {
            // There is no current use of the object. Record reader count
            struct object_use_data *use_data = &uses[object];
            use_data->reader_count = 1;
            use_data->writer_count = 0;
            use_data->thread = tid;
        } else if (uses[object].writer_count > 0 && uses[object].thread != tid) {
            // There is a writer of the object.
            skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
                                /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
                                "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld", typeName,
                                uses[object].thread, tid);
            if (skipCall) {
                // Wait for thread-safe access to object instead of skipping call.
                while (uses.find(object) != uses.end()) {
                    counter_condition.wait(lock);
                }
                // There is no current use of the object. Record reader count
                struct object_use_data *use_data = &uses[object];
                use_data->reader_count = 1;
                use_data->writer_count = 0;
                use_data->thread = tid;
            } else {
                uses[object].reader_count += 1;
            }
        } else {
            // There are other readers of the object. Increase reader count
            uses[object].reader_count += 1;
        }
    }
    void finishRead(T object) {
        std::unique_lock<std::mutex> lock(counter_lock);
        uses[object].reader_count -= 1;
        if ((uses[object].reader_count == 0) && (uses[object].writer_count == 0)) {
            uses.erase(object);
        }
        // Notify any waiting threads that this object may be safe to use
        lock.unlock();
        counter_condition.notify_all();
    }
    counter(const char *name = "", VkDebugReportObjectTypeEXT type = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT) {
        typeName = name;
        objectType = type;
    }
};
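
// Illustrative use (a sketch, not a definitive pattern from this layer): an
// intercept guarding an externally synchronized queue parameter would do
//     my_data->c_VkQueue.startWrite(my_data->report_data, queue);
//     // ... make the down-chain Vulkan call ...
//     my_data->c_VkQueue.finishWrite(queue);
// while handles the spec allows to be used concurrently get startRead/finishRead.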

struct layer_data {
    VkInstance instance;

    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;
    // The following are for keeping track of the temporary callbacks that can
    // be used in vkCreateInstance and vkDestroyInstance:
    uint32_t num_tmp_callbacks;
    VkDebugReportCallbackCreateInfoEXT *tmp_dbg_create_infos;
    VkDebugReportCallbackEXT *tmp_callbacks;
    counter<VkCommandBuffer> c_VkCommandBuffer;
    counter<VkDevice> c_VkDevice;
    counter<VkInstance> c_VkInstance;
    counter<VkQueue> c_VkQueue;
#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
    counter<VkBuffer> c_VkBuffer;
    counter<VkBufferView> c_VkBufferView;
    counter<VkCommandPool> c_VkCommandPool;
    counter<VkDescriptorPool> c_VkDescriptorPool;
    counter<VkDescriptorSet> c_VkDescriptorSet;
    counter<VkDescriptorSetLayout> c_VkDescriptorSetLayout;
    counter<VkDeviceMemory> c_VkDeviceMemory;
    counter<VkEvent> c_VkEvent;
    counter<VkFence> c_VkFence;
    counter<VkFramebuffer> c_VkFramebuffer;
    counter<VkImage> c_VkImage;
    counter<VkImageView> c_VkImageView;
    counter<VkPipeline> c_VkPipeline;
    counter<VkPipelineCache> c_VkPipelineCache;
    counter<VkPipelineLayout> c_VkPipelineLayout;
    counter<VkQueryPool> c_VkQueryPool;
    counter<VkRenderPass> c_VkRenderPass;
    counter<VkSampler> c_VkSampler;
    counter<VkSemaphore> c_VkSemaphore;
    counter<VkShaderModule> c_VkShaderModule;
    counter<VkDebugReportCallbackEXT> c_VkDebugReportCallbackEXT;
#else  // DISTINCT_NONDISPATCHABLE_HANDLES
    counter<uint64_t> c_uint64_t;
#endif // DISTINCT_NONDISPATCHABLE_HANDLES
    layer_data()
        : report_data(nullptr), num_tmp_callbacks(0), tmp_dbg_create_infos(nullptr), tmp_callbacks(nullptr),
          c_VkCommandBuffer("VkCommandBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT),
          c_VkDevice("VkDevice", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT),
          c_VkInstance("VkInstance", VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT),
          c_VkQueue("VkQueue", VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT),
#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
          c_VkBuffer("VkBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT),
          c_VkBufferView("VkBufferView", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT),
          c_VkCommandPool("VkCommandPool", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT),
          c_VkDescriptorPool("VkDescriptorPool", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT),
          c_VkDescriptorSet("VkDescriptorSet", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT),
          c_VkDescriptorSetLayout("VkDescriptorSetLayout", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT),
          c_VkDeviceMemory("VkDeviceMemory", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT),
          c_VkEvent("VkEvent", VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT), c_VkFence("VkFence", VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT),
          c_VkFramebuffer("VkFramebuffer", VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT),
          c_VkImage("VkImage", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT),
          c_VkImageView("VkImageView", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT),
          c_VkPipeline("VkPipeline", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT),
          c_VkPipelineCache("VkPipelineCache", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT),
          c_VkPipelineLayout("VkPipelineLayout", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT),
          c_VkQueryPool("VkQueryPool", VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT),
          c_VkRenderPass("VkRenderPass", VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT),
          c_VkSampler("VkSampler", VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT),
          c_VkSemaphore("VkSemaphore", VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT),
          c_VkShaderModule("VkShaderModule", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT),
          c_VkDebugReportCallbackEXT("VkDebugReportCallbackEXT", VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT)
#else  // DISTINCT_NONDISPATCHABLE_HANDLES
          c_uint64_t("NON_DISPATCHABLE_HANDLE", VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT)
#endif // DISTINCT_NONDISPATCHABLE_HANDLES
    {};
};

#define WRAPPER(type)                                                                                                  \
    static void startWriteObject(struct layer_data *my_data, type object) {                                           \
        my_data->c_##type.startWrite(my_data->report_data, object);                                                    \
    }                                                                                                                  \
    static void finishWriteObject(struct layer_data *my_data, type object) { my_data->c_##type.finishWrite(object); } \
    static void startReadObject(struct layer_data *my_data, type object) {                                            \
        my_data->c_##type.startRead(my_data->report_data, object);                                                     \
    }                                                                                                                  \
    static void finishReadObject(struct layer_data *my_data, type object) { my_data->c_##type.finishRead(object); }
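// For example, WRAPPER(VkQueue) defines startWriteObject, finishWriteObject,
// startReadObject, and finishReadObject overloads that forward VkQueue handles
// to layer_data::c_VkQueue.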
WRAPPER(VkDevice)
WRAPPER(VkInstance)
WRAPPER(VkQueue)
#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
WRAPPER(VkBuffer)
WRAPPER(VkBufferView)
WRAPPER(VkCommandPool)
WRAPPER(VkDescriptorPool)
WRAPPER(VkDescriptorSet)
WRAPPER(VkDescriptorSetLayout)
WRAPPER(VkDeviceMemory)
WRAPPER(VkEvent)
WRAPPER(VkFence)
WRAPPER(VkFramebuffer)
WRAPPER(VkImage)
WRAPPER(VkImageView)
WRAPPER(VkPipeline)
WRAPPER(VkPipelineCache)
WRAPPER(VkPipelineLayout)
WRAPPER(VkQueryPool)
WRAPPER(VkRenderPass)
WRAPPER(VkSampler)
WRAPPER(VkSemaphore)
WRAPPER(VkShaderModule)
WRAPPER(VkDebugReportCallbackEXT)
#else  // DISTINCT_NONDISPATCHABLE_HANDLES
WRAPPER(uint64_t)
#endif // DISTINCT_NONDISPATCHABLE_HANDLES

static std::unordered_map<void *, layer_data *> layer_data_map;
static std::mutex command_pool_lock;
static std::unordered_map<VkCommandBuffer, VkCommandPool> command_pool_map;
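// command_pool_map is expected to be populated by the layer's
// vkAllocateCommandBuffers intercept and pruned by vkFreeCommandBuffers (not
// shown in this header), mapping each command buffer to its parent pool.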

// VkCommandBuffer needs special handling: using a command buffer also
// implicitly uses the command pool it was allocated from.
static void startWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = true) {
    if (lockPool) {
        std::unique_lock<std::mutex> lock(command_pool_lock);
        VkCommandPool pool = command_pool_map[object];
        lock.unlock();
        startWriteObject(my_data, pool);
    }
    my_data->c_VkCommandBuffer.startWrite(my_data->report_data, object);
}
static void finishWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = true) {
    my_data->c_VkCommandBuffer.finishWrite(object);
    if (lockPool) {
        std::unique_lock<std::mutex> lock(command_pool_lock);
        VkCommandPool pool = command_pool_map[object];
        lock.unlock();
        finishWriteObject(my_data, pool);
    }
}
static void startReadObject(struct layer_data *my_data, VkCommandBuffer object) {
    std::unique_lock<std::mutex> lock(command_pool_lock);
    VkCommandPool pool = command_pool_map[object];
    lock.unlock();
    startReadObject(my_data, pool);
    my_data->c_VkCommandBuffer.startRead(my_data->report_data, object);
}
static void finishReadObject(struct layer_data *my_data, VkCommandBuffer object) {
    my_data->c_VkCommandBuffer.finishRead(object);
    std::unique_lock<std::mutex> lock(command_pool_lock);
    VkCommandPool pool = command_pool_map[object];
    lock.unlock();
    finishReadObject(my_data, pool);
}
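
// Note the symmetric ordering above: start marks the pool in use before the
// command buffer, and finish releases the command buffer before the pool, so
// the pool's in-use interval always encloses the buffer's.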
#endif // THREADING_H