1 /**************************************************************************
2 *
3 * Copyright 2007-2008 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /**
29 * \file
30 * Debug buffer manager to detect buffer under- and overflows.
31 *
32 * \author Jose Fonseca <jfonseca@vmware.com>
33 */
34
35
36 #include "pipe/p_compiler.h"
37 #include "util/u_debug.h"
38 #include "os/os_thread.h"
39 #include "util/u_math.h"
40 #include "util/u_memory.h"
41 #include "util/list.h"
42 #include "util/u_debug_stack.h"
43 #include <inttypes.h>
44
45 #include "pb_buffer.h"
46 #include "pb_bufmgr.h"
47
48
49 #ifdef DEBUG
50
51
52 #define PB_DEBUG_CREATE_BACKTRACE 8
53 #define PB_DEBUG_MAP_BACKTRACE 8
54
55
56 /**
57 * Convenience macro (type safe).
58 */
59 #define SUPER(__derived) (&(__derived)->base)
60
61
62 struct pb_debug_manager;
63
64
/**
 * Wrapper around a pipe buffer that surrounds the client-visible data with
 * guard areas in order to detect buffer under- and overflows.
 */
struct pb_debug_buffer
{
   struct pb_buffer base;

   /** Underlying buffer, sized underflow_size + base.size + overflow_size. */
   struct pb_buffer *buffer;
   /** Manager that created this buffer and tracks it in its list. */
   struct pb_debug_manager *mgr;

   /** Bytes of guard area placed before the client data. */
   pb_size underflow_size;
   /** Bytes of guard area placed after the client data. */
   pb_size overflow_size;

   /** Call stack captured at creation time, dumped when corruption is found. */
   struct debug_stack_frame create_backtrace[PB_DEBUG_CREATE_BACKTRACE];

   /** Protects map_count and map_backtrace. */
   mtx_t mutex;
   /** Number of outstanding map() calls on this buffer. */
   unsigned map_count;
   /** Call stack of the most recent map() call. */
   struct debug_stack_frame map_backtrace[PB_DEBUG_MAP_BACKTRACE];

   /** Node in pb_debug_manager::list. */
   struct list_head head;
};
86
87
/**
 * Buffer manager that wraps another manager (the provider) and pads every
 * allocation with guard areas so out-of-bounds CPU writes can be detected.
 */
struct pb_debug_manager
{
   struct pb_manager base;

   /** Manager that performs the actual (enlarged) allocations. */
   struct pb_manager *provider;

   /** Guard bytes requested before each buffer's client data. */
   pb_size underflow_size;
   /** Guard bytes requested after each buffer's client data. */
   pb_size overflow_size;

   /** Protects list. */
   mtx_t mutex;
   /** All live pb_debug_buffer instances, linked through their head member. */
   struct list_head list;
};
100
101
/**
 * Downcast a generic pb_buffer to its debug wrapper.
 */
static inline struct pb_debug_buffer *
pb_debug_buffer(struct pb_buffer *buf)
{
   struct pb_debug_buffer *debug_buf = (struct pb_debug_buffer *)buf;
   assert(buf);
   return debug_buf;
}
108
109
/**
 * Downcast a generic pb_manager to the debug manager wrapper.
 */
static inline struct pb_debug_manager *
pb_debug_manager(struct pb_manager *mgr)
{
   struct pb_debug_manager *debug_mgr = (struct pb_debug_manager *)mgr;
   assert(mgr);
   return debug_mgr;
}
116
117
/**
 * Fixed byte pattern written into the guard areas; any deviation from it
 * indicates an out-of-bounds write.  Its size must stay a power of two:
 * fill_random_pattern relies on that when masking its index.
 */
static const uint8_t random_pattern[32] = {
   0xaf, 0xcf, 0xa5, 0xa2, 0xc2, 0x63, 0x15, 0x1a,
   0x7e, 0xe2, 0x7e, 0x84, 0x15, 0x49, 0xa2, 0x1e,
   0x49, 0x63, 0xf5, 0x52, 0x74, 0x66, 0x9e, 0xc4,
   0x6d, 0xcf, 0x2c, 0x4a, 0x74, 0xe6, 0xfd, 0x94
};
124
125
126 static inline void
fill_random_pattern(uint8_t * dst,pb_size size)127 fill_random_pattern(uint8_t *dst, pb_size size)
128 {
129 pb_size i = 0;
130 while(size--) {
131 *dst++ = random_pattern[i++];
132 i &= sizeof(random_pattern) - 1;
133 }
134 }
135
136
137 static inline boolean
check_random_pattern(const uint8_t * dst,pb_size size,pb_size * min_ofs,pb_size * max_ofs)138 check_random_pattern(const uint8_t *dst, pb_size size,
139 pb_size *min_ofs, pb_size *max_ofs)
140 {
141 boolean result = TRUE;
142 pb_size i;
143 *min_ofs = size;
144 *max_ofs = 0;
145 for(i = 0; i < size; ++i) {
146 if(*dst++ != random_pattern[i % sizeof(random_pattern)]) {
147 *min_ofs = MIN2(*min_ofs, i);
148 *max_ofs = MAX2(*max_ofs, i);
149 result = FALSE;
150 }
151 }
152 return result;
153 }
154
155
156 static void
pb_debug_buffer_fill(struct pb_debug_buffer * buf)157 pb_debug_buffer_fill(struct pb_debug_buffer *buf)
158 {
159 uint8_t *map;
160
161 map = pb_map(buf->buffer, PB_USAGE_CPU_WRITE, NULL);
162 assert(map);
163 if (map) {
164 fill_random_pattern(map, buf->underflow_size);
165 fill_random_pattern(map + buf->underflow_size + buf->base.size,
166 buf->overflow_size);
167 pb_unmap(buf->buffer);
168 }
169 }
170
171
/**
 * Check for under/over flows.
 *
 * Maps the underlying buffer, verifies that both guard areas still hold the
 * expected byte pattern, prints diagnostics plus the creation backtrace when
 * they do not, and restores the pattern so a later check starts clean.
 *
 * Should be called with the buffer unmapped.
 */
static void
pb_debug_buffer_check(struct pb_debug_buffer *buf)
{
   uint8_t *map;

   /* PB_USAGE_UNSYNCHRONIZED: presumably avoids stalling on pending work,
    * since only the guard bytes are inspected — confirm against pb_map docs. */
   map = pb_map(buf->buffer,
                PB_USAGE_CPU_READ |
                PB_USAGE_UNSYNCHRONIZED, NULL);
   assert(map);
   if (map) {
      boolean underflow, overflow;
      pb_size min_ofs, max_ofs;

      /* Guard area before the client data. */
      underflow = !check_random_pattern(map, buf->underflow_size,
                                        &min_ofs, &max_ofs);
      if(underflow) {
         debug_printf("buffer underflow (offset -%"PRIu64"%s to -%"PRIu64" bytes) detected\n",
                      buf->underflow_size - min_ofs,
                      min_ofs == 0 ? "+" : "",
                      buf->underflow_size - max_ofs);
      }

      /* Guard area after the client data. */
      overflow = !check_random_pattern(map + buf->underflow_size + buf->base.size,
                                       buf->overflow_size,
                                       &min_ofs, &max_ofs);
      if(overflow) {
         debug_printf("buffer overflow (size %"PRIu64" plus offset %"PRIu64" to %"PRIu64"%s bytes) detected\n",
                      buf->base.size,
                      min_ofs,
                      max_ofs,
                      max_ofs == buf->overflow_size - 1 ? "+" : "");
      }

      /* Point the developer at where the corrupted buffer was created. */
      if(underflow || overflow)
         debug_backtrace_dump(buf->create_backtrace, PB_DEBUG_CREATE_BACKTRACE);

      debug_assert(!underflow);
      debug_assert(!overflow);

      /* re-fill if not aborted */
      if(underflow)
         fill_random_pattern(map, buf->underflow_size);
      if(overflow)
         fill_random_pattern(map + buf->underflow_size + buf->base.size,
                             buf->overflow_size);

      pb_unmap(buf->buffer);
   }
}
226
227
/**
 * Final destruction of the wrapper: run one last guard check, unlink it
 * from the manager's list of live buffers and release the wrapped buffer.
 */
static void
pb_debug_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
   struct pb_debug_manager *mgr = buf->mgr;

   /* The last reference must already have been dropped before this vtbl
    * hook runs. */
   assert(!pipe_is_referenced(&buf->base.reference));

   pb_debug_buffer_check(buf);

   mtx_lock(&mgr->mutex);
   LIST_DEL(&buf->head);
   mtx_unlock(&mgr->mutex);

   mtx_destroy(&buf->mutex);

   /* Drop our reference to the underlying buffer. */
   pb_reference(&buf->buffer, NULL);
   FREE(buf);
}
247
248
249 static void *
pb_debug_buffer_map(struct pb_buffer * _buf,unsigned flags,void * flush_ctx)250 pb_debug_buffer_map(struct pb_buffer *_buf,
251 unsigned flags, void *flush_ctx)
252 {
253 struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
254 void *map;
255
256 pb_debug_buffer_check(buf);
257
258 map = pb_map(buf->buffer, flags, flush_ctx);
259 if (!map)
260 return NULL;
261
262 mtx_lock(&buf->mutex);
263 ++buf->map_count;
264 debug_backtrace_capture(buf->map_backtrace, 1, PB_DEBUG_MAP_BACKTRACE);
265 mtx_unlock(&buf->mutex);
266
267 return (uint8_t *)map + buf->underflow_size;
268 }
269
270
271 static void
pb_debug_buffer_unmap(struct pb_buffer * _buf)272 pb_debug_buffer_unmap(struct pb_buffer *_buf)
273 {
274 struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
275
276 mtx_lock(&buf->mutex);
277 assert(buf->map_count);
278 if(buf->map_count)
279 --buf->map_count;
280 mtx_unlock(&buf->mutex);
281
282 pb_unmap(buf->buffer);
283
284 pb_debug_buffer_check(buf);
285 }
286
287
288 static void
pb_debug_buffer_get_base_buffer(struct pb_buffer * _buf,struct pb_buffer ** base_buf,pb_size * offset)289 pb_debug_buffer_get_base_buffer(struct pb_buffer *_buf,
290 struct pb_buffer **base_buf,
291 pb_size *offset)
292 {
293 struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
294 pb_get_base_buffer(buf->buffer, base_buf, offset);
295 *offset += buf->underflow_size;
296 }
297
298
299 static enum pipe_error
pb_debug_buffer_validate(struct pb_buffer * _buf,struct pb_validate * vl,unsigned flags)300 pb_debug_buffer_validate(struct pb_buffer *_buf,
301 struct pb_validate *vl,
302 unsigned flags)
303 {
304 struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
305
306 mtx_lock(&buf->mutex);
307 if(buf->map_count) {
308 debug_printf("%s: attempting to validate a mapped buffer\n", __FUNCTION__);
309 debug_printf("last map backtrace is\n");
310 debug_backtrace_dump(buf->map_backtrace, PB_DEBUG_MAP_BACKTRACE);
311 }
312 mtx_unlock(&buf->mutex);
313
314 pb_debug_buffer_check(buf);
315
316 return pb_validate(buf->buffer, vl, flags);
317 }
318
319
320 static void
pb_debug_buffer_fence(struct pb_buffer * _buf,struct pipe_fence_handle * fence)321 pb_debug_buffer_fence(struct pb_buffer *_buf,
322 struct pipe_fence_handle *fence)
323 {
324 struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
325 pb_fence(buf->buffer, fence);
326 }
327
328
329 const struct pb_vtbl
330 pb_debug_buffer_vtbl = {
331 pb_debug_buffer_destroy,
332 pb_debug_buffer_map,
333 pb_debug_buffer_unmap,
334 pb_debug_buffer_validate,
335 pb_debug_buffer_fence,
336 pb_debug_buffer_get_base_buffer
337 };
338
339
340 static void
pb_debug_manager_dump_locked(struct pb_debug_manager * mgr)341 pb_debug_manager_dump_locked(struct pb_debug_manager *mgr)
342 {
343 struct list_head *curr, *next;
344 struct pb_debug_buffer *buf;
345
346 curr = mgr->list.next;
347 next = curr->next;
348 while(curr != &mgr->list) {
349 buf = LIST_ENTRY(struct pb_debug_buffer, curr, head);
350
351 debug_printf("buffer = %p\n", (void *) buf);
352 debug_printf(" .size = 0x%"PRIx64"\n", buf->base.size);
353 debug_backtrace_dump(buf->create_backtrace, PB_DEBUG_CREATE_BACKTRACE);
354
355 curr = next;
356 next = curr->next;
357 }
358
359 }
360
361
/**
 * Create a buffer of \p size client-visible bytes, backed by a larger
 * allocation from the provider that includes guard areas before and after
 * the data, seeded with the guard pattern.
 *
 * \return the wrapper buffer, or NULL on allocation failure.
 */
static struct pb_buffer *
pb_debug_manager_create_buffer(struct pb_manager *_mgr,
                               pb_size size,
                               const struct pb_desc *desc)
{
   struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
   struct pb_debug_buffer *buf;
   struct pb_desc real_desc;
   pb_size real_size;

   assert(size);
   assert(desc->alignment);

   buf = CALLOC_STRUCT(pb_debug_buffer);
   if (!buf)
      return NULL;

   /* Enlarge the allocation for both guard areas and make sure the CPU can
    * read/write them regardless of the usage the caller requested. */
   real_size = mgr->underflow_size + size + mgr->overflow_size;
   real_desc = *desc;
   real_desc.usage |= PB_USAGE_CPU_WRITE;
   real_desc.usage |= PB_USAGE_CPU_READ;

   buf->buffer = mgr->provider->create_buffer(mgr->provider,
                                              real_size,
                                              &real_desc);
   if(!buf->buffer) {
      FREE(buf);
#if 0
      mtx_lock(&mgr->mutex);
      debug_printf("%s: failed to create buffer\n", __FUNCTION__);
      if(!LIST_IS_EMPTY(&mgr->list))
         pb_debug_manager_dump_locked(mgr);
      mtx_unlock(&mgr->mutex);
#endif
      return NULL;
   }

   assert(pipe_is_referenced(&buf->buffer->reference));
   assert(pb_check_alignment(real_desc.alignment, buf->buffer->alignment));
   assert(pb_check_usage(real_desc.usage, buf->buffer->usage));
   assert(buf->buffer->size >= real_size);

   /* The wrapper advertises the client-visible size/alignment/usage, not
    * the enlarged ones of the underlying buffer. */
   pipe_reference_init(&buf->base.reference, 1);
   buf->base.alignment = desc->alignment;
   buf->base.usage = desc->usage;
   buf->base.size = size;

   buf->base.vtbl = &pb_debug_buffer_vtbl;
   buf->mgr = mgr;

   buf->underflow_size = mgr->underflow_size;
   /* The provider may round the allocation up; any slack enlarges the
    * trailing guard area. */
   buf->overflow_size = buf->buffer->size - buf->underflow_size - size;

   debug_backtrace_capture(buf->create_backtrace, 1, PB_DEBUG_CREATE_BACKTRACE);

   /* Seed the guard areas with the known pattern. */
   pb_debug_buffer_fill(buf);

   (void) mtx_init(&buf->mutex, mtx_plain);

   mtx_lock(&mgr->mutex);
   LIST_ADDTAIL(&buf->head, &mgr->list);
   mtx_unlock(&mgr->mutex);

   return &buf->base;
}
427
428
429 static void
pb_debug_manager_flush(struct pb_manager * _mgr)430 pb_debug_manager_flush(struct pb_manager *_mgr)
431 {
432 struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
433 assert(mgr->provider->flush);
434 if(mgr->provider->flush)
435 mgr->provider->flush(mgr->provider);
436 }
437
438
439 static void
pb_debug_manager_destroy(struct pb_manager * _mgr)440 pb_debug_manager_destroy(struct pb_manager *_mgr)
441 {
442 struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
443
444 mtx_lock(&mgr->mutex);
445 if(!LIST_IS_EMPTY(&mgr->list)) {
446 debug_printf("%s: unfreed buffers\n", __FUNCTION__);
447 pb_debug_manager_dump_locked(mgr);
448 }
449 mtx_unlock(&mgr->mutex);
450
451 mtx_destroy(&mgr->mutex);
452 mgr->provider->destroy(mgr->provider);
453 FREE(mgr);
454 }
455
456
457 struct pb_manager *
pb_debug_manager_create(struct pb_manager * provider,pb_size underflow_size,pb_size overflow_size)458 pb_debug_manager_create(struct pb_manager *provider,
459 pb_size underflow_size, pb_size overflow_size)
460 {
461 struct pb_debug_manager *mgr;
462
463 if (!provider)
464 return NULL;
465
466 mgr = CALLOC_STRUCT(pb_debug_manager);
467 if (!mgr)
468 return NULL;
469
470 mgr->base.destroy = pb_debug_manager_destroy;
471 mgr->base.create_buffer = pb_debug_manager_create_buffer;
472 mgr->base.flush = pb_debug_manager_flush;
473 mgr->provider = provider;
474 mgr->underflow_size = underflow_size;
475 mgr->overflow_size = overflow_size;
476
477 (void) mtx_init(&mgr->mutex, mtx_plain);
478 LIST_INITHEAD(&mgr->list);
479
480 return &mgr->base;
481 }
482
483
484 #else /* !DEBUG */
485
486
/**
 * Release builds: the debug manager is a pass-through — the provider itself
 * is returned, so no guard areas or tracking are added.  The
 * underflow_size/overflow_size arguments are ignored.
 */
struct pb_manager *
pb_debug_manager_create(struct pb_manager *provider,
                        pb_size underflow_size, pb_size overflow_size)
{
   return provider;
}
493
494
495 #endif /* !DEBUG */
496