/**************************************************************************
 *
 * Copyright 2007-2008 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


/**
 * \file
 * Debug buffer manager to detect buffer under- and overflows.
 *
 * \author Jose Fonseca <jfonseca@vmware.com>
 */
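
/*
 * Illustrative usage sketch (not part of the original file; the provider,
 * size and desc below are placeholders):
 *
 *    struct pb_manager *provider = ...;
 *    struct pb_manager *debug_mgr =
 *       pb_debug_manager_create(provider, 4096, 4096);
 *
 *    struct pb_buffer *buf =
 *       debug_mgr->create_buffer(debug_mgr, size, &desc);
 *    ...
 *    pb_reference(&buf, NULL);
 *    debug_mgr->destroy(debug_mgr);
 */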


#include "pipe/p_compiler.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/list.h"
#include "util/u_time.h"
#include "util/u_debug_stack.h"
#include <inttypes.h>

#include "pb_buffer.h"
#include "pb_bufmgr.h"


#ifdef DEBUG


#define PB_DEBUG_CREATE_BACKTRACE 8
#define PB_DEBUG_MAP_BACKTRACE 8


/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)


struct pb_debug_manager;

/**
 * Wrapper around a pipe buffer that adds under- and overflow detection.
 */
struct pb_debug_buffer
{
   struct pb_buffer base;

   struct pb_buffer *buffer;
   struct pb_debug_manager *mgr;

   pb_size underflow_size;
   pb_size overflow_size;

   struct debug_stack_frame create_backtrace[PB_DEBUG_CREATE_BACKTRACE];

   pipe_mutex mutex;
   unsigned map_count;
   struct debug_stack_frame map_backtrace[PB_DEBUG_MAP_BACKTRACE];

   struct list_head head;
};
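
/*
 * Layout of the wrapped allocation, as set up by the code below:
 *
 *    | underflow_size | base.size (user data) | overflow_size |
 *
 * Both pads are filled with a known pattern and re-checked on map, unmap,
 * validate and destroy; callers only ever see a pointer to the user range.
 */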


struct pb_debug_manager
{
   struct pb_manager base;

   struct pb_manager *provider;

   pb_size underflow_size;
   pb_size overflow_size;

   pipe_mutex mutex;
   struct list_head list;
};


static inline struct pb_debug_buffer *
pb_debug_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct pb_debug_buffer *)buf;
}


static inline struct pb_debug_manager *
pb_debug_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_debug_manager *)mgr;
}


static const uint8_t random_pattern[32] = {
   0xaf, 0xcf, 0xa5, 0xa2, 0xc2, 0x63, 0x15, 0x1a,
   0x7e, 0xe2, 0x7e, 0x84, 0x15, 0x49, 0xa2, 0x1e,
   0x49, 0x63, 0xf5, 0x52, 0x74, 0x66, 0x9e, 0xc4,
   0x6d, 0xcf, 0x2c, 0x4a, 0x74, 0xe6, 0xfd, 0x94
};

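/**
 * Fill size bytes at dst with the repeating random_pattern above.
 */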
static inline void
fill_random_pattern(uint8_t *dst, pb_size size)
{
   pb_size i = 0;
   while(size--) {
      *dst++ = random_pattern[i++];
      i &= sizeof(random_pattern) - 1;
   }
}

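/**
 * Verify that dst[0..size-1] still holds the fill pattern.  Returns FALSE
 * on any mismatch and reports the first and last corrupted offsets through
 * min_ofs and max_ofs.
 */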
static inline boolean
check_random_pattern(const uint8_t *dst, pb_size size,
                     pb_size *min_ofs, pb_size *max_ofs)
{
   boolean result = TRUE;
   pb_size i;
   *min_ofs = size;
   *max_ofs = 0;
   for(i = 0; i < size; ++i) {
      if(*dst++ != random_pattern[i % sizeof(random_pattern)]) {
         *min_ofs = MIN2(*min_ofs, i);
         *max_ofs = MAX2(*max_ofs, i);
         result = FALSE;
      }
   }
   return result;
}

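/**
 * Fill both pads (before and after the user-visible range) with the
 * pattern so that later checks can detect writes outside base.size.
 */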
static void
pb_debug_buffer_fill(struct pb_debug_buffer *buf)
{
   uint8_t *map;

   map = pb_map(buf->buffer, PB_USAGE_CPU_WRITE, NULL);
   assert(map);
   if (map) {
      fill_random_pattern(map, buf->underflow_size);
      fill_random_pattern(map + buf->underflow_size + buf->base.size,
                          buf->overflow_size);
      pb_unmap(buf->buffer);
   }
}

/**
 * Check for under-/overflows.
 *
 * Should be called with the buffer unmapped.
 */
static void
pb_debug_buffer_check(struct pb_debug_buffer *buf)
{
   uint8_t *map;

   map = pb_map(buf->buffer,
                PB_USAGE_CPU_READ |
                PB_USAGE_UNSYNCHRONIZED, NULL);
   assert(map);
   if (map) {
      boolean underflow, overflow;
      pb_size min_ofs, max_ofs;

      underflow = !check_random_pattern(map, buf->underflow_size,
                                        &min_ofs, &max_ofs);
      if(underflow) {
         debug_printf("buffer underflow (offset -%"PRIu64"%s to -%"PRIu64" bytes) detected\n",
                      buf->underflow_size - min_ofs,
                      min_ofs == 0 ? "+" : "",
                      buf->underflow_size - max_ofs);
      }

      overflow = !check_random_pattern(map + buf->underflow_size + buf->base.size,
                                       buf->overflow_size,
                                       &min_ofs, &max_ofs);
      if(overflow) {
         debug_printf("buffer overflow (size %"PRIu64" plus offset %"PRIu64" to %"PRIu64"%s bytes) detected\n",
                      buf->base.size,
                      min_ofs,
                      max_ofs,
                      max_ofs == buf->overflow_size - 1 ? "+" : "");
      }

      if(underflow || overflow)
         debug_backtrace_dump(buf->create_backtrace, PB_DEBUG_CREATE_BACKTRACE);

      debug_assert(!underflow);
      debug_assert(!overflow);

      /* re-fill if not aborted */
      if(underflow)
         fill_random_pattern(map, buf->underflow_size);
      if(overflow)
         fill_random_pattern(map + buf->underflow_size + buf->base.size,
                             buf->overflow_size);

      pb_unmap(buf->buffer);
   }
}

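/**
 * Run one final under-/overflow check, unlink the wrapper from the
 * manager's list and release the wrapped buffer.
 */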
static void
pb_debug_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
   struct pb_debug_manager *mgr = buf->mgr;

   assert(!pipe_is_referenced(&buf->base.reference));

   pb_debug_buffer_check(buf);

   pipe_mutex_lock(mgr->mutex);
   LIST_DEL(&buf->head);
   pipe_mutex_unlock(mgr->mutex);

   pipe_mutex_destroy(buf->mutex);

   pb_reference(&buf->buffer, NULL);
   FREE(buf);
}

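/**
 * Map the wrapped buffer and return a pointer just past the underflow pad,
 * so the caller's offset 0 corresponds to offset underflow_size in the
 * real allocation.
 */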
static void *
pb_debug_buffer_map(struct pb_buffer *_buf,
                    unsigned flags, void *flush_ctx)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
   void *map;

   pb_debug_buffer_check(buf);

   map = pb_map(buf->buffer, flags, flush_ctx);
   if (!map)
      return NULL;

   pipe_mutex_lock(buf->mutex);
   ++buf->map_count;
   debug_backtrace_capture(buf->map_backtrace, 1, PB_DEBUG_MAP_BACKTRACE);
   pipe_mutex_unlock(buf->mutex);

   return (uint8_t *)map + buf->underflow_size;
}


static void
pb_debug_buffer_unmap(struct pb_buffer *_buf)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);

   pipe_mutex_lock(buf->mutex);
   assert(buf->map_count);
   if(buf->map_count)
      --buf->map_count;
   pipe_mutex_unlock(buf->mutex);

   pb_unmap(buf->buffer);

   pb_debug_buffer_check(buf);
}


static void
pb_debug_buffer_get_base_buffer(struct pb_buffer *_buf,
                                struct pb_buffer **base_buf,
                                pb_size *offset)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
   pb_get_base_buffer(buf->buffer, base_buf, offset);
   *offset += buf->underflow_size;
}


static enum pipe_error
pb_debug_buffer_validate(struct pb_buffer *_buf,
                         struct pb_validate *vl,
                         unsigned flags)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);

   pipe_mutex_lock(buf->mutex);
   if(buf->map_count) {
      debug_printf("%s: attempting to validate a mapped buffer\n", __FUNCTION__);
      debug_printf("last map backtrace is\n");
      debug_backtrace_dump(buf->map_backtrace, PB_DEBUG_MAP_BACKTRACE);
   }
   pipe_mutex_unlock(buf->mutex);

   pb_debug_buffer_check(buf);

   return pb_validate(buf->buffer, vl, flags);
}


static void
pb_debug_buffer_fence(struct pb_buffer *_buf,
                      struct pipe_fence_handle *fence)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
   pb_fence(buf->buffer, fence);
}


const struct pb_vtbl
pb_debug_buffer_vtbl = {
      pb_debug_buffer_destroy,
      pb_debug_buffer_map,
      pb_debug_buffer_unmap,
      pb_debug_buffer_validate,
      pb_debug_buffer_fence,
      pb_debug_buffer_get_base_buffer
};

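/**
 * Dump every live buffer: pointer, user-visible size and creation
 * backtrace.  The caller must hold mgr->mutex.
 */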
static void
pb_debug_manager_dump_locked(struct pb_debug_manager *mgr)
{
   struct list_head *curr, *next;
   struct pb_debug_buffer *buf;

   curr = mgr->list.next;
   next = curr->next;
   while(curr != &mgr->list) {
      buf = LIST_ENTRY(struct pb_debug_buffer, curr, head);

      debug_printf("buffer = %p\n", (void *) buf);
      debug_printf("    .size = 0x%"PRIx64"\n", buf->base.size);
      debug_backtrace_dump(buf->create_backtrace, PB_DEBUG_CREATE_BACKTRACE);

      curr = next;
      next = curr->next;
   }
}

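/**
 * Allocate a padded buffer (underflow_size + size + overflow_size) from the
 * provider, fill the pads with the pattern, record a creation backtrace and
 * add the wrapper to the manager's list.
 */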
static struct pb_buffer *
pb_debug_manager_create_buffer(struct pb_manager *_mgr,
                               pb_size size,
                               const struct pb_desc *desc)
{
   struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
   struct pb_debug_buffer *buf;
   struct pb_desc real_desc;
   pb_size real_size;

   assert(size);
   assert(desc->alignment);

   buf = CALLOC_STRUCT(pb_debug_buffer);
   if (!buf)
      return NULL;

   real_size = mgr->underflow_size + size + mgr->overflow_size;
   real_desc = *desc;
   real_desc.usage |= PB_USAGE_CPU_WRITE;
   real_desc.usage |= PB_USAGE_CPU_READ;

   buf->buffer = mgr->provider->create_buffer(mgr->provider,
                                              real_size,
                                              &real_desc);
   if(!buf->buffer) {
      FREE(buf);
#if 0
      pipe_mutex_lock(mgr->mutex);
      debug_printf("%s: failed to create buffer\n", __FUNCTION__);
      if(!LIST_IS_EMPTY(&mgr->list))
         pb_debug_manager_dump_locked(mgr);
      pipe_mutex_unlock(mgr->mutex);
#endif
      return NULL;
   }

   assert(pipe_is_referenced(&buf->buffer->reference));
   assert(pb_check_alignment(real_desc.alignment, buf->buffer->alignment));
   assert(pb_check_usage(real_desc.usage, buf->buffer->usage));
   assert(buf->buffer->size >= real_size);

   pipe_reference_init(&buf->base.reference, 1);
   buf->base.alignment = desc->alignment;
   buf->base.usage = desc->usage;
   buf->base.size = size;

   buf->base.vtbl = &pb_debug_buffer_vtbl;
   buf->mgr = mgr;

   buf->underflow_size = mgr->underflow_size;
   buf->overflow_size = buf->buffer->size - buf->underflow_size - size;

   debug_backtrace_capture(buf->create_backtrace, 1, PB_DEBUG_CREATE_BACKTRACE);

   pb_debug_buffer_fill(buf);

   pipe_mutex_init(buf->mutex);

   pipe_mutex_lock(mgr->mutex);
   LIST_ADDTAIL(&buf->head, &mgr->list);
   pipe_mutex_unlock(mgr->mutex);

   return &buf->base;
}

static void
pb_debug_manager_flush(struct pb_manager *_mgr)
{
   struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
   assert(mgr->provider->flush);
   if(mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}


static void
pb_debug_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_debug_manager *mgr = pb_debug_manager(_mgr);

   pipe_mutex_lock(mgr->mutex);
   if(!LIST_IS_EMPTY(&mgr->list)) {
      debug_printf("%s: unfreed buffers\n", __FUNCTION__);
      pb_debug_manager_dump_locked(mgr);
   }
   pipe_mutex_unlock(mgr->mutex);

   pipe_mutex_destroy(mgr->mutex);
   mgr->provider->destroy(mgr->provider);
   FREE(mgr);
}

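/**
 * Create a debug manager wrapping the given provider.  underflow_size and
 * overflow_size give the number of pattern-filled pad bytes placed before
 * and after every allocation.
 */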
struct pb_manager *
pb_debug_manager_create(struct pb_manager *provider,
                        pb_size underflow_size, pb_size overflow_size)
{
   struct pb_debug_manager *mgr;

   if (!provider)
      return NULL;

   mgr = CALLOC_STRUCT(pb_debug_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_debug_manager_destroy;
   mgr->base.create_buffer = pb_debug_manager_create_buffer;
   mgr->base.flush = pb_debug_manager_flush;
   mgr->provider = provider;
   mgr->underflow_size = underflow_size;
   mgr->overflow_size = overflow_size;

   pipe_mutex_init(mgr->mutex);
   LIST_INITHEAD(&mgr->list);

   return &mgr->base;
}


#else /* !DEBUG */

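/*
 * In release builds the debug layer is compiled out: the provider is
 * returned unchanged and no padding or checking takes place.
 */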
struct pb_manager *
pb_debug_manager_create(struct pb_manager *provider,
                        pb_size underflow_size, pb_size overflow_size)
{
   return provider;
}


#endif /* !DEBUG */