/**************************************************************************
 *
 * Copyright 2005 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef ST_CB_BUFFEROBJECTS_H
#define ST_CB_BUFFEROBJECTS_H

#include "main/mtypes.h"

struct dd_function_table;
struct pipe_resource;
struct pipe_screen;
struct st_context;

/**
 * State_tracker vertex/pixel buffer object, derived from Mesa's
 * gl_buffer_object.
 */
struct st_buffer_object
{
   struct gl_buffer_object Base;
   struct pipe_resource *buffer;     /* GPU storage */

   struct gl_context *ctx;  /* the context that owns private_refcount */

   /* This mechanism allows passing buffer references to the driver without
    * using atomics to increase the reference count.
    *
    * This private refcount can be decremented without atomics, but only one
    * context (ctx above) may use this counter, which keeps it thread-safe.
    *
    * This number is atomically added to buffer->reference.count at
    * initialization. If it's never used, the same number is atomically
    * subtracted from buffer->reference.count before destruction. If this
    * number is decremented, we can pass that reference to the driver without
    * touching reference.count. At buffer destruction we only subtract
    * the number of references we did not return. This can turn a million
    * atomic increments into one atomic add and one atomic subtract.
    */
   int private_refcount;

   struct pipe_transfer *transfer[MAP_COUNT];
};
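
/* For example, handing out three references from the owning context costs
 * two atomic operations in total rather than three:
 *
 *    reference.count += 100000000;   (one atomic add on first use)
 *    private_refcount = 100000000;
 *    private_refcount -= 3;          (three references returned, no atomics)
 *    reference.count -= 99999997;    (one atomic subtract at destruction)
 *
 * leaving a net +3 on reference.count for the three references that were
 * passed to the driver.
 */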


/** cast wrapper */
static inline struct st_buffer_object *
st_buffer_object(struct gl_buffer_object *obj)
{
   return (struct st_buffer_object *) obj;
}


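/**
 * Convert GL buffer access flags to Gallium pipe_map_flags.
 * wholeBuffer indicates whether the access covers the entire buffer.
 */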
enum pipe_map_flags
st_access_flags_to_transfer_flags(GLbitfield access, bool wholeBuffer);


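/**
 * Plug the state tracker's buffer-object functions into the driver's
 * dd_function_table.
 */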
extern void
st_init_bufferobject_functions(struct pipe_screen *screen,
                               struct dd_function_table *functions);

static inline struct pipe_resource *
st_get_buffer_reference(struct gl_context *ctx, struct gl_buffer_object *obj)
{
   if (unlikely(!obj))
      return NULL;

   struct st_buffer_object *stobj = st_buffer_object(obj);
   struct pipe_resource *buffer = stobj->buffer;

   if (unlikely(!buffer))
      return NULL;

   /* Only one context is using the fast path. All other contexts must use
    * the slow path.
    */
   if (unlikely(stobj->ctx != ctx)) {
      p_atomic_inc(&buffer->reference.count);
      return buffer;
   }

   if (unlikely(stobj->private_refcount <= 0)) {
      assert(stobj->private_refcount == 0);

      /* This is the number of atomic increments we will skip. */
      stobj->private_refcount = 100000000;
      p_atomic_add(&buffer->reference.count, stobj->private_refcount);
   }

   /* Return a buffer reference while decrementing the private refcount. */
   stobj->private_refcount--;
   return buffer;
}
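
/* A rough usage sketch: the pointer returned above carries one reference,
 * which whoever ends up holding it (typically the driver) must eventually
 * release, e.g. with pipe_resource_reference(&res, NULL):
 *
 *    struct pipe_resource *res = st_get_buffer_reference(st->ctx, bufObj);
 *    ...                                    pass the reference on
 *    pipe_resource_reference(&res, NULL);   release it when done
 *
 * (st->ctx and bufObj are placeholder names for the caller's GL context and
 * buffer object.)
 */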

#endif