/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


/**
 * Functions for pixel buffer objects and vertex/element buffer objects.
 */


#include <inttypes.h>  /* for PRId64 macro */

#include "main/imports.h"
#include "main/mtypes.h"
#include "main/arrayobj.h"
#include "main/bufferobj.h"

#include "st_context.h"
#include "st_cb_bufferobjects.h"
#include "st_debug.h"

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"

/**
 * There is some duplication between Mesa's buffer objects and our
 * bufmgr buffers.  Both have an integer handle and a hashtable to
 * look up an opaque structure.  It would be nice if the handles and
 * internal structures were somehow shared.
 */
static struct gl_buffer_object *
st_bufferobj_alloc(struct gl_context *ctx, GLuint name)
{
   struct st_buffer_object *st_obj = ST_CALLOC_STRUCT(st_buffer_object);

   if (!st_obj)
      return NULL;

   _mesa_initialize_buffer_object(ctx, &st_obj->Base, name);

   return &st_obj->Base;
}



/**
 * Deallocate/free a vertex/pixel buffer object.
 * Called via glDeleteBuffersARB().
 */
static void
st_bufferobj_free(struct gl_context *ctx, struct gl_buffer_object *obj)
{
   struct st_buffer_object *st_obj = st_buffer_object(obj);

   assert(obj->RefCount == 0);
   _mesa_buffer_unmap_all_mappings(ctx, obj);

   if (st_obj->buffer)
      pipe_resource_reference(&st_obj->buffer, NULL);

   _mesa_delete_buffer_object(ctx, obj);
}


/**
 * Replace data in a subrange of a buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
st_bufferobj_subdata(struct gl_context *ctx,
                     GLintptrARB offset,
                     GLsizeiptrARB size,
                     const void *data, struct gl_buffer_object *obj)
{
   struct st_buffer_object *st_obj = st_buffer_object(obj);

   /* we may be called from VBO code, so double-check params here */
   assert(offset >= 0);
   assert(size >= 0);
   assert(offset + size <= obj->Size);

   if (!size)
      return;

   /*
    * According to the ARB_vertex_buffer_object specification, if data is
    * NULL, the contents of the buffer object's data store are undefined.
    * We simply ignore the call and leave the store unchanged.
    */
   if (!data)
      return;

   if (!st_obj->buffer) {
      /* we probably ran out of memory during buffer allocation */
      return;
   }

   /* Now that transfers are per-context, we don't have to figure out
    * flushing here.  Usually drivers won't need to flush in this case
    * even if the buffer is currently referenced by hardware - they
    * just queue the upload as DMA rather than mapping the underlying
    * buffer directly.
    */
   pipe_buffer_write(st_context(ctx)->pipe,
                     st_obj->buffer,
                     offset, size, data);
}
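
/*
 * Illustrative call path (informal sketch, not invoked by this file):
 * an application call such as
 *
 *    glBufferSubData(GL_ARRAY_BUFFER, 16, 64, data);
 *
 * (data being any client pointer) is validated by core Mesa and dispatched
 * through ctx->Driver.BufferSubData (hooked up in
 * st_init_bufferobject_functions() below), which lands in
 * st_bufferobj_subdata() above; pipe_buffer_write() then hands the
 * 64-byte upload to the gallium driver.
 */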


/**
 * Called via glGetBufferSubDataARB().
 */
static void
st_bufferobj_get_subdata(struct gl_context *ctx,
                         GLintptrARB offset,
                         GLsizeiptrARB size,
                         void *data, struct gl_buffer_object *obj)
{
   struct st_buffer_object *st_obj = st_buffer_object(obj);

   /* we may be called from VBO code, so double-check params here */
   assert(offset >= 0);
   assert(size >= 0);
   assert(offset + size <= obj->Size);

   if (!size)
      return;

   if (!st_obj->buffer) {
      /* we probably ran out of memory during buffer allocation */
      return;
   }

   pipe_buffer_read(st_context(ctx)->pipe, st_obj->buffer,
                    offset, size, data);
}


/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return GL_TRUE for success, GL_FALSE if out of memory
 */
static GLboolean
st_bufferobj_data(struct gl_context *ctx,
                  GLenum target,
                  GLsizeiptrARB size,
                  const void *data,
                  GLenum usage,
                  GLbitfield storageFlags,
                  struct gl_buffer_object *obj)
{
   struct st_context *st = st_context(ctx);
   struct pipe_context *pipe = st->pipe;
   struct pipe_screen *screen = pipe->screen;
   struct st_buffer_object *st_obj = st_buffer_object(obj);
   unsigned bind, pipe_usage, pipe_flags = 0;

   if (target != GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD &&
       size && st_obj->buffer &&
       st_obj->Base.Size == size &&
       st_obj->Base.Usage == usage &&
       st_obj->Base.StorageFlags == storageFlags) {
      if (data) {
         /* Just discard the old contents and write new data.
          * This should be the same as creating a new buffer, but we avoid
          * a lot of validation in Mesa.
          */
         pipe->buffer_subdata(pipe, st_obj->buffer,
                              PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
                              0, size, data);
         return GL_TRUE;
      } else if (screen->get_param(screen, PIPE_CAP_INVALIDATE_BUFFER)) {
         pipe->invalidate_resource(pipe, st_obj->buffer);
         return GL_TRUE;
      }
   }

   st_obj->Base.Size = size;
   st_obj->Base.Usage = usage;
   st_obj->Base.StorageFlags = storageFlags;

   switch (target) {
   case GL_PIXEL_PACK_BUFFER_ARB:
   case GL_PIXEL_UNPACK_BUFFER_ARB:
      bind = PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW;
      break;
   case GL_ARRAY_BUFFER_ARB:
      bind = PIPE_BIND_VERTEX_BUFFER;
      break;
   case GL_ELEMENT_ARRAY_BUFFER_ARB:
      bind = PIPE_BIND_INDEX_BUFFER;
      break;
   case GL_TEXTURE_BUFFER:
      bind = PIPE_BIND_SAMPLER_VIEW;
      break;
   case GL_TRANSFORM_FEEDBACK_BUFFER:
      bind = PIPE_BIND_STREAM_OUTPUT;
      break;
   case GL_UNIFORM_BUFFER:
      bind = PIPE_BIND_CONSTANT_BUFFER;
      break;
   case GL_DRAW_INDIRECT_BUFFER:
   case GL_PARAMETER_BUFFER_ARB:
      bind = PIPE_BIND_COMMAND_ARGS_BUFFER;
      break;
   case GL_ATOMIC_COUNTER_BUFFER:
   case GL_SHADER_STORAGE_BUFFER:
      bind = PIPE_BIND_SHADER_BUFFER;
      break;
   case GL_QUERY_BUFFER:
      bind = PIPE_BIND_QUERY_BUFFER;
      break;
   default:
      bind = 0;
   }

   /* Set usage. */
   if (st_obj->Base.Immutable) {
      /* BufferStorage */
      if (storageFlags & GL_CLIENT_STORAGE_BIT) {
         if (storageFlags & GL_MAP_READ_BIT)
            pipe_usage = PIPE_USAGE_STAGING;
         else
            pipe_usage = PIPE_USAGE_STREAM;
      } else {
         pipe_usage = PIPE_USAGE_DEFAULT;
      }
   }
   else {
      /* BufferData */
      switch (usage) {
      case GL_STATIC_DRAW:
      case GL_STATIC_COPY:
      default:
         pipe_usage = PIPE_USAGE_DEFAULT;
         break;
      case GL_DYNAMIC_DRAW:
      case GL_DYNAMIC_COPY:
         pipe_usage = PIPE_USAGE_DYNAMIC;
         break;
      case GL_STREAM_DRAW:
      case GL_STREAM_COPY:
         /* XXX: Remove this test and fall-through when we have PBO unpacking
          * acceleration. Right now, PBO unpacking is done by the CPU, so we
          * have to make sure CPU reads are fast.
          */
         if (target != GL_PIXEL_UNPACK_BUFFER_ARB) {
            pipe_usage = PIPE_USAGE_STREAM;
            break;
         }
         /* fall through */
      case GL_STATIC_READ:
      case GL_DYNAMIC_READ:
      case GL_STREAM_READ:
         pipe_usage = PIPE_USAGE_STAGING;
         break;
      }
   }

   /* Set flags. */
   if (storageFlags & GL_MAP_PERSISTENT_BIT)
      pipe_flags |= PIPE_RESOURCE_FLAG_MAP_PERSISTENT;
   if (storageFlags & GL_MAP_COHERENT_BIT)
      pipe_flags |= PIPE_RESOURCE_FLAG_MAP_COHERENT;

   pipe_resource_reference(&st_obj->buffer, NULL);

   if (ST_DEBUG & DEBUG_BUFFER) {
      debug_printf("Create buffer size %" PRId64 " bind 0x%x\n",
                   (int64_t) size, bind);
   }

   if (size != 0) {
      struct pipe_resource buffer;

      memset(&buffer, 0, sizeof buffer);
      buffer.target = PIPE_BUFFER;
      buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
      buffer.bind = bind;
      buffer.usage = pipe_usage;
      buffer.flags = pipe_flags;
      buffer.width0 = size;
      buffer.height0 = 1;
      buffer.depth0 = 1;
      buffer.array_size = 1;

      if (target == GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD) {
         st_obj->buffer =
            screen->resource_from_user_memory(screen, &buffer, (void*)data);
      }
      else {
         st_obj->buffer = screen->resource_create(screen, &buffer);

         if (st_obj->buffer && data)
            pipe_buffer_write(pipe, st_obj->buffer, 0, size, data);
      }

      if (!st_obj->buffer) {
         /* out of memory */
         st_obj->Base.Size = 0;
         return GL_FALSE;
      }
   }

   /* The current buffer may be bound, so we have to revalidate all atoms that
    * might be using it.
    */
   /* TODO: Add arrays to usage history */
   ctx->NewDriverState |= ST_NEW_VERTEX_ARRAYS;
   if (st_obj->Base.UsageHistory & USAGE_UNIFORM_BUFFER)
      ctx->NewDriverState |= ST_NEW_UNIFORM_BUFFER;
   if (st_obj->Base.UsageHistory & USAGE_SHADER_STORAGE_BUFFER)
      ctx->NewDriverState |= ST_NEW_STORAGE_BUFFER;
   if (st_obj->Base.UsageHistory & USAGE_TEXTURE_BUFFER)
      ctx->NewDriverState |= ST_NEW_SAMPLER_VIEWS | ST_NEW_IMAGE_UNITS;
   if (st_obj->Base.UsageHistory & USAGE_ATOMIC_COUNTER_BUFFER)
      ctx->NewDriverState |= ST_NEW_ATOMIC_BUFFER;

   return GL_TRUE;
}
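
/*
 * Illustrative example (informal sketch, not invoked by this file): a
 * typical call such as
 *
 *    glBufferData(GL_ARRAY_BUFFER, 1024, verts, GL_STATIC_DRAW);
 *
 * (verts being any client pointer) reaches st_bufferobj_data() with
 * target = GL_ARRAY_BUFFER_ARB, so bind becomes PIPE_BIND_VERTEX_BUFFER;
 * the buffer is not immutable, so the GL_STATIC_DRAW case selects
 * PIPE_USAGE_DEFAULT, the resource is created with resource_create(),
 * and the initial contents are uploaded with pipe_buffer_write().
 */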


/**
 * Called via glInvalidateBuffer(Sub)Data.
 */
static void
st_bufferobj_invalidate(struct gl_context *ctx,
                        struct gl_buffer_object *obj,
                        GLintptr offset,
                        GLsizeiptr size)
{
   struct st_context *st = st_context(ctx);
   struct pipe_context *pipe = st->pipe;
   struct st_buffer_object *st_obj = st_buffer_object(obj);

   /* We ignore partial invalidates. */
   if (offset != 0 || size != obj->Size)
      return;

   /* Nothing to invalidate. */
   if (!st_obj->buffer)
      return;

   pipe->invalidate_resource(pipe, st_obj->buffer);
}


/**
 * Called via glMapBufferRange().
 */
static void *
st_bufferobj_map_range(struct gl_context *ctx,
                       GLintptr offset, GLsizeiptr length, GLbitfield access,
                       struct gl_buffer_object *obj,
                       gl_map_buffer_index index)
{
   struct pipe_context *pipe = st_context(ctx)->pipe;
   struct st_buffer_object *st_obj = st_buffer_object(obj);
   enum pipe_transfer_usage flags = 0x0;

   if (access & GL_MAP_WRITE_BIT)
      flags |= PIPE_TRANSFER_WRITE;

   if (access & GL_MAP_READ_BIT)
      flags |= PIPE_TRANSFER_READ;

   if (access & GL_MAP_FLUSH_EXPLICIT_BIT)
      flags |= PIPE_TRANSFER_FLUSH_EXPLICIT;

   if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
      flags |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   }
   else if (access & GL_MAP_INVALIDATE_RANGE_BIT) {
      if (offset == 0 && length == obj->Size)
         flags |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
      else
         flags |= PIPE_TRANSFER_DISCARD_RANGE;
   }

   if (access & GL_MAP_UNSYNCHRONIZED_BIT)
      flags |= PIPE_TRANSFER_UNSYNCHRONIZED;

   if (access & GL_MAP_PERSISTENT_BIT)
      flags |= PIPE_TRANSFER_PERSISTENT;

   if (access & GL_MAP_COHERENT_BIT)
      flags |= PIPE_TRANSFER_COHERENT;

   /* ... other flags ...
    */

   if (access & MESA_MAP_NOWAIT_BIT)
      flags |= PIPE_TRANSFER_DONTBLOCK;

   assert(offset >= 0);
   assert(length >= 0);
   assert(offset < obj->Size);
   assert(offset + length <= obj->Size);

   obj->Mappings[index].Pointer = pipe_buffer_map_range(pipe,
                                        st_obj->buffer,
                                        offset, length,
                                        flags,
                                        &st_obj->transfer[index]);
   if (obj->Mappings[index].Pointer) {
      obj->Mappings[index].Offset = offset;
      obj->Mappings[index].Length = length;
      obj->Mappings[index].AccessFlags = access;
   }
   else {
      st_obj->transfer[index] = NULL;
   }

   return obj->Mappings[index].Pointer;
}
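
/*
 * Illustrative example (informal sketch, not invoked by this file): a
 * write-only, range-discarding map such as
 *
 *    glMapBufferRange(GL_ARRAY_BUFFER, 256, 64,
 *                     GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT);
 *
 * arrives here with those access bits; assuming the 64-byte range does not
 * cover the whole buffer, they translate to
 * PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE for
 * pipe_buffer_map_range().
 */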


static void
st_bufferobj_flush_mapped_range(struct gl_context *ctx,
                                GLintptr offset, GLsizeiptr length,
                                struct gl_buffer_object *obj,
                                gl_map_buffer_index index)
{
   struct pipe_context *pipe = st_context(ctx)->pipe;
   struct st_buffer_object *st_obj = st_buffer_object(obj);

   /* Subrange is relative to mapped range */
   assert(offset >= 0);
   assert(length >= 0);
   assert(offset + length <= obj->Mappings[index].Length);
   assert(obj->Mappings[index].Pointer);

   if (!length)
      return;

   pipe_buffer_flush_mapped_range(pipe, st_obj->transfer[index],
                                  obj->Mappings[index].Offset + offset,
                                  length);
}


/**
 * Called via glUnmapBufferARB().
 */
static GLboolean
st_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj,
                   gl_map_buffer_index index)
{
   struct pipe_context *pipe = st_context(ctx)->pipe;
   struct st_buffer_object *st_obj = st_buffer_object(obj);

   if (obj->Mappings[index].Length)
      pipe_buffer_unmap(pipe, st_obj->transfer[index]);

   st_obj->transfer[index] = NULL;
   obj->Mappings[index].Pointer = NULL;
   obj->Mappings[index].Offset = 0;
   obj->Mappings[index].Length = 0;
   return GL_TRUE;
}


/**
 * Called via glCopyBufferSubData().
 */
static void
st_copy_buffer_subdata(struct gl_context *ctx,
                       struct gl_buffer_object *src,
                       struct gl_buffer_object *dst,
                       GLintptr readOffset, GLintptr writeOffset,
                       GLsizeiptr size)
{
   struct pipe_context *pipe = st_context(ctx)->pipe;
   struct st_buffer_object *srcObj = st_buffer_object(src);
   struct st_buffer_object *dstObj = st_buffer_object(dst);
   struct pipe_box box;

   if (!size)
      return;

   /* buffer should not already be mapped */
   assert(!_mesa_check_disallowed_mapping(src));
   assert(!_mesa_check_disallowed_mapping(dst));

   u_box_1d(readOffset, size, &box);

   pipe->resource_copy_region(pipe, dstObj->buffer, 0, writeOffset, 0, 0,
                              srcObj->buffer, 0, &box);
}
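
/*
 * Illustrative example (informal sketch, not invoked by this file):
 *
 *    glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER,
 *                        0, 128, 64);
 *
 * copies 64 bytes; here that becomes a one-dimensional box of width 64 at
 * readOffset 0 in the source resource, written into the destination
 * resource at writeOffset 128 via resource_copy_region().
 */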

/**
 * Called via glClearBufferSubData().
 */
static void
st_clear_buffer_subdata(struct gl_context *ctx,
                        GLintptr offset, GLsizeiptr size,
                        const void *clearValue,
                        GLsizeiptr clearValueSize,
                        struct gl_buffer_object *bufObj)
{
   struct pipe_context *pipe = st_context(ctx)->pipe;
   struct st_buffer_object *buf = st_buffer_object(bufObj);
   static const char zeros[16] = {0};

   if (!pipe->clear_buffer) {
      _mesa_ClearBufferSubData_sw(ctx, offset, size,
                                  clearValue, clearValueSize, bufObj);
      return;
   }

   if (!clearValue)
      clearValue = zeros;

   pipe->clear_buffer(pipe, buf->buffer, offset, size,
                      clearValue, clearValueSize);
}
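
/*
 * Note: glClearBufferData()/glClearBufferSubData() with a NULL data
 * pointer are specified to fill the range with zeros, which is why a
 * NULL clearValue falls back to the zeros[] array above.
 */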


/* TODO: if buffer wasn't created with appropriate usage flags, need
 * to recreate it now and copy contents -- or possibly create a
 * gallium entrypoint to extend the usage flags and let the driver
 * decide if a copy is necessary.
 */
void
st_bufferobj_validate_usage(struct st_context *st,
                            struct st_buffer_object *obj,
                            unsigned usage)
{
}


void
st_init_bufferobject_functions(struct pipe_screen *screen,
                               struct dd_function_table *functions)
{
   /* plug in default driver fallbacks (such as for ClearBufferSubData) */
   _mesa_init_buffer_object_functions(functions);

   functions->NewBufferObject = st_bufferobj_alloc;
   functions->DeleteBuffer = st_bufferobj_free;
   functions->BufferData = st_bufferobj_data;
   functions->BufferSubData = st_bufferobj_subdata;
   functions->GetBufferSubData = st_bufferobj_get_subdata;
   functions->MapBufferRange = st_bufferobj_map_range;
   functions->FlushMappedBufferRange = st_bufferobj_flush_mapped_range;
   functions->UnmapBuffer = st_bufferobj_unmap;
   functions->CopyBufferSubData = st_copy_buffer_subdata;
   functions->ClearBufferSubData = st_clear_buffer_subdata;

   if (screen->get_param(screen, PIPE_CAP_INVALIDATE_BUFFER))
      functions->InvalidateBufferSubData = st_bufferobj_invalidate;
}
584