• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /**************************************************************************
2  *
3  * Copyright 2007 VMware, Inc.
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21  * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 
28 
29 /**
30  * Functions for pixel buffer objects and vertex/element buffer objects.
31  */
32 
33 
34 #include <inttypes.h>  /* for PRId64 macro */
35 
36 #include "main/errors.h"
37 
38 #include "main/mtypes.h"
39 #include "main/arrayobj.h"
40 #include "main/bufferobj.h"
41 
42 #include "st_context.h"
43 #include "st_cb_bufferobjects.h"
44 #include "st_cb_memoryobjects.h"
45 #include "st_debug.h"
46 #include "st_util.h"
47 
48 #include "pipe/p_context.h"
49 #include "pipe/p_defines.h"
50 #include "util/u_inlines.h"
51 
52 
53 /**
54  * There is some duplication between mesa's bufferobjects and our
55  * bufmgr buffers.  Both have an integer handle and a hashtable to
56  * lookup an opaque structure.  It would be nice if the handles and
57  * internal structure where somehow shared.
58  */
59 static struct gl_buffer_object *
st_bufferobj_alloc(struct gl_context * ctx,GLuint name)60 st_bufferobj_alloc(struct gl_context *ctx, GLuint name)
61 {
62    struct st_buffer_object *st_obj = ST_CALLOC_STRUCT(st_buffer_object);
63 
64    if (!st_obj)
65       return NULL;
66 
67    _mesa_initialize_buffer_object(ctx, &st_obj->Base, name);
68 
69    return &st_obj->Base;
70 }
71 
72 
73 
74 /**
75  * Deallocate/free a vertex/pixel buffer object.
76  * Called via glDeleteBuffersARB().
77  */
78 static void
st_bufferobj_free(struct gl_context * ctx,struct gl_buffer_object * obj)79 st_bufferobj_free(struct gl_context *ctx, struct gl_buffer_object *obj)
80 {
81    struct st_buffer_object *st_obj = st_buffer_object(obj);
82 
83    assert(obj->RefCount == 0);
84    _mesa_buffer_unmap_all_mappings(ctx, obj);
85 
86    if (st_obj->buffer)
87       pipe_resource_reference(&st_obj->buffer, NULL);
88 
89    _mesa_delete_buffer_object(ctx, obj);
90 }
91 
92 
93 
94 /**
95  * Replace data in a subrange of buffer object.  If the data range
96  * specified by size + offset extends beyond the end of the buffer or
97  * if data is NULL, no copy is performed.
98  * Called via glBufferSubDataARB().
99  */
100 static void
st_bufferobj_subdata(struct gl_context * ctx,GLintptrARB offset,GLsizeiptrARB size,const void * data,struct gl_buffer_object * obj)101 st_bufferobj_subdata(struct gl_context *ctx,
102                      GLintptrARB offset,
103                      GLsizeiptrARB size,
104                      const void * data, struct gl_buffer_object *obj)
105 {
106    struct st_buffer_object *st_obj = st_buffer_object(obj);
107 
108    /* we may be called from VBO code, so double-check params here */
109    assert(offset >= 0);
110    assert(size >= 0);
111    assert(offset + size <= obj->Size);
112 
113    if (!size)
114       return;
115 
116    /*
117     * According to ARB_vertex_buffer_object specification, if data is null,
118     * then the contents of the buffer object's data store is undefined. We just
119     * ignore, and leave it unchanged.
120     */
121    if (!data)
122       return;
123 
124    if (!st_obj->buffer) {
125       /* we probably ran out of memory during buffer allocation */
126       return;
127    }
128 
129    /* Now that transfers are per-context, we don't have to figure out
130     * flushing here.  Usually drivers won't need to flush in this case
131     * even if the buffer is currently referenced by hardware - they
132     * just queue the upload as dma rather than mapping the underlying
133     * buffer directly.
134     *
135     * If the buffer is mapped, suppress implicit buffer range invalidation
136     * by using PIPE_MAP_DIRECTLY.
137     */
138    struct pipe_context *pipe = st_context(ctx)->pipe;
139 
140    pipe->buffer_subdata(pipe, st_obj->buffer,
141                         _mesa_bufferobj_mapped(obj, MAP_USER) ?
142                            PIPE_MAP_DIRECTLY : 0,
143                         offset, size, data);
144 }
145 
146 
147 /**
148  * Called via glGetBufferSubDataARB().
149  */
150 static void
st_bufferobj_get_subdata(struct gl_context * ctx,GLintptrARB offset,GLsizeiptrARB size,void * data,struct gl_buffer_object * obj)151 st_bufferobj_get_subdata(struct gl_context *ctx,
152                          GLintptrARB offset,
153                          GLsizeiptrARB size,
154                          void * data, struct gl_buffer_object *obj)
155 {
156    struct st_buffer_object *st_obj = st_buffer_object(obj);
157 
158    /* we may be called from VBO code, so double-check params here */
159    assert(offset >= 0);
160    assert(size >= 0);
161    assert(offset + size <= obj->Size);
162 
163    if (!size)
164       return;
165 
166    if (!st_obj->buffer) {
167       /* we probably ran out of memory during buffer allocation */
168       return;
169    }
170 
171    pipe_buffer_read(st_context(ctx)->pipe, st_obj->buffer,
172                     offset, size, data);
173 }
174 
175 
176 /**
177  * Return bitmask of PIPE_BIND_x flags corresponding a GL buffer target.
178  */
179 static unsigned
buffer_target_to_bind_flags(GLenum target)180 buffer_target_to_bind_flags(GLenum target)
181 {
182    switch (target) {
183    case GL_PIXEL_PACK_BUFFER_ARB:
184    case GL_PIXEL_UNPACK_BUFFER_ARB:
185       return PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW;
186    case GL_ARRAY_BUFFER_ARB:
187       return PIPE_BIND_VERTEX_BUFFER;
188    case GL_ELEMENT_ARRAY_BUFFER_ARB:
189       return PIPE_BIND_INDEX_BUFFER;
190    case GL_TEXTURE_BUFFER:
191       return PIPE_BIND_SAMPLER_VIEW;
192    case GL_TRANSFORM_FEEDBACK_BUFFER:
193       return PIPE_BIND_STREAM_OUTPUT;
194    case GL_UNIFORM_BUFFER:
195       return PIPE_BIND_CONSTANT_BUFFER;
196    case GL_DRAW_INDIRECT_BUFFER:
197    case GL_PARAMETER_BUFFER_ARB:
198       return PIPE_BIND_COMMAND_ARGS_BUFFER;
199    case GL_ATOMIC_COUNTER_BUFFER:
200    case GL_SHADER_STORAGE_BUFFER:
201       return PIPE_BIND_SHADER_BUFFER;
202    case GL_QUERY_BUFFER:
203       return PIPE_BIND_QUERY_BUFFER;
204    default:
205       return 0;
206    }
207 }
208 
209 
210 /**
211  * Return bitmask of PIPE_RESOURCE_x flags corresponding to GL_MAP_x flags.
212  */
213 static unsigned
storage_flags_to_buffer_flags(GLbitfield storageFlags)214 storage_flags_to_buffer_flags(GLbitfield storageFlags)
215 {
216    unsigned flags = 0;
217    if (storageFlags & GL_MAP_PERSISTENT_BIT)
218       flags |= PIPE_RESOURCE_FLAG_MAP_PERSISTENT;
219    if (storageFlags & GL_MAP_COHERENT_BIT)
220       flags |= PIPE_RESOURCE_FLAG_MAP_COHERENT;
221    if (storageFlags & GL_SPARSE_STORAGE_BIT_ARB)
222       flags |= PIPE_RESOURCE_FLAG_SPARSE;
223    return flags;
224 }
225 
226 
227 /**
228  * From a buffer object's target, immutability flag, storage flags and
229  * usage hint, return a pipe_resource_usage value (PIPE_USAGE_DYNAMIC,
230  * STREAM, etc).
231  */
232 static enum pipe_resource_usage
buffer_usage(GLenum target,GLboolean immutable,GLbitfield storageFlags,GLenum usage)233 buffer_usage(GLenum target, GLboolean immutable,
234              GLbitfield storageFlags, GLenum usage)
235 {
236    if (immutable) {
237       /* BufferStorage */
238       if (storageFlags & GL_CLIENT_STORAGE_BIT) {
239          if (storageFlags & GL_MAP_READ_BIT)
240             return PIPE_USAGE_STAGING;
241          else
242             return PIPE_USAGE_STREAM;
243       } else {
244          return PIPE_USAGE_DEFAULT;
245       }
246    }
247    else {
248       /* These are often read by the CPU, so enable CPU caches. */
249       if (target == GL_PIXEL_PACK_BUFFER ||
250           target == GL_PIXEL_UNPACK_BUFFER)
251          return PIPE_USAGE_STAGING;
252 
253       /* BufferData */
254       switch (usage) {
255       case GL_DYNAMIC_DRAW:
256       case GL_DYNAMIC_COPY:
257          return PIPE_USAGE_DYNAMIC;
258       case GL_STREAM_DRAW:
259       case GL_STREAM_COPY:
260          return PIPE_USAGE_STREAM;
261       case GL_STATIC_READ:
262       case GL_DYNAMIC_READ:
263       case GL_STREAM_READ:
264          return PIPE_USAGE_STAGING;
265       case GL_STATIC_DRAW:
266       case GL_STATIC_COPY:
267       default:
268          return PIPE_USAGE_DEFAULT;
269       }
270    }
271 }
272 
273 
/**
 * Common implementation for BufferData / BufferStorage / BufferDataMem:
 * (re)allocate the pipe_resource backing a buffer object and optionally
 * upload initial data.
 *
 * \param memObj        non-NULL when backing the buffer with imported memory
 * \param offset        byte offset into \p memObj (memory-object path only)
 * \param storageFlags  GL_MAP_* / GL_SPARSE_* bits (immutable storage)
 * \return GL_TRUE on success, GL_FALSE on size overflow or allocation failure
 */
static ALWAYS_INLINE GLboolean
bufferobj_data(struct gl_context *ctx,
               GLenum target,
               GLsizeiptrARB size,
               const void *data,
               struct gl_memory_object *memObj,
               GLuint64 offset,
               GLenum usage,
               GLbitfield storageFlags,
               struct gl_buffer_object *obj)
{
   struct st_context *st = st_context(ctx);
   struct pipe_context *pipe = st->pipe;
   struct pipe_screen *screen = pipe->screen;
   struct st_buffer_object *st_obj = st_buffer_object(obj);
   struct st_memory_object *st_mem_obj = st_memory_object(memObj);
   bool is_mapped = _mesa_bufferobj_mapped(obj, MAP_USER);

   if (size > UINT32_MAX || offset > UINT32_MAX) {
      /* pipe_resource.width0 is 32 bits only and increasing it
       * to 64 bits doesn't make much sense since hw support
       * for > 4GB resources is limited.
       */
      st_obj->Base.Size = 0;
      return GL_FALSE;
   }

   /* Fast path: if an identically-sized/used buffer already exists, reuse
    * it instead of reallocating.  (Not applicable to AMD's external
    * virtual-memory buffers, which wrap user pointers.)
    */
   if (target != GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD &&
       size && st_obj->buffer &&
       st_obj->Base.Size == size &&
       st_obj->Base.Usage == usage &&
       st_obj->Base.StorageFlags == storageFlags) {
      if (data) {
         /* Just discard the old contents and write new data.
          * This should be the same as creating a new buffer, but we avoid
          * a lot of validation in Mesa.
          *
          * If the buffer is mapped, we can't discard it.
          *
          * PIPE_MAP_DIRECTLY suppresses implicit buffer range
          * invalidation.
          */
         pipe->buffer_subdata(pipe, st_obj->buffer,
                              is_mapped ? PIPE_MAP_DIRECTLY :
                                          PIPE_MAP_DISCARD_WHOLE_RESOURCE,
                              0, size, data);
         return GL_TRUE;
      } else if (is_mapped) {
         return GL_TRUE; /* can't reallocate, nothing to do */
      } else if (screen->get_param(screen, PIPE_CAP_INVALIDATE_BUFFER)) {
         pipe->invalidate_resource(pipe, st_obj->buffer);
         return GL_TRUE;
      }
   }

   /* Slow path: record the new parameters and drop the old resource. */
   st_obj->Base.Size = size;
   st_obj->Base.Usage = usage;
   st_obj->Base.StorageFlags = storageFlags;

   pipe_resource_reference( &st_obj->buffer, NULL );

   const unsigned bindings = buffer_target_to_bind_flags(target);

   if (ST_DEBUG & DEBUG_BUFFER) {
      debug_printf("Create buffer size %" PRId64 " bind 0x%x\n",
                   (int64_t) size, bindings);
   }

   if (size != 0) {
      struct pipe_resource buffer;

      memset(&buffer, 0, sizeof buffer);
      buffer.target = PIPE_BUFFER;
      buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
      buffer.bind = bindings;
      buffer.usage =
         buffer_usage(target, st_obj->Base.Immutable, storageFlags, usage);
      buffer.flags = storage_flags_to_buffer_flags(storageFlags);
      buffer.width0 = size;
      buffer.height0 = 1;
      buffer.depth0 = 1;
      buffer.array_size = 1;

      /* Three allocation paths: imported memory object, imported user
       * pointer (AMD external VM), or a freshly created resource.
       */
      if (st_mem_obj) {
         st_obj->buffer = screen->resource_from_memobj(screen, &buffer,
                                                       st_mem_obj->memory,
                                                       offset);
      }
      else if (target == GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD) {
         st_obj->buffer =
            screen->resource_from_user_memory(screen, &buffer, (void*)data);
      }
      else {
         st_obj->buffer = screen->resource_create(screen, &buffer);

         /* Only the create path uploads initial data explicitly. */
         if (st_obj->buffer && data)
            pipe_buffer_write(pipe, st_obj->buffer, 0, size, data);
      }

      if (!st_obj->buffer) {
         /* out of memory */
         st_obj->Base.Size = 0;
         return GL_FALSE;
      }
   }

   /* The current buffer may be bound, so we have to revalidate all atoms that
    * might be using it.
    */
   if (st_obj->Base.UsageHistory & USAGE_ARRAY_BUFFER)
      ctx->NewDriverState |= ST_NEW_VERTEX_ARRAYS;
   /* if (st_obj->Base.UsageHistory & USAGE_ELEMENT_ARRAY_BUFFER) */
   /*    ctx->NewDriverState |= TODO: Handle indices as gallium state; */
   if (st_obj->Base.UsageHistory & USAGE_UNIFORM_BUFFER)
      ctx->NewDriverState |= ST_NEW_UNIFORM_BUFFER;
   if (st_obj->Base.UsageHistory & USAGE_SHADER_STORAGE_BUFFER)
      ctx->NewDriverState |= ST_NEW_STORAGE_BUFFER;
   if (st_obj->Base.UsageHistory & USAGE_TEXTURE_BUFFER)
      ctx->NewDriverState |= ST_NEW_SAMPLER_VIEWS | ST_NEW_IMAGE_UNITS;
   if (st_obj->Base.UsageHistory & USAGE_ATOMIC_COUNTER_BUFFER)
      ctx->NewDriverState |= ctx->DriverFlags.NewAtomicBuffer;

   return GL_TRUE;
}
398 
399 /**
400  * Allocate space for and store data in a buffer object.  Any data that was
401  * previously stored in the buffer object is lost.  If data is NULL,
402  * memory will be allocated, but no copy will occur.
403  * Called via ctx->Driver.BufferData().
404  * \return GL_TRUE for success, GL_FALSE if out of memory
405  */
406 static GLboolean
st_bufferobj_data(struct gl_context * ctx,GLenum target,GLsizeiptrARB size,const void * data,GLenum usage,GLbitfield storageFlags,struct gl_buffer_object * obj)407 st_bufferobj_data(struct gl_context *ctx,
408                   GLenum target,
409                   GLsizeiptrARB size,
410                   const void *data,
411                   GLenum usage,
412                   GLbitfield storageFlags,
413                   struct gl_buffer_object *obj)
414 {
415    return bufferobj_data(ctx, target, size, data, NULL, 0, usage, storageFlags, obj);
416 }
417 
418 static GLboolean
st_bufferobj_data_mem(struct gl_context * ctx,GLenum target,GLsizeiptrARB size,struct gl_memory_object * memObj,GLuint64 offset,GLenum usage,struct gl_buffer_object * bufObj)419 st_bufferobj_data_mem(struct gl_context *ctx,
420                       GLenum target,
421                       GLsizeiptrARB size,
422                       struct gl_memory_object *memObj,
423                       GLuint64 offset,
424                       GLenum usage,
425                       struct gl_buffer_object *bufObj)
426 {
427    return bufferobj_data(ctx, target, size, NULL, memObj, offset, usage, 0, bufObj);
428 }
429 
430 /**
431  * Called via glInvalidateBuffer(Sub)Data.
432  */
433 static void
st_bufferobj_invalidate(struct gl_context * ctx,struct gl_buffer_object * obj,GLintptr offset,GLsizeiptr size)434 st_bufferobj_invalidate(struct gl_context *ctx,
435                         struct gl_buffer_object *obj,
436                         GLintptr offset,
437                         GLsizeiptr size)
438 {
439    struct st_context *st = st_context(ctx);
440    struct pipe_context *pipe = st->pipe;
441    struct st_buffer_object *st_obj = st_buffer_object(obj);
442 
443    /* We ignore partial invalidates. */
444    if (offset != 0 || size != obj->Size)
445       return;
446 
447    /* If the buffer is mapped, we can't invalidate it. */
448    if (!st_obj->buffer || _mesa_bufferobj_mapped(obj, MAP_USER))
449       return;
450 
451    pipe->invalidate_resource(pipe, st_obj->buffer);
452 }
453 
454 
455 /**
456  * Convert GLbitfield of GL_MAP_x flags to gallium pipe_map_flags flags.
457  * \param wholeBuffer  is the whole buffer being mapped?
458  */
459 enum pipe_map_flags
st_access_flags_to_transfer_flags(GLbitfield access,bool wholeBuffer)460 st_access_flags_to_transfer_flags(GLbitfield access, bool wholeBuffer)
461 {
462    enum pipe_map_flags flags = 0;
463 
464    if (access & GL_MAP_WRITE_BIT)
465       flags |= PIPE_MAP_WRITE;
466 
467    if (access & GL_MAP_READ_BIT)
468       flags |= PIPE_MAP_READ;
469 
470    if (access & GL_MAP_FLUSH_EXPLICIT_BIT)
471       flags |= PIPE_MAP_FLUSH_EXPLICIT;
472 
473    if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
474       flags |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
475    }
476    else if (access & GL_MAP_INVALIDATE_RANGE_BIT) {
477       if (wholeBuffer)
478          flags |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
479       else
480          flags |= PIPE_MAP_DISCARD_RANGE;
481    }
482 
483    if (access & GL_MAP_UNSYNCHRONIZED_BIT)
484       flags |= PIPE_MAP_UNSYNCHRONIZED;
485 
486    if (access & GL_MAP_PERSISTENT_BIT)
487       flags |= PIPE_MAP_PERSISTENT;
488 
489    if (access & GL_MAP_COHERENT_BIT)
490       flags |= PIPE_MAP_COHERENT;
491 
492    /* ... other flags ...
493    */
494 
495    if (access & MESA_MAP_NOWAIT_BIT)
496       flags |= PIPE_MAP_DONTBLOCK;
497    if (access & MESA_MAP_THREAD_SAFE_BIT)
498       flags |= PIPE_MAP_THREAD_SAFE;
499 
500    return flags;
501 }
502 
503 
504 /**
505  * Called via glMapBufferRange().
506  */
507 static void *
st_bufferobj_map_range(struct gl_context * ctx,GLintptr offset,GLsizeiptr length,GLbitfield access,struct gl_buffer_object * obj,gl_map_buffer_index index)508 st_bufferobj_map_range(struct gl_context *ctx,
509                        GLintptr offset, GLsizeiptr length, GLbitfield access,
510                        struct gl_buffer_object *obj,
511                        gl_map_buffer_index index)
512 {
513    struct pipe_context *pipe = st_context(ctx)->pipe;
514    struct st_buffer_object *st_obj = st_buffer_object(obj);
515 
516    assert(offset >= 0);
517    assert(length >= 0);
518    assert(offset < obj->Size);
519    assert(offset + length <= obj->Size);
520 
521    const enum pipe_map_flags transfer_flags =
522       st_access_flags_to_transfer_flags(access,
523                                         offset == 0 && length == obj->Size);
524 
525    obj->Mappings[index].Pointer = pipe_buffer_map_range(pipe,
526                                                         st_obj->buffer,
527                                                         offset, length,
528                                                         transfer_flags,
529                                                         &st_obj->transfer[index]);
530    if (obj->Mappings[index].Pointer) {
531       obj->Mappings[index].Offset = offset;
532       obj->Mappings[index].Length = length;
533       obj->Mappings[index].AccessFlags = access;
534    }
535    else {
536       st_obj->transfer[index] = NULL;
537    }
538 
539    return obj->Mappings[index].Pointer;
540 }
541 
542 
543 static void
st_bufferobj_flush_mapped_range(struct gl_context * ctx,GLintptr offset,GLsizeiptr length,struct gl_buffer_object * obj,gl_map_buffer_index index)544 st_bufferobj_flush_mapped_range(struct gl_context *ctx,
545                                 GLintptr offset, GLsizeiptr length,
546                                 struct gl_buffer_object *obj,
547                                 gl_map_buffer_index index)
548 {
549    struct pipe_context *pipe = st_context(ctx)->pipe;
550    struct st_buffer_object *st_obj = st_buffer_object(obj);
551 
552    /* Subrange is relative to mapped range */
553    assert(offset >= 0);
554    assert(length >= 0);
555    assert(offset + length <= obj->Mappings[index].Length);
556    assert(obj->Mappings[index].Pointer);
557 
558    if (!length)
559       return;
560 
561    pipe_buffer_flush_mapped_range(pipe, st_obj->transfer[index],
562                                   obj->Mappings[index].Offset + offset,
563                                   length);
564 }
565 
566 
567 /**
568  * Called via glUnmapBufferARB().
569  */
570 static GLboolean
st_bufferobj_unmap(struct gl_context * ctx,struct gl_buffer_object * obj,gl_map_buffer_index index)571 st_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj,
572                    gl_map_buffer_index index)
573 {
574    struct pipe_context *pipe = st_context(ctx)->pipe;
575    struct st_buffer_object *st_obj = st_buffer_object(obj);
576 
577    if (obj->Mappings[index].Length)
578       pipe_buffer_unmap(pipe, st_obj->transfer[index]);
579 
580    st_obj->transfer[index] = NULL;
581    obj->Mappings[index].Pointer = NULL;
582    obj->Mappings[index].Offset = 0;
583    obj->Mappings[index].Length = 0;
584    return GL_TRUE;
585 }
586 
587 
588 /**
589  * Called via glCopyBufferSubData().
590  */
591 static void
st_copy_buffer_subdata(struct gl_context * ctx,struct gl_buffer_object * src,struct gl_buffer_object * dst,GLintptr readOffset,GLintptr writeOffset,GLsizeiptr size)592 st_copy_buffer_subdata(struct gl_context *ctx,
593                        struct gl_buffer_object *src,
594                        struct gl_buffer_object *dst,
595                        GLintptr readOffset, GLintptr writeOffset,
596                        GLsizeiptr size)
597 {
598    struct pipe_context *pipe = st_context(ctx)->pipe;
599    struct st_buffer_object *srcObj = st_buffer_object(src);
600    struct st_buffer_object *dstObj = st_buffer_object(dst);
601    struct pipe_box box;
602 
603    if (!size)
604       return;
605 
606    /* buffer should not already be mapped */
607    assert(!_mesa_check_disallowed_mapping(src));
608    /* dst can be mapped, just not the same range as the target range */
609 
610    u_box_1d(readOffset, size, &box);
611 
612    pipe->resource_copy_region(pipe, dstObj->buffer, 0, writeOffset, 0, 0,
613                               srcObj->buffer, 0, &box);
614 }
615 
616 /**
617  * Called via glClearBufferSubData().
618  */
619 static void
st_clear_buffer_subdata(struct gl_context * ctx,GLintptr offset,GLsizeiptr size,const void * clearValue,GLsizeiptr clearValueSize,struct gl_buffer_object * bufObj)620 st_clear_buffer_subdata(struct gl_context *ctx,
621                         GLintptr offset, GLsizeiptr size,
622                         const void *clearValue,
623                         GLsizeiptr clearValueSize,
624                         struct gl_buffer_object *bufObj)
625 {
626    struct pipe_context *pipe = st_context(ctx)->pipe;
627    struct st_buffer_object *buf = st_buffer_object(bufObj);
628    static const char zeros[16] = {0};
629 
630    if (!pipe->clear_buffer) {
631       _mesa_ClearBufferSubData_sw(ctx, offset, size,
632                                   clearValue, clearValueSize, bufObj);
633       return;
634    }
635 
636    if (!clearValue)
637       clearValue = zeros;
638 
639    pipe->clear_buffer(pipe, buf->buffer, offset, size,
640                       clearValue, clearValueSize);
641 }
642 
643 static void
st_bufferobj_page_commitment(struct gl_context * ctx,struct gl_buffer_object * bufferObj,GLintptr offset,GLsizeiptr size,GLboolean commit)644 st_bufferobj_page_commitment(struct gl_context *ctx,
645                              struct gl_buffer_object *bufferObj,
646                              GLintptr offset, GLsizeiptr size,
647                              GLboolean commit)
648 {
649    struct pipe_context *pipe = st_context(ctx)->pipe;
650    struct st_buffer_object *buf = st_buffer_object(bufferObj);
651    struct pipe_box box;
652 
653    u_box_1d(offset, size, &box);
654 
655    if (!pipe->resource_commit(pipe, buf->buffer, 0, &box, commit)) {
656       _mesa_error(ctx, GL_OUT_OF_MEMORY, "glBufferPageCommitmentARB(out of memory)");
657       return;
658    }
659 }
660 
/**
 * Plug the state-tracker buffer-object implementations into the driver
 * function table.
 *
 * InvalidateBufferSubData is only installed when the pipe screen reports
 * PIPE_CAP_INVALIDATE_BUFFER, since st_bufferobj_invalidate relies on
 * pipe->invalidate_resource.
 */
void
st_init_bufferobject_functions(struct pipe_screen *screen,
                               struct dd_function_table *functions)
{
   functions->NewBufferObject = st_bufferobj_alloc;
   functions->DeleteBuffer = st_bufferobj_free;
   functions->BufferData = st_bufferobj_data;
   functions->BufferDataMem = st_bufferobj_data_mem;
   functions->BufferSubData = st_bufferobj_subdata;
   functions->GetBufferSubData = st_bufferobj_get_subdata;
   functions->MapBufferRange = st_bufferobj_map_range;
   functions->FlushMappedBufferRange = st_bufferobj_flush_mapped_range;
   functions->UnmapBuffer = st_bufferobj_unmap;
   functions->CopyBufferSubData = st_copy_buffer_subdata;
   functions->ClearBufferSubData = st_clear_buffer_subdata;
   functions->BufferPageCommitment = st_bufferobj_page_commitment;

   /* Optional: requires driver support for buffer invalidation. */
   if (screen->get_param(screen, PIPE_CAP_INVALIDATE_BUFFER))
      functions->InvalidateBufferSubData = st_bufferobj_invalidate;
}
681