1 /*
2 * Mesa 3-D graphics library
3 *
4 * Copyright (C) 1999-2008 Brian Paul All Rights Reserved.
5 * Copyright (C) 2009 VMware, Inc. All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included
15 * in all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26
27 /**
28 * \file bufferobj.c
29 * \brief Functions for the GL_ARB_vertex/pixel_buffer_object extensions.
30 * \author Brian Paul, Ian Romanick
31 */
32
33 #include <stdbool.h>
34 #include <inttypes.h> /* for PRId64 macro */
35 #include "util/debug.h"
36 #include "glheader.h"
37 #include "enums.h"
38 #include "hash.h"
39 #include "context.h"
40 #include "bufferobj.h"
41 #include "externalobjects.h"
42 #include "mtypes.h"
43 #include "teximage.h"
44 #include "glformats.h"
45 #include "texstore.h"
46 #include "transformfeedback.h"
47 #include "varray.h"
48 #include "util/u_atomic.h"
49 #include "util/u_memory.h"
50 #include "api_exec_decl.h"
51 #include "util/set.h"
52
53 #include "state_tracker/st_debug.h"
54 #include "state_tracker/st_atom.h"
55 #include "frontend/api.h"
56
57 #include "util/u_inlines.h"
58 /* Debug flags */
59 /*#define VBO_DEBUG*/
60 /*#define BOUNDS_CHECK*/
61
62
63 /**
64 * We count the number of buffer modification calls to check for
65 * inefficient buffer use. This is the number of such calls before we
66 * issue a warning.
67 */
68 #define BUFFER_WARNING_CALL_COUNT 4
69
70
71 /**
72 * Replace data in a subrange of a buffer object. If the data range
73 * specified by size + offset extends beyond the end of the buffer or
74 * if data is NULL, no copy is performed.
75 * Called via glBufferSubDataARB().
76 */
77 void
78 _mesa_bufferobj_subdata(struct gl_context *ctx,
79 GLintptrARB offset,
80 GLsizeiptrARB size,
81 const void *data, struct gl_buffer_object *obj)
82 {
83 /* we may be called from VBO code, so double-check params here */
84 assert(offset >= 0);
85 assert(size >= 0);
86 assert(offset + size <= obj->Size);
87
88 if (!size)
89 return;
90
91 /*
92 * According to the ARB_vertex_buffer_object specification, if data is NULL,
93 * then the contents of the buffer object's data store are undefined. We
94 * simply ignore the call and leave the store unchanged.
95 */
96 if (!data)
97 return;
98
99 if (!obj->buffer) {
100 /* we probably ran out of memory during buffer allocation */
101 return;
102 }
103
104 /* Now that transfers are per-context, we don't have to figure out
105 * flushing here. Usually drivers won't need to flush in this case
106 * even if the buffer is currently referenced by hardware - they
107 * just queue the upload as dma rather than mapping the underlying
108 * buffer directly.
109 *
110 * If the buffer is mapped, suppress implicit buffer range invalidation
111 * by using PIPE_MAP_DIRECTLY.
112 */
113 struct pipe_context *pipe = ctx->pipe;
114
115 pipe->buffer_subdata(pipe, obj->buffer,
116 _mesa_bufferobj_mapped(obj, MAP_USER) ?
117 PIPE_MAP_DIRECTLY : 0,
118 offset, size, data);
119 }
120
121
122 /**
123 * Called via glGetBufferSubDataARB().
124 */
125 static void
126 bufferobj_get_subdata(struct gl_context *ctx,
127 GLintptrARB offset,
128 GLsizeiptrARB size,
129 void *data, struct gl_buffer_object *obj)
130 {
131 /* we may be called from VBO code, so double-check params here */
132 assert(offset >= 0);
133 assert(size >= 0);
134 assert(offset + size <= obj->Size);
135
136 if (!size)
137 return;
138
139 if (!obj->buffer) {
140 /* we probably ran out of memory during buffer allocation */
141 return;
142 }
143
144 pipe_buffer_read(ctx->pipe, obj->buffer,
145 offset, size, data);
146 }
147
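/**
 * Public wrapper around bufferobj_get_subdata(); called from the
 * glGetBufferSubData paths.
 */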
148 void
149 _mesa_bufferobj_get_subdata(struct gl_context *ctx,
150 GLintptrARB offset,
151 GLsizeiptrARB size,
152 void *data, struct gl_buffer_object *obj)
153 {
154 bufferobj_get_subdata(ctx, offset, size, data, obj);
155 }
156
157 /**
158 * Return bitmask of PIPE_BIND_x flags corresponding to a GL buffer target.
159 */
160 static unsigned
161 buffer_target_to_bind_flags(GLenum target)
162 {
163 switch (target) {
164 case GL_PIXEL_PACK_BUFFER_ARB:
165 case GL_PIXEL_UNPACK_BUFFER_ARB:
166 return PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW;
167 case GL_ARRAY_BUFFER_ARB:
168 return PIPE_BIND_VERTEX_BUFFER;
169 case GL_ELEMENT_ARRAY_BUFFER_ARB:
170 return PIPE_BIND_INDEX_BUFFER;
171 case GL_TEXTURE_BUFFER:
172 return PIPE_BIND_SAMPLER_VIEW;
173 case GL_TRANSFORM_FEEDBACK_BUFFER:
174 return PIPE_BIND_STREAM_OUTPUT;
175 case GL_UNIFORM_BUFFER:
176 return PIPE_BIND_CONSTANT_BUFFER;
177 case GL_DRAW_INDIRECT_BUFFER:
178 case GL_PARAMETER_BUFFER_ARB:
179 return PIPE_BIND_COMMAND_ARGS_BUFFER;
180 case GL_ATOMIC_COUNTER_BUFFER:
181 case GL_SHADER_STORAGE_BUFFER:
182 return PIPE_BIND_SHADER_BUFFER;
183 case GL_QUERY_BUFFER:
184 return PIPE_BIND_QUERY_BUFFER;
185 default:
186 return 0;
187 }
188 }
189
190
191 /**
192 * Return bitmask of PIPE_RESOURCE_x flags corresponding to GL_MAP_x flags.
193 */
194 static unsigned
195 storage_flags_to_buffer_flags(GLbitfield storageFlags)
196 {
197 unsigned flags = 0;
198 if (storageFlags & GL_MAP_PERSISTENT_BIT)
199 flags |= PIPE_RESOURCE_FLAG_MAP_PERSISTENT;
200 if (storageFlags & GL_MAP_COHERENT_BIT)
201 flags |= PIPE_RESOURCE_FLAG_MAP_COHERENT;
202 if (storageFlags & GL_SPARSE_STORAGE_BIT_ARB)
203 flags |= PIPE_RESOURCE_FLAG_SPARSE;
204 return flags;
205 }
206
207
208 /**
209 * From a buffer object's target, immutability flag, storage flags and
210 * usage hint, return a pipe_resource_usage value (PIPE_USAGE_DYNAMIC,
211 * STREAM, etc).
212 */
213 static enum pipe_resource_usage
214 buffer_usage(GLenum target, GLboolean immutable,
215 GLbitfield storageFlags, GLenum usage)
216 {
217 /* "immutable" means that "storageFlags" was set by the user and "usage"
218 * was guessed by Mesa. Otherwise, "usage" was set by the user and
219 * storageFlags was guessed by Mesa.
220 *
221 * Therefore, use storageFlags with immutable, else use "usage".
222 */
223 if (immutable) {
224 /* BufferStorage */
225 if (storageFlags & GL_MAP_READ_BIT)
226 return PIPE_USAGE_STAGING;
227 else if (storageFlags & GL_CLIENT_STORAGE_BIT)
228 return PIPE_USAGE_STREAM;
229 else
230 return PIPE_USAGE_DEFAULT;
231 }
232 else {
233 /* These are often read by the CPU, so enable CPU caches. */
234 if (target == GL_PIXEL_PACK_BUFFER ||
235 target == GL_PIXEL_UNPACK_BUFFER)
236 return PIPE_USAGE_STAGING;
237
238 /* BufferData */
239 switch (usage) {
240 case GL_DYNAMIC_DRAW:
241 case GL_DYNAMIC_COPY:
242 return PIPE_USAGE_DYNAMIC;
243 case GL_STREAM_DRAW:
244 case GL_STREAM_COPY:
245 return PIPE_USAGE_STREAM;
246 case GL_STATIC_READ:
247 case GL_DYNAMIC_READ:
248 case GL_STREAM_READ:
249 return PIPE_USAGE_STAGING;
250 case GL_STATIC_DRAW:
251 case GL_STATIC_COPY:
252 default:
253 return PIPE_USAGE_DEFAULT;
254 }
255 }
256 }
257
258
259 static ALWAYS_INLINE GLboolean
260 bufferobj_data(struct gl_context *ctx,
261 GLenum target,
262 GLsizeiptrARB size,
263 const void *data,
264 struct gl_memory_object *memObj,
265 GLuint64 offset,
266 GLenum usage,
267 GLbitfield storageFlags,
268 struct gl_buffer_object *obj)
269 {
270 struct pipe_context *pipe = ctx->pipe;
271 struct pipe_screen *screen = pipe->screen;
272 bool is_mapped = _mesa_bufferobj_mapped(obj, MAP_USER);
273
274 if (size > UINT32_MAX || offset > UINT32_MAX) {
275 /* pipe_resource.width0 is 32 bits only and increasing it
276 * to 64 bits doesn't make much sense since hw support
277 * for > 4GB resources is limited.
278 */
279 obj->Size = 0;
280 return GL_FALSE;
281 }
282
283 if (target != GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD &&
284 size && obj->buffer &&
285 obj->Size == size &&
286 obj->Usage == usage &&
287 obj->StorageFlags == storageFlags) {
288 if (data) {
289 /* Just discard the old contents and write new data.
290 * This should be the same as creating a new buffer, but we avoid
291 * a lot of validation in Mesa.
292 *
293 * If the buffer is mapped, we can't discard it.
294 *
295 * PIPE_MAP_DIRECTLY suppresses implicit buffer range
296 * invalidation.
297 */
298 pipe->buffer_subdata(pipe, obj->buffer,
299 is_mapped ? PIPE_MAP_DIRECTLY :
300 PIPE_MAP_DISCARD_WHOLE_RESOURCE,
301 0, size, data);
302 return GL_TRUE;
303 } else if (is_mapped) {
304 return GL_TRUE; /* can't reallocate, nothing to do */
305 } else if (screen->get_param(screen, PIPE_CAP_INVALIDATE_BUFFER)) {
306 pipe->invalidate_resource(pipe, obj->buffer);
307 return GL_TRUE;
308 }
309 }
310
311 obj->Size = size;
312 obj->Usage = usage;
313 obj->StorageFlags = storageFlags;
314
315 _mesa_bufferobj_release_buffer(obj);
316
317 unsigned bindings = buffer_target_to_bind_flags(target);
318
319 if (storageFlags & MESA_GALLIUM_VERTEX_STATE_STORAGE)
320 bindings |= PIPE_BIND_VERTEX_STATE;
321
322 if (ST_DEBUG & DEBUG_BUFFER) {
323 debug_printf("Create buffer size %" PRId64 " bind 0x%x\n",
324 (int64_t) size, bindings);
325 }
326
327 if (size != 0) {
328 struct pipe_resource buffer;
329
330 memset(&buffer, 0, sizeof buffer);
331 buffer.target = PIPE_BUFFER;
332 buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
333 buffer.bind = bindings;
334 buffer.usage =
335 buffer_usage(target, obj->Immutable, storageFlags, usage);
336 buffer.flags = storage_flags_to_buffer_flags(storageFlags);
337 buffer.width0 = size;
338 buffer.height0 = 1;
339 buffer.depth0 = 1;
340 buffer.array_size = 1;
341
342 if (memObj) {
343 obj->buffer = screen->resource_from_memobj(screen, &buffer,
344 memObj->memory,
345 offset);
346 }
347 else if (target == GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD) {
348 obj->buffer =
349 screen->resource_from_user_memory(screen, &buffer, (void*)data);
350 }
351 else {
352 obj->buffer = screen->resource_create(screen, &buffer);
353
354 if (obj->buffer && data)
355 pipe_buffer_write(pipe, obj->buffer, 0, size, data);
356 }
357
358 if (!obj->buffer) {
359 /* out of memory */
360 obj->Size = 0;
361 return GL_FALSE;
362 }
363
364 obj->private_refcount_ctx = ctx;
365 }
366
367 /* The current buffer may be bound, so we have to revalidate all atoms that
368 * might be using it.
369 */
370 if (obj->UsageHistory & USAGE_ARRAY_BUFFER)
371 ctx->NewDriverState |= ST_NEW_VERTEX_ARRAYS;
372 if (obj->UsageHistory & USAGE_UNIFORM_BUFFER)
373 ctx->NewDriverState |= ST_NEW_UNIFORM_BUFFER;
374 if (obj->UsageHistory & USAGE_SHADER_STORAGE_BUFFER)
375 ctx->NewDriverState |= ST_NEW_STORAGE_BUFFER;
376 if (obj->UsageHistory & USAGE_TEXTURE_BUFFER)
377 ctx->NewDriverState |= ST_NEW_SAMPLER_VIEWS | ST_NEW_IMAGE_UNITS;
378 if (obj->UsageHistory & USAGE_ATOMIC_COUNTER_BUFFER)
379 ctx->NewDriverState |= ctx->DriverFlags.NewAtomicBuffer;
380
381 return GL_TRUE;
382 }
383
384 /**
385 * Allocate space for and store data in a buffer object. Any data that was
386 * previously stored in the buffer object is lost. If data is NULL,
387 * memory will be allocated, but no copy will occur.
388 * Called via ctx->Driver.BufferData().
389 * \return GL_TRUE for success, GL_FALSE if out of memory
390 */
391 GLboolean
392 _mesa_bufferobj_data(struct gl_context *ctx,
393 GLenum target,
394 GLsizeiptrARB size,
395 const void *data,
396 GLenum usage,
397 GLbitfield storageFlags,
398 struct gl_buffer_object *obj)
399 {
400 return bufferobj_data(ctx, target, size, data, NULL, 0, usage, storageFlags, obj);
401 }
402
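/**
 * Variant of _mesa_bufferobj_data() that sources the buffer's storage from
 * a gl_memory_object at the given offset; no initial data is uploaded.
 */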
403 static GLboolean
404 bufferobj_data_mem(struct gl_context *ctx,
405 GLenum target,
406 GLsizeiptrARB size,
407 struct gl_memory_object *memObj,
408 GLuint64 offset,
409 GLenum usage,
410 struct gl_buffer_object *bufObj)
411 {
412 return bufferobj_data(ctx, target, size, NULL, memObj, offset, usage, GL_DYNAMIC_STORAGE_BIT, bufObj);
413 }
414
415 /**
416 * Convert GLbitfield of GL_MAP_x flags to gallium pipe_map_flags flags.
417 * \param wholeBuffer whether the whole buffer is being mapped
418 */
419 enum pipe_map_flags
420 _mesa_access_flags_to_transfer_flags(GLbitfield access, bool wholeBuffer)
421 {
422 enum pipe_map_flags flags = 0;
423
424 if (access & GL_MAP_WRITE_BIT)
425 flags |= PIPE_MAP_WRITE;
426
427 if (access & GL_MAP_READ_BIT)
428 flags |= PIPE_MAP_READ;
429
430 if (access & GL_MAP_FLUSH_EXPLICIT_BIT)
431 flags |= PIPE_MAP_FLUSH_EXPLICIT;
432
433 if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
434 flags |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
435 }
436 else if (access & GL_MAP_INVALIDATE_RANGE_BIT) {
437 if (wholeBuffer)
438 flags |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
439 else
440 flags |= PIPE_MAP_DISCARD_RANGE;
441 }
442
443 if (access & GL_MAP_UNSYNCHRONIZED_BIT)
444 flags |= PIPE_MAP_UNSYNCHRONIZED;
445
446 if (access & GL_MAP_PERSISTENT_BIT)
447 flags |= PIPE_MAP_PERSISTENT;
448
449 if (access & GL_MAP_COHERENT_BIT)
450 flags |= PIPE_MAP_COHERENT;
451
452 /* ... other flags ...
453 */
454
455 if (access & MESA_MAP_NOWAIT_BIT)
456 flags |= PIPE_MAP_DONTBLOCK;
457 if (access & MESA_MAP_THREAD_SAFE_BIT)
458 flags |= PIPE_MAP_THREAD_SAFE;
459 if (access & MESA_MAP_ONCE)
460 flags |= PIPE_MAP_ONCE;
461
462 return flags;
463 }
464
465
466 /**
467 * Called via glMapBufferRange().
468 */
469 void *
470 _mesa_bufferobj_map_range(struct gl_context *ctx,
471 GLintptr offset, GLsizeiptr length, GLbitfield access,
472 struct gl_buffer_object *obj,
473 gl_map_buffer_index index)
474 {
475 struct pipe_context *pipe = ctx->pipe;
476
477 assert(offset >= 0);
478 assert(length >= 0);
479 assert(offset < obj->Size);
480 assert(offset + length <= obj->Size);
481
482 enum pipe_map_flags transfer_flags =
483 _mesa_access_flags_to_transfer_flags(access,
484 offset == 0 && length == obj->Size);
485
486 /* Sometimes games do silly things like MapBufferRange(UNSYNC|DISCARD_RANGE).
487 * In this case, the UNSYNC is a bit redundant, but the games rely
488 * on the driver rebinding/replacing the backing storage rather than
489 * going down the UNSYNC path (ie. honoring DISCARD_x first before UNSYNC).
490 */
491 if (unlikely(ctx->st_opts->ignore_map_unsynchronized)) {
492 if (transfer_flags & (PIPE_MAP_DISCARD_RANGE | PIPE_MAP_DISCARD_WHOLE_RESOURCE))
493 transfer_flags &= ~PIPE_MAP_UNSYNCHRONIZED;
494 }
495
496 if (ctx->Const.ForceMapBufferSynchronized)
497 transfer_flags &= ~PIPE_MAP_UNSYNCHRONIZED;
498
499 obj->Mappings[index].Pointer = pipe_buffer_map_range(pipe,
500 obj->buffer,
501 offset, length,
502 transfer_flags,
503 &obj->transfer[index]);
504 if (obj->Mappings[index].Pointer) {
505 obj->Mappings[index].Offset = offset;
506 obj->Mappings[index].Length = length;
507 obj->Mappings[index].AccessFlags = access;
508 }
509 else {
510 obj->transfer[index] = NULL;
511 }
512
513 return obj->Mappings[index].Pointer;
514 }
515
516
517 void
518 _mesa_bufferobj_flush_mapped_range(struct gl_context *ctx,
519 GLintptr offset, GLsizeiptr length,
520 struct gl_buffer_object *obj,
521 gl_map_buffer_index index)
522 {
523 struct pipe_context *pipe = ctx->pipe;
524
525 /* Subrange is relative to mapped range */
526 assert(offset >= 0);
527 assert(length >= 0);
528 assert(offset + length <= obj->Mappings[index].Length);
529 assert(obj->Mappings[index].Pointer);
530
531 if (!length)
532 return;
533
534 pipe_buffer_flush_mapped_range(pipe, obj->transfer[index],
535 obj->Mappings[index].Offset + offset,
536 length);
537 }
538
539
540 /**
541 * Called via glUnmapBufferARB().
542 */
543 GLboolean
544 _mesa_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj,
545 gl_map_buffer_index index)
546 {
547 struct pipe_context *pipe = ctx->pipe;
548
549 if (obj->Mappings[index].Length)
550 pipe_buffer_unmap(pipe, obj->transfer[index]);
551
552 obj->transfer[index] = NULL;
553 obj->Mappings[index].Pointer = NULL;
554 obj->Mappings[index].Offset = 0;
555 obj->Mappings[index].Length = 0;
556 return GL_TRUE;
557 }
558
559
560 /**
561 * Called via glCopyBufferSubData().
562 */
563 static void
564 bufferobj_copy_subdata(struct gl_context *ctx,
565 struct gl_buffer_object *src,
566 struct gl_buffer_object *dst,
567 GLintptr readOffset, GLintptr writeOffset,
568 GLsizeiptr size)
569 {
570 struct pipe_context *pipe = ctx->pipe;
571 struct pipe_box box;
572
573 dst->MinMaxCacheDirty = true;
574 if (!size)
575 return;
576
577 /* buffer should not already be mapped */
578 assert(!_mesa_check_disallowed_mapping(src));
579 /* dst can be mapped, just not the same range as the target range */
580
581 u_box_1d(readOffset, size, &box);
582
583 pipe->resource_copy_region(pipe, dst->buffer, 0, writeOffset, 0, 0,
584 src->buffer, 0, &box);
585 }
586
587 static void
588 clear_buffer_subdata_sw(struct gl_context *ctx,
589 GLintptr offset, GLsizeiptr size,
590 const GLvoid *clearValue,
591 GLsizeiptr clearValueSize,
592 struct gl_buffer_object *bufObj)
593 {
594 GLsizeiptr i;
595 GLubyte *dest;
596
597 dest = _mesa_bufferobj_map_range(ctx, offset, size,
598 GL_MAP_WRITE_BIT |
599 GL_MAP_INVALIDATE_RANGE_BIT,
600 bufObj, MAP_INTERNAL);
601
602 if (!dest) {
603 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glClearBuffer[Sub]Data");
604 return;
605 }
606
607 if (clearValue == NULL) {
608 /* Clear with zeros, per the spec */
609 memset(dest, 0, size);
610 _mesa_bufferobj_unmap(ctx, bufObj, MAP_INTERNAL);
611 return;
612 }
613
614 for (i = 0; i < size/clearValueSize; ++i) {
615 memcpy(dest, clearValue, clearValueSize);
616 dest += clearValueSize;
617 }
618
619 _mesa_bufferobj_unmap(ctx, bufObj, MAP_INTERNAL);
620 }
621
622 /**
623 * Helper to warn of possible performance issues, such as frequently
624 * updating a buffer created with GL_STATIC_DRAW. Called via the macro
625 * below.
626 */
627 static void
628 buffer_usage_warning(struct gl_context *ctx, GLuint *id, const char *fmt, ...)
629 {
630 va_list args;
631
632 va_start(args, fmt);
633 _mesa_gl_vdebugf(ctx, id,
634 MESA_DEBUG_SOURCE_API,
635 MESA_DEBUG_TYPE_PERFORMANCE,
636 MESA_DEBUG_SEVERITY_MEDIUM,
637 fmt, args);
638 va_end(args);
639 }
640
641 #define BUFFER_USAGE_WARNING(CTX, FMT, ...) \
642 do { \
643 static GLuint id = 0; \
644 buffer_usage_warning(CTX, &id, FMT, ##__VA_ARGS__); \
645 } while (0)
646
647
648 /**
649 * Used as a placeholder for buffer objects between glGenBuffers() and
650 * glBindBuffer() so that glIsBuffer() can work correctly.
651 */
652 static struct gl_buffer_object DummyBufferObject = {
653 .MinMaxCacheMutex = _SIMPLE_MTX_INITIALIZER_NP,
654 .RefCount = 1000*1000*1000, /* never delete */
655 };
656
657
658 /**
659 * Return pointer to address of a buffer object target.
660 * \param ctx the GL context
661 * \param target the buffer object target to be retrieved.
662 * \return pointer to pointer to the buffer object bound to \c target in the
663 * specified context or \c NULL if \c target is invalid.
664 */
665 static inline struct gl_buffer_object **
666 get_buffer_target(struct gl_context *ctx, GLenum target)
667 {
668 /* Other targets are only supported in desktop OpenGL and OpenGL ES 3.0. */
669 if (!_mesa_is_desktop_gl(ctx) && !_mesa_is_gles3(ctx)) {
670 switch (target) {
671 case GL_ARRAY_BUFFER:
672 case GL_ELEMENT_ARRAY_BUFFER:
673 break;
674 case GL_PIXEL_PACK_BUFFER:
675 case GL_PIXEL_UNPACK_BUFFER:
676 if (!ctx->Extensions.EXT_pixel_buffer_object)
677 return NULL;
678 break;
679 default:
680 return NULL;
681 }
682 }
683
684 switch (target) {
685 case GL_ARRAY_BUFFER_ARB:
686 return &ctx->Array.ArrayBufferObj;
687 case GL_ELEMENT_ARRAY_BUFFER_ARB:
688 return &ctx->Array.VAO->IndexBufferObj;
689 case GL_PIXEL_PACK_BUFFER_EXT:
690 return &ctx->Pack.BufferObj;
691 case GL_PIXEL_UNPACK_BUFFER_EXT:
692 return &ctx->Unpack.BufferObj;
693 case GL_COPY_READ_BUFFER:
694 return &ctx->CopyReadBuffer;
695 case GL_COPY_WRITE_BUFFER:
696 return &ctx->CopyWriteBuffer;
697 case GL_QUERY_BUFFER:
698 if (_mesa_has_ARB_query_buffer_object(ctx))
699 return &ctx->QueryBuffer;
700 break;
701 case GL_DRAW_INDIRECT_BUFFER:
702 if ((_mesa_is_desktop_gl(ctx) && ctx->Extensions.ARB_draw_indirect) ||
703 _mesa_is_gles31(ctx)) {
704 return &ctx->DrawIndirectBuffer;
705 }
706 break;
707 case GL_PARAMETER_BUFFER_ARB:
708 if (_mesa_has_ARB_indirect_parameters(ctx)) {
709 return &ctx->ParameterBuffer;
710 }
711 break;
712 case GL_DISPATCH_INDIRECT_BUFFER:
713 if (_mesa_has_compute_shaders(ctx)) {
714 return &ctx->DispatchIndirectBuffer;
715 }
716 break;
717 case GL_TRANSFORM_FEEDBACK_BUFFER:
718 if (ctx->Extensions.EXT_transform_feedback) {
719 return &ctx->TransformFeedback.CurrentBuffer;
720 }
721 break;
722 case GL_TEXTURE_BUFFER:
723 if (_mesa_has_ARB_texture_buffer_object(ctx) ||
724 _mesa_has_OES_texture_buffer(ctx)) {
725 return &ctx->Texture.BufferObject;
726 }
727 break;
728 case GL_UNIFORM_BUFFER:
729 if (ctx->Extensions.ARB_uniform_buffer_object) {
730 return &ctx->UniformBuffer;
731 }
732 break;
733 case GL_SHADER_STORAGE_BUFFER:
734 if (ctx->Extensions.ARB_shader_storage_buffer_object || _mesa_is_gles31(ctx)) {
735 return &ctx->ShaderStorageBuffer;
736 }
737 break;
738 case GL_ATOMIC_COUNTER_BUFFER:
739 if (ctx->Extensions.ARB_shader_atomic_counters || _mesa_is_gles31(ctx)) {
740 return &ctx->AtomicBuffer;
741 }
742 break;
743 case GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD:
744 if (ctx->Extensions.AMD_pinned_memory) {
745 return &ctx->ExternalVirtualMemoryBuffer;
746 }
747 break;
748 default:
749 return NULL;
750 }
751 return NULL;
752 }
753
754
755 /**
756 * Get the buffer object bound to the specified target in a GL context.
757 * \param ctx the GL context
758 * \param target the buffer object target to be retrieved.
759 * \param error the GL error to record if target is illegal.
760 * \return pointer to the buffer object bound to \c target in the
761 * specified context or \c NULL if \c target is invalid.
762 */
763 static inline struct gl_buffer_object *
764 get_buffer(struct gl_context *ctx, const char *func, GLenum target,
765 GLenum error)
766 {
767 struct gl_buffer_object **bufObj = get_buffer_target(ctx, target);
768
769 if (!bufObj) {
770 _mesa_error(ctx, GL_INVALID_ENUM, "%s(target)", func);
771 return NULL;
772 }
773
774 if (!*bufObj) {
775 _mesa_error(ctx, error, "%s(no buffer bound)", func);
776 return NULL;
777 }
778
779 return *bufObj;
780 }
781
782
783 /**
784 * Convert a GLbitfield describing the mapped buffer access flags
785 * into one of GL_READ_WRITE, GL_READ_ONLY, or GL_WRITE_ONLY.
786 */
787 static GLenum
788 simplified_access_mode(struct gl_context *ctx, GLbitfield access)
789 {
790 const GLbitfield rwFlags = GL_MAP_READ_BIT | GL_MAP_WRITE_BIT;
791 if ((access & rwFlags) == rwFlags)
792 return GL_READ_WRITE;
793 if ((access & GL_MAP_READ_BIT) == GL_MAP_READ_BIT)
794 return GL_READ_ONLY;
795 if ((access & GL_MAP_WRITE_BIT) == GL_MAP_WRITE_BIT)
796 return GL_WRITE_ONLY;
797
798 /* Otherwise, AccessFlags is zero (the default state).
799 *
800 * Table 2.6 on page 31 (page 44 of the PDF) of the OpenGL 1.5 spec says:
801 *
802 * Name Type Initial Value Legal Values
803 * ... ... ... ...
804 * BUFFER_ACCESS enum READ_WRITE READ_ONLY, WRITE_ONLY
805 * READ_WRITE
806 *
807 * However, table 6.8 in the GL_OES_mapbuffer extension says:
808 *
809 * Get Value Type Get Command Value Description
810 * --------- ---- ----------- ----- -----------
811 * BUFFER_ACCESS_OES Z1 GetBufferParameteriv WRITE_ONLY_OES buffer map flag
812 *
813 * The difference is because GL_OES_mapbuffer only supports mapping buffers
814 * write-only.
815 */
816 assert(access == 0);
817
818 return _mesa_is_gles(ctx) ? GL_WRITE_ONLY : GL_READ_WRITE;
819 }
820
821
822 /**
823 * Test if the buffer is mapped, and if so, if the mapped range overlaps the
824 * given range.
825 * The regions do not overlap if and only if the end of the given
826 * region is before the mapped region or the start of the given region
827 * is after the mapped region.
828 *
829 * \param obj Buffer object target on which to operate.
830 * \param offset Offset of the first byte of the subdata range.
831 * \param size Size, in bytes, of the subdata range.
832 * \return true if ranges overlap, false otherwise
833 *
834 */
835 static bool
836 bufferobj_range_mapped(const struct gl_buffer_object *obj,
837 GLintptr offset, GLsizeiptr size)
838 {
839 if (_mesa_bufferobj_mapped(obj, MAP_USER)) {
840 const GLintptr end = offset + size;
841 const GLintptr mapEnd = obj->Mappings[MAP_USER].Offset +
842 obj->Mappings[MAP_USER].Length;
843
844 if (!(end <= obj->Mappings[MAP_USER].Offset || offset >= mapEnd)) {
845 return true;
846 }
847 }
848 return false;
849 }
850
851
852 /**
853 * Tests the subdata range parameters and sets the GL error code for
854 * \c glBufferSubDataARB, \c glGetBufferSubDataARB and
855 * \c glClearBufferSubData.
856 *
857 * \param ctx GL context.
858 * \param bufObj The buffer object.
859 * \param offset Offset of the first byte of the subdata range.
860 * \param size Size, in bytes, of the subdata range.
861 * \param mappedRange If true, checks if an overlapping range is mapped.
862 * If false, checks if buffer is mapped.
863 * \param caller Name of calling function for recording errors.
864 * \return false if error, true otherwise
865 *
866 * \sa glBufferSubDataARB, glGetBufferSubDataARB, glClearBufferSubData
867 */
868 static bool
869 buffer_object_subdata_range_good(struct gl_context *ctx,
870 const struct gl_buffer_object *bufObj,
871 GLintptr offset, GLsizeiptr size,
872 bool mappedRange, const char *caller)
873 {
874 if (size < 0) {
875 _mesa_error(ctx, GL_INVALID_VALUE, "%s(size < 0)", caller);
876 return false;
877 }
878
879 if (offset < 0) {
880 _mesa_error(ctx, GL_INVALID_VALUE, "%s(offset < 0)", caller);
881 return false;
882 }
883
884 if (offset + size > bufObj->Size) {
885 _mesa_error(ctx, GL_INVALID_VALUE,
886 "%s(offset %lu + size %lu > buffer size %lu)", caller,
887 (unsigned long) offset,
888 (unsigned long) size,
889 (unsigned long) bufObj->Size);
890 return false;
891 }
892
893 if (bufObj->Mappings[MAP_USER].AccessFlags & GL_MAP_PERSISTENT_BIT)
894 return true;
895
896 if (mappedRange) {
897 if (bufferobj_range_mapped(bufObj, offset, size)) {
898 _mesa_error(ctx, GL_INVALID_OPERATION,
899 "%s(range is mapped without persistent bit)",
900 caller);
901 return false;
902 }
903 }
904 else {
905 if (_mesa_bufferobj_mapped(bufObj, MAP_USER)) {
906 _mesa_error(ctx, GL_INVALID_OPERATION,
907 "%s(buffer is mapped without persistent bit)",
908 caller);
909 return false;
910 }
911 }
912
913 return true;
914 }
915
916
917 /**
918 * Test the format and type parameters and set the GL error code for
919 * \c glClearBufferData, \c glClearNamedBufferData, \c glClearBufferSubData
920 * and \c glClearNamedBufferSubData.
921 *
922 * \param ctx GL context.
923 * \param internalformat Format to which the data is to be converted.
924 * \param format Format of the supplied data.
925 * \param type Type of the supplied data.
926 * \param caller Name of calling function for recording errors.
927 * \return If internalformat, format and type are legal the mesa_format
928 * corresponding to internalformat, otherwise MESA_FORMAT_NONE.
929 *
930 * \sa glClearBufferData, glClearNamedBufferData, glClearBufferSubData and
931 * glClearNamedBufferSubData.
932 */
933 static mesa_format
934 validate_clear_buffer_format(struct gl_context *ctx,
935 GLenum internalformat,
936 GLenum format, GLenum type,
937 const char *caller)
938 {
939 mesa_format mesaFormat;
940 GLenum errorFormatType;
941
942 mesaFormat = _mesa_validate_texbuffer_format(ctx, internalformat);
943 if (mesaFormat == MESA_FORMAT_NONE) {
944 _mesa_error(ctx, GL_INVALID_ENUM,
945 "%s(invalid internalformat)", caller);
946 return MESA_FORMAT_NONE;
947 }
948
949 /* NOTE: not mentioned in ARB_clear_buffer_object but according to
950 * EXT_texture_integer there is no conversion between integer and
951 * non-integer formats
952 */
953 if (_mesa_is_enum_format_signed_int(format) !=
954 _mesa_is_format_integer_color(mesaFormat)) {
955 _mesa_error(ctx, GL_INVALID_OPERATION,
956 "%s(integer vs non-integer)", caller);
957 return MESA_FORMAT_NONE;
958 }
959
960 if (!_mesa_is_color_format(format)) {
961 _mesa_error(ctx, GL_INVALID_VALUE,
962 "%s(format is not a color format)", caller);
963 return MESA_FORMAT_NONE;
964 }
965
966 errorFormatType = _mesa_error_check_format_and_type(ctx, format, type);
967 if (errorFormatType != GL_NO_ERROR) {
968 _mesa_error(ctx, GL_INVALID_VALUE,
969 "%s(invalid format or type)", caller);
970 return MESA_FORMAT_NONE;
971 }
972
973 return mesaFormat;
974 }
975
976
977 /**
978 * Convert user-specified clear value to the specified internal format.
979 *
980 * \param ctx GL context.
981 * \param internalformat Format to which the data is converted.
982 * \param clearValue Points to the converted clear value.
983 * \param format Format of the supplied data.
984 * \param type Type of the supplied data.
985 * \param data Data which is to be converted to internalformat.
986 * \param caller Name of calling function for recording errors.
987 * \return true if data could be converted, false otherwise.
988 *
989 * \sa glClearBufferData, glClearBufferSubData
990 */
991 static bool
992 convert_clear_buffer_data(struct gl_context *ctx,
993 mesa_format internalformat,
994 GLubyte *clearValue, GLenum format, GLenum type,
995 const GLvoid *data, const char *caller)
996 {
997 GLenum internalformatBase = _mesa_get_format_base_format(internalformat);
998
999 if (_mesa_texstore(ctx, 1, internalformatBase, internalformat,
1000 0, &clearValue, 1, 1, 1,
1001 format, type, data, &ctx->Unpack)) {
1002 return true;
1003 }
1004 else {
1005 _mesa_error(ctx, GL_OUT_OF_MEMORY, "%s", caller);
1006 return false;
1007 }
1008 }
1009
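/**
 * Release the pipe_resource backing a buffer object, first folding any
 * outstanding private (per-context) references back into the resource's
 * reference count.
 */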
1010 void
1011 _mesa_bufferobj_release_buffer(struct gl_buffer_object *obj)
1012 {
1013 if (!obj->buffer)
1014 return;
1015
1016 /* Subtract the remaining private references before unreferencing
1017 * the buffer. See the header file for explanation.
1018 */
1019 if (obj->private_refcount) {
1020 assert(obj->private_refcount > 0);
1021 p_atomic_add(&obj->buffer->reference.count,
1022 -obj->private_refcount);
1023 obj->private_refcount = 0;
1024 }
1025 obj->private_refcount_ctx = NULL;
1026
1027 pipe_resource_reference(&obj->buffer, NULL);
1028 }
1029
1030 /**
1031 * Delete a buffer object.
1032 *
1033 * Default callback for the \c dd_function_table::DeleteBuffer() hook.
1034 */
1035 void
1036 _mesa_delete_buffer_object(struct gl_context *ctx,
1037 struct gl_buffer_object *bufObj)
1038 {
1039 assert(bufObj->RefCount == 0);
1040 _mesa_buffer_unmap_all_mappings(ctx, bufObj);
1041 _mesa_bufferobj_release_buffer(bufObj);
1042
1043 vbo_delete_minmax_cache(bufObj);
1044 align_free(bufObj->Data);
1045
1046 /* assign strange values here to help w/ debugging */
1047 bufObj->RefCount = -1000;
1048 bufObj->Name = ~0;
1049
1050 simple_mtx_destroy(&bufObj->MinMaxCacheMutex);
1051 free(bufObj->Label);
1052 free(bufObj);
1053 }
1054
1055
1056
1057 /**
1058 * Set ptr to bufObj w/ reference counting.
1059 * This is normally only called from the _mesa_reference_buffer_object() macro
1060 * when there's a real pointer change.
1061 */
1062 void
1063 _mesa_reference_buffer_object_(struct gl_context *ctx,
1064 struct gl_buffer_object **ptr,
1065 struct gl_buffer_object *bufObj,
1066 bool shared_binding)
1067 {
1068 if (*ptr) {
1069 /* Unreference the old buffer */
1070 struct gl_buffer_object *oldObj = *ptr;
1071
1072 assert(oldObj->RefCount >= 1);
1073
1074 /* Count references only if the context doesn't own the buffer or if
1075 * ptr is a binding point shared by multiple contexts (such as a texture
1076 * buffer object being a buffer bound within a texture object).
1077 */
1078 if (shared_binding || ctx != oldObj->Ctx) {
1079 if (p_atomic_dec_zero(&oldObj->RefCount)) {
1080 _mesa_delete_buffer_object(ctx, oldObj);
1081 }
1082 } else if (ctx == oldObj->Ctx) {
1083 /* Update the private ref count. */
1084 assert(oldObj->CtxRefCount >= 1);
1085 oldObj->CtxRefCount--;
1086 }
1087
1088 *ptr = NULL;
1089 }
1090 assert(!*ptr);
1091
1092 if (bufObj) {
1093 /* reference new buffer */
1094 if (shared_binding || ctx != bufObj->Ctx)
1095 p_atomic_inc(&bufObj->RefCount);
1096 else if (ctx == bufObj->Ctx)
1097 bufObj->CtxRefCount++;
1098
1099 *ptr = bufObj;
1100 }
1101 }
1102
1103
1104 /**
1105 * Get the value of MESA_NO_MINMAX_CACHE.
1106 */
1107 static bool
1108 get_no_minmax_cache(void)
1109 {
1110 static bool read = false;
1111 static bool disable = false;
1112
1113 if (!read) {
1114 disable = env_var_as_boolean("MESA_NO_MINMAX_CACHE", false);
1115 read = true;
1116 }
1117
1118 return disable;
1119 }
1120
1121 /**
1122 * Callback called from _mesa_HashWalk()
1123 */
1124 static void
1125 count_buffer_size(void *data, void *userData)
1126 {
1127 const struct gl_buffer_object *bufObj =
1128 (const struct gl_buffer_object *) data;
1129 GLuint *total = (GLuint *) userData;
1130
1131 *total = *total + bufObj->Size;
1132 }
1133
1134
1135 /**
1136 * Compute total size (in bytes) of all buffer objects for the given context.
1137 * For debugging purposes.
1138 */
1139 GLuint
1140 _mesa_total_buffer_object_memory(struct gl_context *ctx)
1141 {
1142 GLuint total = 0;
1143
1144 _mesa_HashWalkMaybeLocked(ctx->Shared->BufferObjects, count_buffer_size,
1145 &total, ctx->BufferObjectsLocked);
1146
1147 return total;
1148 }
1149
1150 /**
1151 * Initialize the state associated with buffer objects
1152 */
1153 void
1154 _mesa_init_buffer_objects( struct gl_context *ctx )
1155 {
1156 GLuint i;
1157
1158 for (i = 0; i < MAX_COMBINED_UNIFORM_BUFFERS; i++) {
1159 _mesa_reference_buffer_object(ctx,
1160 &ctx->UniformBufferBindings[i].BufferObject,
1161 NULL);
1162 ctx->UniformBufferBindings[i].Offset = -1;
1163 ctx->UniformBufferBindings[i].Size = -1;
1164 }
1165
1166 for (i = 0; i < MAX_COMBINED_SHADER_STORAGE_BUFFERS; i++) {
1167 _mesa_reference_buffer_object(ctx,
1168 &ctx->ShaderStorageBufferBindings[i].BufferObject,
1169 NULL);
1170 ctx->ShaderStorageBufferBindings[i].Offset = -1;
1171 ctx->ShaderStorageBufferBindings[i].Size = -1;
1172 }
1173
1174 for (i = 0; i < MAX_COMBINED_ATOMIC_BUFFERS; i++) {
1175 _mesa_reference_buffer_object(ctx,
1176 &ctx->AtomicBufferBindings[i].BufferObject,
1177 NULL);
1178 ctx->AtomicBufferBindings[i].Offset = 0;
1179 ctx->AtomicBufferBindings[i].Size = 0;
1180 }
1181 }
1182
1183 /**
1184 * Detach the context from the buffer to re-enable buffer reference counting
1185 * for this context.
1186 */
1187 static void
1188 detach_ctx_from_buffer(struct gl_context *ctx, struct gl_buffer_object *buf)
1189 {
1190 assert(buf->Ctx == ctx);
1191
1192 /* Move private non-atomic context references to the global ref count. */
1193 p_atomic_add(&buf->RefCount, buf->CtxRefCount);
1194 buf->CtxRefCount = 0;
1195 buf->Ctx = NULL;
1196
1197 /* Remove the context's global reference: the context holds one
1198 * reference for the lifetime of the buffer ID (instead of each binding
1199 * point holding one) so that refcount atomics can be skipped.
1200 */
1201 _mesa_reference_buffer_object(ctx, &buf, NULL);
1202 }
1203
1204 /**
1205 * Zombie buffers are buffers that were created by one context and deleted
1206 * by another context. The creating context holds a global reference for each
1207 * buffer it created that can't be unreferenced when another context deletes
1208 * it. Such a buffer becomes a zombie, which means that it's no longer usable
1209 * by OpenGL, but the creating context still holds its global reference of
1210 * the buffer. Only the creating context can remove the reference, which is
1211 * what this function does.
1212 *
1213 * For all zombie buffers, decrement the reference count if the current
1214 * context owns the buffer.
1215 */
1216 static void
1217 unreference_zombie_buffers_for_ctx(struct gl_context *ctx)
1218 {
1219 /* It's assumed that the mutex of Shared->BufferObjects is locked. */
1220 set_foreach(ctx->Shared->ZombieBufferObjects, entry) {
1221 struct gl_buffer_object *buf = (struct gl_buffer_object *)entry->key;
1222
1223 if (buf->Ctx == ctx) {
1224 _mesa_set_remove(ctx->Shared->ZombieBufferObjects, entry);
1225 detach_ctx_from_buffer(ctx, buf);
1226 }
1227 }
1228 }
1229
1230 /**
1231 * When a context creates buffers, it holds a global buffer reference count
1232 * for each buffer and doesn't update their RefCount. When the context is
1233 * destroyed before the buffers are destroyed, the context must remove
1234 * its global reference from the buffers, so that the buffers can live
1235 * on their own.
1236 *
1237 * At this point, the buffers shouldn't be bound to any binding point owned
1238 * by the context. (It would crash if they were.)
1239 */
1240 static void
1241 detach_unrefcounted_buffer_from_ctx(void *data, void *userData)
1242 {
1243 struct gl_context *ctx = (struct gl_context *)userData;
1244 struct gl_buffer_object *buf = (struct gl_buffer_object *)data;
1245
1246 if (buf->Ctx == ctx) {
1247 /* Detach the current context from live objects. There should be no
1248 * bound buffer in the context at this point, therefore we can just
1249 * unreference the global reference. Other contexts and texture objects
1250 * might still be using the buffer.
1251 */
1252 assert(buf->CtxRefCount == 0);
1253 buf->Ctx = NULL;
1254 _mesa_reference_buffer_object(ctx, &buf, NULL);
1255 }
1256 }
1257
1258 void
1259 _mesa_free_buffer_objects( struct gl_context *ctx )
1260 {
1261 GLuint i;
1262
1263 _mesa_reference_buffer_object(ctx, &ctx->Array.ArrayBufferObj, NULL);
1264
1265 _mesa_reference_buffer_object(ctx, &ctx->CopyReadBuffer, NULL);
1266 _mesa_reference_buffer_object(ctx, &ctx->CopyWriteBuffer, NULL);
1267
1268 _mesa_reference_buffer_object(ctx, &ctx->UniformBuffer, NULL);
1269
1270 _mesa_reference_buffer_object(ctx, &ctx->ShaderStorageBuffer, NULL);
1271
1272 _mesa_reference_buffer_object(ctx, &ctx->AtomicBuffer, NULL);
1273
1274 _mesa_reference_buffer_object(ctx, &ctx->DrawIndirectBuffer, NULL);
1275
1276 _mesa_reference_buffer_object(ctx, &ctx->ParameterBuffer, NULL);
1277
1278 _mesa_reference_buffer_object(ctx, &ctx->DispatchIndirectBuffer, NULL);
1279
1280 _mesa_reference_buffer_object(ctx, &ctx->QueryBuffer, NULL);
1281
1282 for (i = 0; i < MAX_COMBINED_UNIFORM_BUFFERS; i++) {
1283 _mesa_reference_buffer_object(ctx,
1284 &ctx->UniformBufferBindings[i].BufferObject,
1285 NULL);
1286 }
1287
1288 for (i = 0; i < MAX_COMBINED_SHADER_STORAGE_BUFFERS; i++) {
1289 _mesa_reference_buffer_object(ctx,
1290 &ctx->ShaderStorageBufferBindings[i].BufferObject,
1291 NULL);
1292 }
1293
1294 for (i = 0; i < MAX_COMBINED_ATOMIC_BUFFERS; i++) {
1295 _mesa_reference_buffer_object(ctx,
1296 &ctx->AtomicBufferBindings[i].BufferObject,
1297 NULL);
1298 }
1299
1300 _mesa_HashLockMutex(ctx->Shared->BufferObjects);
1301 unreference_zombie_buffers_for_ctx(ctx);
1302 _mesa_HashWalkLocked(ctx->Shared->BufferObjects,
1303 detach_unrefcounted_buffer_from_ctx, ctx);
1304 _mesa_HashUnlockMutex(ctx->Shared->BufferObjects);
1305 }
1306
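/**
 * Allocate and minimally initialize a gl_buffer_object (RefCount = 1,
 * usage GL_STATIC_DRAW). Used for both named and internal buffer objects.
 */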
1307 struct gl_buffer_object *
1308 _mesa_bufferobj_alloc(struct gl_context *ctx, GLuint id)
1309 {
1310 struct gl_buffer_object *buf = CALLOC_STRUCT(gl_buffer_object);
1311 if (!buf)
1312 return NULL;
1313
1314 buf->RefCount = 1;
1315 buf->Name = id;
1316 buf->Usage = GL_STATIC_DRAW_ARB;
1317
1318 simple_mtx_init(&buf->MinMaxCacheMutex, mtx_plain);
1319 if (get_no_minmax_cache())
1320 buf->UsageHistory |= USAGE_DISABLE_MINMAX_CACHE;
1321 return buf;
1322 }
1323 /**
1324 * Create a buffer object that will be backed by an OpenGL buffer ID
1325 * where the creating context will hold one global buffer reference instead
1326 * of updating buffer RefCount for every binding point.
1327 *
1328 * This shouldn't be used for internal buffers.
1329 */
1330 static struct gl_buffer_object *
1331 new_gl_buffer_object(struct gl_context *ctx, GLuint id)
1332 {
1333 struct gl_buffer_object *buf = _mesa_bufferobj_alloc(ctx, id);
1334
1335 buf->Ctx = ctx;
1336 buf->RefCount++; /* global buffer reference held by the context */
1337 return buf;
1338 }
1339
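/**
 * Helper for binding a buffer name: if the name has no backing
 * gl_buffer_object yet (just generated, or never bound), allocate one and
 * insert it into the shared hash table. Generates GL_INVALID_OPERATION for
 * non-generated names in core profile unless no_error is set.
 */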
1340 bool
1341 _mesa_handle_bind_buffer_gen(struct gl_context *ctx,
1342 GLuint buffer,
1343 struct gl_buffer_object **buf_handle,
1344 const char *caller, bool no_error)
1345 {
1346 struct gl_buffer_object *buf = *buf_handle;
1347
1348 if (!no_error && !buf && (ctx->API == API_OPENGL_CORE)) {
1349 _mesa_error(ctx, GL_INVALID_OPERATION, "%s(non-gen name)", caller);
1350 return false;
1351 }
1352
1353 if (!buf || buf == &DummyBufferObject) {
1354 /* If this is a new buffer object id, or one which was generated but
1355 * never used before, allocate a buffer object now.
1356 */
1357 *buf_handle = new_gl_buffer_object(ctx, buffer);
1358 if (!*buf_handle) {
1359 _mesa_error(ctx, GL_OUT_OF_MEMORY, "%s", caller);
1360 return false;
1361 }
1362 _mesa_HashLockMaybeLocked(ctx->Shared->BufferObjects,
1363 ctx->BufferObjectsLocked);
1364 _mesa_HashInsertLocked(ctx->Shared->BufferObjects, buffer,
1365 *buf_handle, buf != NULL);
1366 /* If one context only creates buffers and another context only deletes
1367 * buffers, buffers don't get released because it only produces zombie
1368 * buffers. Only the context that has created the buffers can release
1369 * them. Thus, when we create buffers, we prune the list of zombie
1370 * buffers.
1371 */
1372 unreference_zombie_buffers_for_ctx(ctx);
1373 _mesa_HashUnlockMaybeLocked(ctx->Shared->BufferObjects,
1374 ctx->BufferObjectsLocked);
1375 }
1376
1377 return true;
1378 }
1379
1380 /**
1381 * Bind the specified target to buffer for the specified context.
1382 * Called by glBindBuffer() and other functions.
1383 */
1384 static void
1385 bind_buffer_object(struct gl_context *ctx,
1386 struct gl_buffer_object **bindTarget, GLuint buffer,
1387 bool no_error)
1388 {
1389 struct gl_buffer_object *oldBufObj;
1390 struct gl_buffer_object *newBufObj = NULL;
1391
1392 assert(bindTarget);
1393
1394 /* Get pointer to old buffer object (to be unbound) */
1395 oldBufObj = *bindTarget;
1396 if ((oldBufObj && oldBufObj->Name == buffer && !oldBufObj->DeletePending) ||
1397 (!oldBufObj && buffer == 0))
1398 return; /* rebinding the same buffer object - no change */
1399
1400 /*
1401 * Get pointer to new buffer object (newBufObj)
1402 */
1403 if (buffer != 0) {
1404 /* non-default buffer object */
1405 newBufObj = _mesa_lookup_bufferobj(ctx, buffer);
1406 if (!_mesa_handle_bind_buffer_gen(ctx, buffer,
1407 &newBufObj, "glBindBuffer",
1408 no_error))
1409 return;
1410 }
1411
1412 /* bind new buffer */
1413 _mesa_reference_buffer_object(ctx, bindTarget, newBufObj);
1414 }
1415
1416
1417 /**
1418 * Update the default buffer objects in the given context to reference those
1419 * specified in the shared state and release those referencing the old
1420 * shared state.
1421 */
1422 void
1423 _mesa_update_default_objects_buffer_objects(struct gl_context *ctx)
1424 {
1425 /* Bind 0 to remove references to those in the shared context hash table. */
1426 bind_buffer_object(ctx, &ctx->Array.ArrayBufferObj, 0, false);
1427 bind_buffer_object(ctx, &ctx->Array.VAO->IndexBufferObj, 0, false);
1428 bind_buffer_object(ctx, &ctx->Pack.BufferObj, 0, false);
1429 bind_buffer_object(ctx, &ctx->Unpack.BufferObj, 0, false);
1430 }
1431
1432
1433
1434 /**
1435 * Return the gl_buffer_object for the given ID.
1436 * Always return NULL for ID 0.
1437 */
1438 struct gl_buffer_object *
1439 _mesa_lookup_bufferobj(struct gl_context *ctx, GLuint buffer)
1440 {
1441 if (buffer == 0)
1442 return NULL;
1443 else
1444 return (struct gl_buffer_object *)
1445 _mesa_HashLookupMaybeLocked(ctx->Shared->BufferObjects, buffer,
1446 ctx->BufferObjectsLocked);
1447 }
1448
1449
1450 struct gl_buffer_object *
1451 _mesa_lookup_bufferobj_locked(struct gl_context *ctx, GLuint buffer)
1452 {
1453 if (buffer == 0)
1454 return NULL;
1455 else
1456 return (struct gl_buffer_object *)
1457 _mesa_HashLookupLocked(ctx->Shared->BufferObjects, buffer);
1458 }
1459
1460 /**
1461 * A convenience function for direct state access functions that throws
1462 * GL_INVALID_OPERATION if buffer is not the name of an existing
1463 * buffer object.
1464 */
1465 struct gl_buffer_object *
1466 _mesa_lookup_bufferobj_err(struct gl_context *ctx, GLuint buffer,
1467 const char *caller)
1468 {
1469 struct gl_buffer_object *bufObj;
1470
1471 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
1472 if (!bufObj || bufObj == &DummyBufferObject) {
1473 _mesa_error(ctx, GL_INVALID_OPERATION,
1474 "%s(non-existent buffer object %u)", caller, buffer);
1475 return NULL;
1476 }
1477
1478 return bufObj;
1479 }
1480
1481
1482 /**
1483 * Look up a buffer object for a multi-bind function.
1484 *
1485 * Unlike _mesa_lookup_bufferobj(), this function also takes care
1486 * of generating an error if the buffer ID is not zero or the name
1487 * of an existing buffer object.
1488 *
1489 * If the buffer ID refers to an existing buffer object, a pointer
1490 * to the buffer object is returned. If the ID is zero, NULL is returned.
1491 * If the ID is not zero and does not refer to a valid buffer object, this
1492 * function returns NULL.
1493 *
1494 * This function assumes that the caller has already locked the
1495 * hash table mutex by calling
1496 * _mesa_HashLockMutex(ctx->Shared->BufferObjects).
1497 */
1498 struct gl_buffer_object *
1499 _mesa_multi_bind_lookup_bufferobj(struct gl_context *ctx,
1500 const GLuint *buffers,
1501 GLuint index, const char *caller,
1502 bool *error)
1503 {
1504 struct gl_buffer_object *bufObj = NULL;
1505
1506 *error = false;
1507
1508 if (buffers[index] != 0) {
1509 bufObj = _mesa_lookup_bufferobj_locked(ctx, buffers[index]);
1510
1511 /* The multi-bind functions don't create the buffer objects
1512 when they don't exist. */
1513 if (bufObj == &DummyBufferObject)
1514 bufObj = NULL;
1515
1516 if (!bufObj) {
1517 /* The ARB_multi_bind spec says:
1518 *
1519 * "An INVALID_OPERATION error is generated if any value
1520 * in <buffers> is not zero or the name of an existing
1521 * buffer object (per binding)."
1522 */
1523 _mesa_error(ctx, GL_INVALID_OPERATION,
1524 "%s(buffers[%u]=%u is not zero or the name "
1525 "of an existing buffer object)",
1526 caller, index, buffers[index]);
1527 *error = true;
1528 }
1529 }
1530
1531 return bufObj;
1532 }
1533
1534
1535 /**
1536 * If *ptr points to obj, set ptr = the Null/default buffer object.
1537 * This is a helper for buffer object deletion.
1538 * The GL spec says that deleting a buffer object causes it to get
1539 * unbound from all arrays in the current context.
1540 */
1541 static void
1542 unbind(struct gl_context *ctx,
1543 struct gl_vertex_array_object *vao, unsigned index,
1544 struct gl_buffer_object *obj)
1545 {
1546 if (vao->BufferBinding[index].BufferObj == obj) {
1547 _mesa_bind_vertex_buffer(ctx, vao, index, NULL,
1548 vao->BufferBinding[index].Offset,
1549 vao->BufferBinding[index].Stride, true, false);
1550 }
1551 }
1552
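/**
 * Unmap every active mapping (user and internal) of the given buffer object
 * and clear the corresponding access flags.
 */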
1553 void
1554 _mesa_buffer_unmap_all_mappings(struct gl_context *ctx,
1555 struct gl_buffer_object *bufObj)
1556 {
1557 for (int i = 0; i < MAP_COUNT; i++) {
1558 if (_mesa_bufferobj_mapped(bufObj, i)) {
1559 _mesa_bufferobj_unmap(ctx, bufObj, i);
1560 assert(bufObj->Mappings[i].Pointer == NULL);
1561 bufObj->Mappings[i].AccessFlags = 0;
1562 }
1563 }
1564 }
1565
1566
1567 /**********************************************************************/
1568 /* API Functions */
1569 /**********************************************************************/
1570
1571 void GLAPIENTRY
1572 _mesa_BindBuffer_no_error(GLenum target, GLuint buffer)
1573 {
1574 GET_CURRENT_CONTEXT(ctx);
1575
1576 struct gl_buffer_object **bindTarget = get_buffer_target(ctx, target);
1577 bind_buffer_object(ctx, bindTarget, buffer, true);
1578 }
1579
1580
1581 void GLAPIENTRY
1582 _mesa_BindBuffer(GLenum target, GLuint buffer)
1583 {
1584 GET_CURRENT_CONTEXT(ctx);
1585
1586 if (MESA_VERBOSE & VERBOSE_API) {
1587 _mesa_debug(ctx, "glBindBuffer(%s, %u)\n",
1588 _mesa_enum_to_string(target), buffer);
1589 }
1590
1591 struct gl_buffer_object **bindTarget = get_buffer_target(ctx, target);
1592 if (!bindTarget) {
1593 _mesa_error(ctx, GL_INVALID_ENUM, "glBindBufferARB(target %s)",
1594 _mesa_enum_to_string(target));
1595 return;
1596 }
1597
1598 bind_buffer_object(ctx, bindTarget, buffer, false);
1599 }
1600
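/**
 * Internal entry point for binding an element array buffer without going
 * through the glBindBuffer() name lookup; the caller's reference to buf is
 * transferred to the binding point.
 */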
1601 void
1602 _mesa_InternalBindElementBuffer(struct gl_context *ctx,
1603 struct gl_buffer_object *buf)
1604 {
1605 struct gl_buffer_object **bindTarget =
1606 get_buffer_target(ctx, GL_ELEMENT_ARRAY_BUFFER);
1607
1608 /* Move the buffer reference from the parameter to the bind point. */
1609 _mesa_reference_buffer_object(ctx, bindTarget, NULL);
1610 if (buf)
1611 *bindTarget = buf;
1612 }
1613
1614 /**
1615 * Binds a buffer object to a binding point.
1616 *
1617 * The caller is responsible for validating the offset,
1618 * flushing the vertices and updating NewDriverState.
1619 */
1620 static void
1621 set_buffer_binding(struct gl_context *ctx,
1622 struct gl_buffer_binding *binding,
1623 struct gl_buffer_object *bufObj,
1624 GLintptr offset,
1625 GLsizeiptr size,
1626 bool autoSize, gl_buffer_usage usage)
1627 {
1628 _mesa_reference_buffer_object(ctx, &binding->BufferObject, bufObj);
1629
1630 binding->Offset = offset;
1631 binding->Size = size;
1632 binding->AutomaticSize = autoSize;
1633
1634 /* If this is a real buffer object, mark it as having been used
1635 * at some point with the given buffer usage.
1636 */
1637 if (size >= 0)
1638 bufObj->UsageHistory |= usage;
1639 }
1640
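/**
 * Multi-bind helper: reuse the currently bound buffer when its name matches
 * buffers[idx], otherwise look the name up (reporting GL_INVALID_OPERATION
 * for unknown names) before updating the binding.
 */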
1641 static void
1642 set_buffer_multi_binding(struct gl_context *ctx,
1643 const GLuint *buffers,
1644 int idx,
1645 const char *caller,
1646 struct gl_buffer_binding *binding,
1647 GLintptr offset,
1648 GLsizeiptr size,
1649 bool range,
1650 gl_buffer_usage usage)
1651 {
1652 struct gl_buffer_object *bufObj;
1653
1654 if (binding->BufferObject && binding->BufferObject->Name == buffers[idx])
1655 bufObj = binding->BufferObject;
1656 else {
1657 bool error;
1658 bufObj = _mesa_multi_bind_lookup_bufferobj(ctx, buffers, idx, caller,
1659 &error);
1660 if (error)
1661 return;
1662 }
1663
1664 if (!bufObj)
1665 set_buffer_binding(ctx, binding, bufObj, -1, -1, !range, usage);
1666 else
1667 set_buffer_binding(ctx, binding, bufObj, offset, size, !range, usage);
1668 }
1669
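/**
 * Common helper for the indexed bind_*_buffer() functions below: skips the
 * vertex flush, driver-state update and rebinding when the binding is
 * unchanged.
 */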
1670 static void
1671 bind_buffer(struct gl_context *ctx,
1672 struct gl_buffer_binding *binding,
1673 struct gl_buffer_object *bufObj,
1674 GLintptr offset,
1675 GLsizeiptr size,
1676 GLboolean autoSize,
1677 uint64_t driver_state,
1678 gl_buffer_usage usage)
1679 {
1680 if (binding->BufferObject == bufObj &&
1681 binding->Offset == offset &&
1682 binding->Size == size &&
1683 binding->AutomaticSize == autoSize) {
1684 return;
1685 }
1686
1687 FLUSH_VERTICES(ctx, 0, 0);
1688 ctx->NewDriverState |= driver_state;
1689
1690 set_buffer_binding(ctx, binding, bufObj, offset, size, autoSize, usage);
1691 }
1692
1693 /**
1694 * Binds a buffer object to a uniform buffer binding point.
1695 *
1696 * Unlike set_buffer_binding(), this function also flushes vertices
1697 * and updates NewDriverState. It also checks if the binding
1698 * has actually changed before updating it.
1699 */
1700 static void
1701 bind_uniform_buffer(struct gl_context *ctx,
1702 GLuint index,
1703 struct gl_buffer_object *bufObj,
1704 GLintptr offset,
1705 GLsizeiptr size,
1706 GLboolean autoSize)
1707 {
1708 bind_buffer(ctx, &ctx->UniformBufferBindings[index],
1709 bufObj, offset, size, autoSize,
1710 ST_NEW_UNIFORM_BUFFER,
1711 USAGE_UNIFORM_BUFFER);
1712 }
1713
1714 /**
1715 * Binds a buffer object to a shader storage buffer binding point.
1716 *
1717 * Unlike set_ssbo_binding(), this function also flushes vertices
1718 * and updates NewDriverState. It also checks if the binding
1719 * has actually changed before updating it.
1720 */
1721 static void
1722 bind_shader_storage_buffer(struct gl_context *ctx,
1723 GLuint index,
1724 struct gl_buffer_object *bufObj,
1725 GLintptr offset,
1726 GLsizeiptr size,
1727 GLboolean autoSize)
1728 {
1729 bind_buffer(ctx, &ctx->ShaderStorageBufferBindings[index],
1730 bufObj, offset, size, autoSize,
1731 ST_NEW_STORAGE_BUFFER,
1732 USAGE_SHADER_STORAGE_BUFFER);
1733 }
1734
1735 /**
1736 * Binds a buffer object to an atomic buffer binding point.
1737 *
1738 * Unlike set_atomic_binding(), this function also flushes vertices
1739 * and updates NewDriverState. It also checks if the binding
1740 * has actually changed before updating it.
1741 */
1742 static void
1743 bind_atomic_buffer(struct gl_context *ctx, unsigned index,
1744 struct gl_buffer_object *bufObj, GLintptr offset,
1745 GLsizeiptr size, GLboolean autoSize)
1746 {
1747 bind_buffer(ctx, &ctx->AtomicBufferBindings[index],
1748 bufObj, offset, size, autoSize,
1749 ctx->DriverFlags.NewAtomicBuffer,
1750 USAGE_ATOMIC_COUNTER_BUFFER);
1751 }
1752
1753 /**
1754 * Bind a buffer object to a uniform block binding point.
1755 * As above, but offset = 0.
1756 */
1757 static void
1758 bind_buffer_base_uniform_buffer(struct gl_context *ctx,
1759 GLuint index,
1760 struct gl_buffer_object *bufObj)
1761 {
1762 if (index >= ctx->Const.MaxUniformBufferBindings) {
1763 _mesa_error(ctx, GL_INVALID_VALUE, "glBindBufferBase(index=%d)", index);
1764 return;
1765 }
1766
1767 _mesa_reference_buffer_object(ctx, &ctx->UniformBuffer, bufObj);
1768
1769 if (!bufObj)
1770 bind_uniform_buffer(ctx, index, bufObj, -1, -1, GL_TRUE);
1771 else
1772 bind_uniform_buffer(ctx, index, bufObj, 0, 0, GL_TRUE);
1773 }
1774
1775 /**
1776 * Bind a buffer object to a shader storage block binding point.
1777 * As above, but offset = 0.
1778 */
1779 static void
1780 bind_buffer_base_shader_storage_buffer(struct gl_context *ctx,
1781 GLuint index,
1782 struct gl_buffer_object *bufObj)
1783 {
1784 if (index >= ctx->Const.MaxShaderStorageBufferBindings) {
1785 _mesa_error(ctx, GL_INVALID_VALUE, "glBindBufferBase(index=%d)", index);
1786 return;
1787 }
1788
1789 _mesa_reference_buffer_object(ctx, &ctx->ShaderStorageBuffer, bufObj);
1790
1791 if (!bufObj)
1792 bind_shader_storage_buffer(ctx, index, bufObj, -1, -1, GL_TRUE);
1793 else
1794 bind_shader_storage_buffer(ctx, index, bufObj, 0, 0, GL_TRUE);
1795 }
1796
1797 /**
1798 * Bind a buffer object to an atomic counter buffer binding point.
1799 * As above, but offset = 0.
1800 */
1801 static void
1802 bind_buffer_base_atomic_buffer(struct gl_context *ctx,
1803 GLuint index,
1804 struct gl_buffer_object *bufObj)
1805 {
1806 if (index >= ctx->Const.MaxAtomicBufferBindings) {
1807 _mesa_error(ctx, GL_INVALID_VALUE, "glBindBufferBase(index=%d)", index);
1808 return;
1809 }
1810
1811 _mesa_reference_buffer_object(ctx, &ctx->AtomicBuffer, bufObj);
1812
1813 if (!bufObj)
1814 bind_atomic_buffer(ctx, index, bufObj, -1, -1, GL_TRUE);
1815 else
1816 bind_atomic_buffer(ctx, index, bufObj, 0, 0, GL_TRUE);
1817 }
1818
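/* Illustrative note on the glBindBufferBase path handled above: a call such as
 *
 *    glBindBufferBase(GL_UNIFORM_BUFFER, 0, ubo);
 *
 * (with "ubo" a hypothetical buffer name) binds the whole buffer to indexed
 * binding point 0.  The helpers above therefore pass offset = 0 with
 * autoSize = GL_TRUE so the binding is expected to track the buffer's full
 * size, and an index >= the corresponding MAX_*_BUFFER_BINDINGS limit is
 * rejected with GL_INVALID_VALUE.
 */
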
1819 /**
1820 * Delete a set of buffer objects.
1821 *
1822 * \param n Number of buffer objects to delete.
1823 * \param ids Array of \c n buffer object IDs.
1824 */
1825 static void
1826 delete_buffers(struct gl_context *ctx, GLsizei n, const GLuint *ids)
1827 {
1828 FLUSH_VERTICES(ctx, 0, 0);
1829
1830 _mesa_HashLockMaybeLocked(ctx->Shared->BufferObjects,
1831 ctx->BufferObjectsLocked);
1832 unreference_zombie_buffers_for_ctx(ctx);
1833
1834 for (GLsizei i = 0; i < n; i++) {
1835 struct gl_buffer_object *bufObj =
1836 _mesa_lookup_bufferobj_locked(ctx, ids[i]);
1837 if (bufObj) {
1838 struct gl_vertex_array_object *vao = ctx->Array.VAO;
1839 GLuint j;
1840
1841 assert(bufObj->Name == ids[i] || bufObj == &DummyBufferObject);
1842
1843 _mesa_buffer_unmap_all_mappings(ctx, bufObj);
1844
1845 /* unbind any vertex pointers bound to this buffer */
1846 for (j = 0; j < ARRAY_SIZE(vao->BufferBinding); j++) {
1847 unbind(ctx, vao, j, bufObj);
1848 }
1849
1850 if (ctx->Array.ArrayBufferObj == bufObj) {
1851 bind_buffer_object(ctx, &ctx->Array.ArrayBufferObj, 0, false);
1852 }
1853 if (vao->IndexBufferObj == bufObj) {
1854 bind_buffer_object(ctx, &vao->IndexBufferObj, 0, false);
1855 }
1856
1857 /* unbind ARB_draw_indirect binding point */
1858 if (ctx->DrawIndirectBuffer == bufObj) {
1859 bind_buffer_object(ctx, &ctx->DrawIndirectBuffer, 0, false);
1860 }
1861
1862 /* unbind ARB_indirect_parameters binding point */
1863 if (ctx->ParameterBuffer == bufObj) {
1864 bind_buffer_object(ctx, &ctx->ParameterBuffer, 0, false);
1865 }
1866
1867 /* unbind ARB_compute_shader binding point */
1868 if (ctx->DispatchIndirectBuffer == bufObj) {
1869 bind_buffer_object(ctx, &ctx->DispatchIndirectBuffer, 0, false);
1870 }
1871
1872 /* unbind ARB_copy_buffer binding points */
1873 if (ctx->CopyReadBuffer == bufObj) {
1874 bind_buffer_object(ctx, &ctx->CopyReadBuffer, 0, false);
1875 }
1876 if (ctx->CopyWriteBuffer == bufObj) {
1877 bind_buffer_object(ctx, &ctx->CopyWriteBuffer, 0, false);
1878 }
1879
1880 /* unbind transform feedback binding points */
1881 if (ctx->TransformFeedback.CurrentBuffer == bufObj) {
1882 bind_buffer_object(ctx, &ctx->TransformFeedback.CurrentBuffer, 0, false);
1883 }
1884 for (j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
1885 if (ctx->TransformFeedback.CurrentObject->Buffers[j] == bufObj) {
1886 _mesa_bind_buffer_base_transform_feedback(ctx,
1887 ctx->TransformFeedback.CurrentObject,
1888 j, NULL, false);
1889 }
1890 }
1891
1892 /* unbind UBO binding points */
1893 for (j = 0; j < ctx->Const.MaxUniformBufferBindings; j++) {
1894 if (ctx->UniformBufferBindings[j].BufferObject == bufObj) {
1895 bind_buffer_base_uniform_buffer(ctx, j, NULL);
1896 }
1897 }
1898
1899 if (ctx->UniformBuffer == bufObj) {
1900 bind_buffer_object(ctx, &ctx->UniformBuffer, 0, false);
1901 }
1902
1903 /* unbind SSBO binding points */
1904 for (j = 0; j < ctx->Const.MaxShaderStorageBufferBindings; j++) {
1905 if (ctx->ShaderStorageBufferBindings[j].BufferObject == bufObj) {
1906 bind_buffer_base_shader_storage_buffer(ctx, j, NULL);
1907 }
1908 }
1909
1910 if (ctx->ShaderStorageBuffer == bufObj) {
1911 bind_buffer_object(ctx, &ctx->ShaderStorageBuffer, 0, false);
1912 }
1913
1914 /* unbind atomic counter buffer binding points */
1915 for (j = 0; j < ctx->Const.MaxAtomicBufferBindings; j++) {
1916 if (ctx->AtomicBufferBindings[j].BufferObject == bufObj) {
1917 bind_buffer_base_atomic_buffer(ctx, j, NULL);
1918 }
1919 }
1920
1921 if (ctx->AtomicBuffer == bufObj) {
1922 bind_buffer_object(ctx, &ctx->AtomicBuffer, 0, false);
1923 }
1924
1925 /* unbind any pixel pack/unpack pointers bound to this buffer */
1926 if (ctx->Pack.BufferObj == bufObj) {
1927 bind_buffer_object(ctx, &ctx->Pack.BufferObj, 0, false);
1928 }
1929 if (ctx->Unpack.BufferObj == bufObj) {
1930 bind_buffer_object(ctx, &ctx->Unpack.BufferObj, 0, false);
1931 }
1932
1933 if (ctx->Texture.BufferObject == bufObj) {
1934 bind_buffer_object(ctx, &ctx->Texture.BufferObject, 0, false);
1935 }
1936
1937 if (ctx->ExternalVirtualMemoryBuffer == bufObj) {
1938 bind_buffer_object(ctx, &ctx->ExternalVirtualMemoryBuffer, 0, false);
1939 }
1940
1941 /* unbind query buffer binding point */
1942 if (ctx->QueryBuffer == bufObj) {
1943 bind_buffer_object(ctx, &ctx->QueryBuffer, 0, false);
1944 }
1945
1946 /* The ID is immediately freed for re-use */
1947 _mesa_HashRemoveLocked(ctx->Shared->BufferObjects, ids[i]);
1948 /* Make sure we do not run into the classic ABA problem on bind.
1949 * We don't want to allow re-binding a buffer object that's been
1950 * "deleted" by glDeleteBuffers().
1951 *
1952 * The explicit rebinding to the default object in the current context
1953 * prevents the above in the current context, but another context
1954 * sharing the same objects might suffer from this problem.
1955 * The alternative would be to do the hash lookup in any case on bind
1956 * which would introduce more runtime overhead than this.
1957 */
1958 bufObj->DeletePending = GL_TRUE;
1959
1960 /* The GLuint ID holds one reference and the context that created
1961 * the buffer holds the other one.
1962 */
1963 assert(p_atomic_read(&bufObj->RefCount) >= (bufObj->Ctx ? 2 : 1));
1964
1965 if (bufObj->Ctx == ctx) {
1966 detach_ctx_from_buffer(ctx, bufObj);
1967 } else if (bufObj->Ctx) {
1968 /* Only the context holding it can release it. */
1969 _mesa_set_add(ctx->Shared->ZombieBufferObjects, bufObj);
1970 }
1971
1972 _mesa_reference_buffer_object(ctx, &bufObj, NULL);
1973 }
1974 }
1975
1976 _mesa_HashUnlockMaybeLocked(ctx->Shared->BufferObjects,
1977 ctx->BufferObjectsLocked);
1978 }
1979
1980
1981 void GLAPIENTRY
1982 _mesa_DeleteBuffers_no_error(GLsizei n, const GLuint *ids)
1983 {
1984 GET_CURRENT_CONTEXT(ctx);
1985 delete_buffers(ctx, n, ids);
1986 }
1987
1988
1989 void GLAPIENTRY
1990 _mesa_DeleteBuffers(GLsizei n, const GLuint *ids)
1991 {
1992 GET_CURRENT_CONTEXT(ctx);
1993
1994 if (n < 0) {
1995 _mesa_error(ctx, GL_INVALID_VALUE, "glDeleteBuffersARB(n)");
1996 return;
1997 }
1998
1999 delete_buffers(ctx, n, ids);
2000 }
2001
2002
2003 /**
2004 * This is the implementation for glGenBuffers and glCreateBuffers. It is not
2005 * exposed to the rest of Mesa to encourage the use of nameless buffers in
2006 * driver internals.
2007 */
2008 static void
2009 create_buffers(struct gl_context *ctx, GLsizei n, GLuint *buffers, bool dsa)
2010 {
2011 struct gl_buffer_object *buf;
2012
2013 if (!buffers)
2014 return;
2015
2016 /*
2017 * This must be atomic (generation and allocation of buffer object IDs)
2018 */
2019 _mesa_HashLockMaybeLocked(ctx->Shared->BufferObjects,
2020 ctx->BufferObjectsLocked);
2021 /* If one context only creates buffers and another context only deletes
2022 * buffers, buffers don't get released because it only produces zombie
2023 * buffers. Only the context that has created the buffers can release
2024 * them. Thus, when we create buffers, we prune the list of zombie
2025 * buffers.
2026 */
2027 unreference_zombie_buffers_for_ctx(ctx);
2028
2029 _mesa_HashFindFreeKeys(ctx->Shared->BufferObjects, buffers, n);
2030
2031 /* Insert the ID and pointer into the hash table. If non-DSA, insert a
2032 * DummyBufferObject. Otherwise, create a new buffer object and insert
2033 * it.
2034 */
2035 for (int i = 0; i < n; i++) {
2036 if (dsa) {
2037 buf = new_gl_buffer_object(ctx, buffers[i]);
2038 if (!buf) {
2039 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glCreateBuffers");
2040 _mesa_HashUnlockMaybeLocked(ctx->Shared->BufferObjects,
2041 ctx->BufferObjectsLocked);
2042 return;
2043 }
2044 }
2045 else
2046 buf = &DummyBufferObject;
2047
2048 _mesa_HashInsertLocked(ctx->Shared->BufferObjects, buffers[i], buf, true);
2049 }
2050
2051 _mesa_HashUnlockMaybeLocked(ctx->Shared->BufferObjects,
2052 ctx->BufferObjectsLocked);
2053 }
2054
2055
2056 static void
2057 create_buffers_err(struct gl_context *ctx, GLsizei n, GLuint *buffers, bool dsa)
2058 {
2059 const char *func = dsa ? "glCreateBuffers" : "glGenBuffers";
2060
2061 if (MESA_VERBOSE & VERBOSE_API)
2062 _mesa_debug(ctx, "%s(%d)\n", func, n);
2063
2064 if (n < 0) {
2065 _mesa_error(ctx, GL_INVALID_VALUE, "%s(n %d < 0)", func, n);
2066 return;
2067 }
2068
2069 create_buffers(ctx, n, buffers, dsa);
2070 }
2071
2072 /**
2073 * Generate a set of unique buffer object IDs and store them in \c buffers.
2074 *
2075 * \param n Number of IDs to generate.
2076 * \param buffers Array of \c n locations to store the IDs.
2077 */
2078 void GLAPIENTRY
2079 _mesa_GenBuffers_no_error(GLsizei n, GLuint *buffers)
2080 {
2081 GET_CURRENT_CONTEXT(ctx);
2082 create_buffers(ctx, n, buffers, false);
2083 }
2084
2085
2086 void GLAPIENTRY
2087 _mesa_GenBuffers(GLsizei n, GLuint *buffers)
2088 {
2089 GET_CURRENT_CONTEXT(ctx);
2090 create_buffers_err(ctx, n, buffers, false);
2091 }
2092
2093 /**
2094 * Create a set of buffer objects and store their unique IDs in \c buffers.
2095 *
2096 * \param n Number of IDs to generate.
2097 * \param buffers Array of \c n locations to store the IDs.
2098 */
2099 void GLAPIENTRY
2100 _mesa_CreateBuffers_no_error(GLsizei n, GLuint *buffers)
2101 {
2102 GET_CURRENT_CONTEXT(ctx);
2103 create_buffers(ctx, n, buffers, true);
2104 }
2105
2106
2107 void GLAPIENTRY
2108 _mesa_CreateBuffers(GLsizei n, GLuint *buffers)
2109 {
2110 GET_CURRENT_CONTEXT(ctx);
2111 create_buffers_err(ctx, n, buffers, true);
2112 }
2113
2114
2115 /**
2116 * Determine if ID is the name of a buffer object.
2117 *
2118 * \param id ID of the potential buffer object.
2119 * \return \c GL_TRUE if \c id is the name of a buffer object,
2120 * \c GL_FALSE otherwise.
2121 */
2122 GLboolean GLAPIENTRY
2123 _mesa_IsBuffer(GLuint id)
2124 {
2125 struct gl_buffer_object *bufObj;
2126 GET_CURRENT_CONTEXT(ctx);
2127 ASSERT_OUTSIDE_BEGIN_END_WITH_RETVAL(ctx, GL_FALSE);
2128
2129 bufObj = _mesa_lookup_bufferobj(ctx, id);
2130
2131 return bufObj && bufObj != &DummyBufferObject;
2132 }
2133
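/* Usage note for the check above (illustrative): a name reserved by
 * glGenBuffers does not become a buffer object until its first bind, e.g.
 *
 *    GLuint id;
 *    glGenBuffers(1, &id);
 *    glIsBuffer(id);                     // GL_FALSE: name only, no object yet
 *    glBindBuffer(GL_ARRAY_BUFFER, id);
 *    glIsBuffer(id);                     // GL_TRUE
 *
 * which is why a hash entry still pointing at DummyBufferObject is not
 * reported as a buffer.
 */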
2134
2135 static bool
2136 validate_buffer_storage(struct gl_context *ctx,
2137 struct gl_buffer_object *bufObj, GLsizeiptr size,
2138 GLbitfield flags, const char *func)
2139 {
2140 if (size <= 0) {
2141 _mesa_error(ctx, GL_INVALID_VALUE, "%s(size <= 0)", func);
2142 return false;
2143 }
2144
2145 GLbitfield valid_flags = GL_MAP_READ_BIT |
2146 GL_MAP_WRITE_BIT |
2147 GL_MAP_PERSISTENT_BIT |
2148 GL_MAP_COHERENT_BIT |
2149 GL_DYNAMIC_STORAGE_BIT |
2150 GL_CLIENT_STORAGE_BIT;
2151
2152 if (ctx->Extensions.ARB_sparse_buffer)
2153 valid_flags |= GL_SPARSE_STORAGE_BIT_ARB;
2154
2155 if (flags & ~valid_flags) {
2156 _mesa_error(ctx, GL_INVALID_VALUE, "%s(invalid flag bits set)", func);
2157 return false;
2158 }
2159
2160 /* The Errors section of the GL_ARB_sparse_buffer spec says:
2161 *
2162 * "INVALID_VALUE is generated by BufferStorage if <flags> contains
2163 * SPARSE_STORAGE_BIT_ARB and <flags> also contains any combination of
2164 * MAP_READ_BIT or MAP_WRITE_BIT."
2165 */
2166 if (flags & GL_SPARSE_STORAGE_BIT_ARB &&
2167 flags & (GL_MAP_READ_BIT | GL_MAP_WRITE_BIT)) {
2168 _mesa_error(ctx, GL_INVALID_VALUE, "%s(SPARSE_STORAGE and READ/WRITE)", func);
2169 return false;
2170 }
2171
2172 if (flags & GL_MAP_PERSISTENT_BIT &&
2173 !(flags & (GL_MAP_READ_BIT | GL_MAP_WRITE_BIT))) {
2174 _mesa_error(ctx, GL_INVALID_VALUE,
2175 "%s(PERSISTENT and flags!=READ/WRITE)", func);
2176 return false;
2177 }
2178
2179 if (flags & GL_MAP_COHERENT_BIT && !(flags & GL_MAP_PERSISTENT_BIT)) {
2180 _mesa_error(ctx, GL_INVALID_VALUE,
2181 "%s(COHERENT and flags!=PERSISTENT)", func);
2182 return false;
2183 }
2184
2185 if (bufObj->Immutable || bufObj->HandleAllocated) {
2186 _mesa_error(ctx, GL_INVALID_OPERATION, "%s(immutable)", func);
2187 return false;
2188 }
2189
2190 return true;
2191 }
2192
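/* Illustrative flag combination accepted by the checks above: a typical
 * persistently mapped, coherent store is created with
 *
 *    glBufferStorage(GL_ARRAY_BUFFER, size, NULL,
 *                    GL_MAP_WRITE_BIT |
 *                    GL_MAP_PERSISTENT_BIT |
 *                    GL_MAP_COHERENT_BIT);
 *
 * (size being whatever the application needs).  GL_MAP_COHERENT_BIT without
 * GL_MAP_PERSISTENT_BIT, or GL_MAP_PERSISTENT_BIT without a READ/WRITE bit,
 * fails with GL_INVALID_VALUE as enforced above.
 */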
2193
2194 static void
2195 buffer_storage(struct gl_context *ctx, struct gl_buffer_object *bufObj,
2196 struct gl_memory_object *memObj, GLenum target,
2197 GLsizeiptr size, const GLvoid *data, GLbitfield flags,
2198 GLuint64 offset, const char *func)
2199 {
2200 GLboolean res;
2201
2202 /* Unmap the existing buffer. We'll replace it now. Not an error. */
2203 _mesa_buffer_unmap_all_mappings(ctx, bufObj);
2204
2205 FLUSH_VERTICES(ctx, 0, 0);
2206
2207 bufObj->Written = GL_TRUE;
2208 bufObj->Immutable = GL_TRUE;
2209 bufObj->MinMaxCacheDirty = true;
2210
2211 if (memObj) {
2212 res = bufferobj_data_mem(ctx, target, size, memObj, offset,
2213 GL_DYNAMIC_DRAW, bufObj);
2214 }
2215 else {
2216 res = _mesa_bufferobj_data(ctx, target, size, data, GL_DYNAMIC_DRAW,
2217 flags, bufObj);
2218 }
2219
2220 if (!res) {
2221 if (target == GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD) {
2222 /* Even though the interaction between AMD_pinned_memory and
2223 * glBufferStorage is not described in the spec, Graham Sellers
2224 * said that it should behave the same as glBufferData.
2225 */
2226 _mesa_error(ctx, GL_INVALID_OPERATION, "%s", func);
2227 }
2228 else {
2229 _mesa_error(ctx, GL_OUT_OF_MEMORY, "%s", func);
2230 }
2231 }
2232 }
2233
2234
2235 static ALWAYS_INLINE void
2236 inlined_buffer_storage(GLenum target, GLuint buffer, GLsizeiptr size,
2237 const GLvoid *data, GLbitfield flags,
2238 GLuint memory, GLuint64 offset,
2239 bool dsa, bool mem, bool no_error, const char *func)
2240 {
2241 GET_CURRENT_CONTEXT(ctx);
2242 struct gl_buffer_object *bufObj;
2243 struct gl_memory_object *memObj = NULL;
2244
2245 if (mem) {
2246 if (!no_error) {
2247 if (!ctx->Extensions.EXT_memory_object) {
2248 _mesa_error(ctx, GL_INVALID_OPERATION, "%s(unsupported)", func);
2249 return;
2250 }
2251
2252 /* From the EXT_external_objects spec:
2253 *
2254 * "An INVALID_VALUE error is generated by BufferStorageMemEXT and
2255 * NamedBufferStorageMemEXT if <memory> is 0, or ..."
2256 */
2257 if (memory == 0) {
2258 _mesa_error(ctx, GL_INVALID_VALUE, "%s(memory == 0)", func);
2259 }
2260 }
2261
2262 memObj = _mesa_lookup_memory_object(ctx, memory);
2263 if (!memObj)
2264 return;
2265
2266 /* From the EXT_external_objects spec:
2267 *
2268 * "An INVALID_OPERATION error is generated if <memory> names a
2269 * valid memory object which has no associated memory."
2270 */
2271 if (!no_error && !memObj->Immutable) {
2272 _mesa_error(ctx, GL_INVALID_OPERATION, "%s(no associated memory)",
2273 func);
2274 return;
2275 }
2276 }
2277
2278 if (dsa) {
2279 if (no_error) {
2280 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2281 } else {
2282 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer, func);
2283 if (!bufObj)
2284 return;
2285 }
2286 } else {
2287 if (no_error) {
2288 struct gl_buffer_object **bufObjPtr = get_buffer_target(ctx, target);
2289 bufObj = *bufObjPtr;
2290 } else {
2291 bufObj = get_buffer(ctx, func, target, GL_INVALID_OPERATION);
2292 if (!bufObj)
2293 return;
2294 }
2295 }
2296
2297 if (no_error || validate_buffer_storage(ctx, bufObj, size, flags, func))
2298 buffer_storage(ctx, bufObj, memObj, target, size, data, flags, offset, func);
2299 }
2300
2301
2302 void GLAPIENTRY
2303 _mesa_BufferStorage_no_error(GLenum target, GLsizeiptr size,
2304 const GLvoid *data, GLbitfield flags)
2305 {
2306 inlined_buffer_storage(target, 0, size, data, flags, GL_NONE, 0,
2307 false, false, true, "glBufferStorage");
2308 }
2309
2310
2311 void GLAPIENTRY
2312 _mesa_BufferStorage(GLenum target, GLsizeiptr size, const GLvoid *data,
2313 GLbitfield flags)
2314 {
2315 inlined_buffer_storage(target, 0, size, data, flags, GL_NONE, 0,
2316 false, false, false, "glBufferStorage");
2317 }
2318
2319 void GLAPIENTRY
2320 _mesa_NamedBufferStorageEXT(GLuint buffer, GLsizeiptr size,
2321 const GLvoid *data, GLbitfield flags)
2322 {
2323 GET_CURRENT_CONTEXT(ctx);
2324
2325 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2326 if (!_mesa_handle_bind_buffer_gen(ctx, buffer,
2327 &bufObj, "glNamedBufferStorageEXT", false))
2328 return;
2329
2330 inlined_buffer_storage(GL_NONE, buffer, size, data, flags, GL_NONE, 0,
2331 true, false, false, "glNamedBufferStorageEXT");
2332 }
2333
2334
2335 void GLAPIENTRY
2336 _mesa_BufferStorageMemEXT(GLenum target, GLsizeiptr size,
2337 GLuint memory, GLuint64 offset)
2338 {
2339 inlined_buffer_storage(target, 0, size, NULL, 0, memory, offset,
2340 false, true, false, "glBufferStorageMemEXT");
2341 }
2342
2343
2344 void GLAPIENTRY
2345 _mesa_BufferStorageMemEXT_no_error(GLenum target, GLsizeiptr size,
2346 GLuint memory, GLuint64 offset)
2347 {
2348 inlined_buffer_storage(target, 0, size, NULL, 0, memory, offset,
2349 false, true, true, "glBufferStorageMemEXT");
2350 }
2351
2352
2353 void GLAPIENTRY
2354 _mesa_NamedBufferStorage_no_error(GLuint buffer, GLsizeiptr size,
2355 const GLvoid *data, GLbitfield flags)
2356 {
2357 /* In direct state access, buffer objects have an unspecified target
2358 * since they are not required to be bound.
2359 */
2360 inlined_buffer_storage(GL_NONE, buffer, size, data, flags, GL_NONE, 0,
2361 true, false, true, "glNamedBufferStorage");
2362 }
2363
2364
2365 void GLAPIENTRY
2366 _mesa_NamedBufferStorage(GLuint buffer, GLsizeiptr size, const GLvoid *data,
2367 GLbitfield flags)
2368 {
2369 /* In direct state access, buffer objects have an unspecified target
2370 * since they are not required to be bound.
2371 */
2372 inlined_buffer_storage(GL_NONE, buffer, size, data, flags, GL_NONE, 0,
2373 true, false, false, "glNamedBufferStorage");
2374 }
2375
2376 void GLAPIENTRY
2377 _mesa_NamedBufferStorageMemEXT(GLuint buffer, GLsizeiptr size,
2378 GLuint memory, GLuint64 offset)
2379 {
2380 inlined_buffer_storage(GL_NONE, buffer, size, NULL, 0, memory, offset,
2381 true, true, false, "glNamedBufferStorageMemEXT");
2382 }
2383
2384
2385 void GLAPIENTRY
2386 _mesa_NamedBufferStorageMemEXT_no_error(GLuint buffer, GLsizeiptr size,
2387 GLuint memory, GLuint64 offset)
2388 {
2389 inlined_buffer_storage(GL_NONE, buffer, size, NULL, 0, memory, offset,
2390 true, true, true, "glNamedBufferStorageMemEXT");
2391 }
2392
2393
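/* Interaction with glBufferData (sketch of the checks in buffer_data()
 * below): once a store has been created with glBufferStorage, the buffer is
 * marked Immutable, so a later
 *
 *    glBufferData(GL_ARRAY_BUFFER, size, data, GL_STATIC_DRAW);
 *
 * on the same buffer generates GL_INVALID_OPERATION instead of replacing
 * the store.
 */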
2394 static ALWAYS_INLINE void
2395 buffer_data(struct gl_context *ctx, struct gl_buffer_object *bufObj,
2396 GLenum target, GLsizeiptr size, const GLvoid *data, GLenum usage,
2397 const char *func, bool no_error)
2398 {
2399 bool valid_usage;
2400
2401 if (MESA_VERBOSE & VERBOSE_API) {
2402 _mesa_debug(ctx, "%s(%s, %ld, %p, %s)\n",
2403 func,
2404 _mesa_enum_to_string(target),
2405 (long int) size, data,
2406 _mesa_enum_to_string(usage));
2407 }
2408
2409 if (!no_error) {
2410 if (size < 0) {
2411 _mesa_error(ctx, GL_INVALID_VALUE, "%s(size < 0)", func);
2412 return;
2413 }
2414
2415 switch (usage) {
2416 case GL_STREAM_DRAW_ARB:
2417 valid_usage = (ctx->API != API_OPENGLES);
2418 break;
2419 case GL_STATIC_DRAW_ARB:
2420 case GL_DYNAMIC_DRAW_ARB:
2421 valid_usage = true;
2422 break;
2423 case GL_STREAM_READ_ARB:
2424 case GL_STREAM_COPY_ARB:
2425 case GL_STATIC_READ_ARB:
2426 case GL_STATIC_COPY_ARB:
2427 case GL_DYNAMIC_READ_ARB:
2428 case GL_DYNAMIC_COPY_ARB:
2429 valid_usage = _mesa_is_desktop_gl(ctx) || _mesa_is_gles3(ctx);
2430 break;
2431 default:
2432 valid_usage = false;
2433 break;
2434 }
2435
2436 if (!valid_usage) {
2437 _mesa_error(ctx, GL_INVALID_ENUM, "%s(invalid usage: %s)", func,
2438 _mesa_enum_to_string(usage));
2439 return;
2440 }
2441
2442 if (bufObj->Immutable || bufObj->HandleAllocated) {
2443 _mesa_error(ctx, GL_INVALID_OPERATION, "%s(immutable)", func);
2444 return;
2445 }
2446 }
2447
2448 /* Unmap the existing buffer. We'll replace it now. Not an error. */
2449 _mesa_buffer_unmap_all_mappings(ctx, bufObj);
2450
2451 FLUSH_VERTICES(ctx, 0, 0);
2452
2453 bufObj->Written = GL_TRUE;
2454 bufObj->MinMaxCacheDirty = true;
2455
2456 #ifdef VBO_DEBUG
2457 printf("glBufferDataARB(%u, sz %ld, from %p, usage 0x%x)\n",
2458 bufObj->Name, size, data, usage);
2459 #endif
2460
2461 #ifdef BOUNDS_CHECK
2462 size += 100;
2463 #endif
2464
2465 if (!_mesa_bufferobj_data(ctx, target, size, data, usage,
2466 GL_MAP_READ_BIT |
2467 GL_MAP_WRITE_BIT |
2468 GL_DYNAMIC_STORAGE_BIT,
2469 bufObj)) {
2470 if (target == GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD) {
2471 if (!no_error) {
2472 /* From GL_AMD_pinned_memory:
2473 *
2474 * INVALID_OPERATION is generated by BufferData if <target> is
2475 * EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD, and the store cannot be
2476 * mapped to the GPU address space.
2477 */
2478 _mesa_error(ctx, GL_INVALID_OPERATION, "%s", func);
2479 }
2480 } else {
2481 _mesa_error(ctx, GL_OUT_OF_MEMORY, "%s", func);
2482 }
2483 }
2484 }
2485
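/* Typical call shape validated by buffer_data() above (illustrative only):
 *
 *    static const GLfloat verts[6] = { 0.0f, 0.5f, -0.5f, -0.5f, 0.5f, -0.5f };
 *    glBufferData(GL_ARRAY_BUFFER, sizeof(verts), verts, GL_STATIC_DRAW);
 *
 * Per the usage switch above, GL_STREAM_DRAW is rejected only on OpenGL ES 1.x,
 * and the READ/COPY usages require desktop GL or ES 3.0+.
 */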
2486 static void
2487 buffer_data_error(struct gl_context *ctx, struct gl_buffer_object *bufObj,
2488 GLenum target, GLsizeiptr size, const GLvoid *data,
2489 GLenum usage, const char *func)
2490 {
2491 buffer_data(ctx, bufObj, target, size, data, usage, func, false);
2492 }
2493
2494 static void
2495 buffer_data_no_error(struct gl_context *ctx, struct gl_buffer_object *bufObj,
2496 GLenum target, GLsizeiptr size, const GLvoid *data,
2497 GLenum usage, const char *func)
2498 {
2499 buffer_data(ctx, bufObj, target, size, data, usage, func, true);
2500 }
2501
2502 void
2503 _mesa_buffer_data(struct gl_context *ctx, struct gl_buffer_object *bufObj,
2504 GLenum target, GLsizeiptr size, const GLvoid *data,
2505 GLenum usage, const char *func)
2506 {
2507 buffer_data_error(ctx, bufObj, target, size, data, usage, func);
2508 }
2509
2510 void GLAPIENTRY
2511 _mesa_BufferData_no_error(GLenum target, GLsizeiptr size, const GLvoid *data,
2512 GLenum usage)
2513 {
2514 GET_CURRENT_CONTEXT(ctx);
2515
2516 struct gl_buffer_object **bufObj = get_buffer_target(ctx, target);
2517 buffer_data_no_error(ctx, *bufObj, target, size, data, usage,
2518 "glBufferData");
2519 }
2520
2521 void GLAPIENTRY
2522 _mesa_BufferData(GLenum target, GLsizeiptr size,
2523 const GLvoid *data, GLenum usage)
2524 {
2525 GET_CURRENT_CONTEXT(ctx);
2526 struct gl_buffer_object *bufObj;
2527
2528 bufObj = get_buffer(ctx, "glBufferData", target, GL_INVALID_OPERATION);
2529 if (!bufObj)
2530 return;
2531
2532 _mesa_buffer_data(ctx, bufObj, target, size, data, usage,
2533 "glBufferData");
2534 }
2535
2536 void GLAPIENTRY
2537 _mesa_NamedBufferData_no_error(GLuint buffer, GLsizeiptr size,
2538 const GLvoid *data, GLenum usage)
2539 {
2540 GET_CURRENT_CONTEXT(ctx);
2541
2542 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2543 buffer_data_no_error(ctx, bufObj, GL_NONE, size, data, usage,
2544 "glNamedBufferData");
2545 }
2546
2547 void GLAPIENTRY
2548 _mesa_NamedBufferData(GLuint buffer, GLsizeiptr size, const GLvoid *data,
2549 GLenum usage)
2550 {
2551 GET_CURRENT_CONTEXT(ctx);
2552 struct gl_buffer_object *bufObj;
2553
2554 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer, "glNamedBufferData");
2555 if (!bufObj)
2556 return;
2557
2558 /* In direct state access, buffer objects have an unspecified target since
2559 * they are not required to be bound.
2560 */
2561 _mesa_buffer_data(ctx, bufObj, GL_NONE, size, data, usage,
2562 "glNamedBufferData");
2563 }
2564
2565 void GLAPIENTRY
2566 _mesa_NamedBufferDataEXT(GLuint buffer, GLsizeiptr size, const GLvoid *data,
2567 GLenum usage)
2568 {
2569 GET_CURRENT_CONTEXT(ctx);
2570 struct gl_buffer_object *bufObj;
2571
2572 if (!buffer) {
2573 _mesa_error(ctx, GL_INVALID_OPERATION,
2574 "glNamedBufferDataEXT(buffer=0)");
2575 return;
2576 }
2577
2578 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2579 if (!_mesa_handle_bind_buffer_gen(ctx, buffer,
2580 &bufObj, "glNamedBufferDataEXT", false))
2581 return;
2582
2583 _mesa_buffer_data(ctx, bufObj, GL_NONE, size, data, usage,
2584 "glNamedBufferDataEXT");
2585 }
2586
2587 static bool
2588 validate_buffer_sub_data(struct gl_context *ctx,
2589 struct gl_buffer_object *bufObj,
2590 GLintptr offset, GLsizeiptr size,
2591 const char *func)
2592 {
2593 if (!buffer_object_subdata_range_good(ctx, bufObj, offset, size,
2594 true, func)) {
2595 /* error already recorded */
2596 return false;
2597 }
2598
2599 if (bufObj->Immutable &&
2600 !(bufObj->StorageFlags & GL_DYNAMIC_STORAGE_BIT)) {
2601 _mesa_error(ctx, GL_INVALID_OPERATION, "%s", func);
2602 return false;
2603 }
2604
2605 if ((bufObj->Usage == GL_STATIC_DRAW ||
2606 bufObj->Usage == GL_STATIC_COPY) &&
2607 bufObj->NumSubDataCalls >= BUFFER_WARNING_CALL_COUNT - 1) {
2608 /* If the application declared the buffer as static draw/copy or stream
2609 * draw, it should not be frequently modified with glBufferSubData.
2610 */
2611 BUFFER_USAGE_WARNING(ctx,
2612 "using %s(buffer %u, offset %u, size %u) to "
2613 "update a %s buffer",
2614 func, bufObj->Name, offset, size,
2615 _mesa_enum_to_string(bufObj->Usage));
2616 }
2617
2618 return true;
2619 }
2620
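/* Illustrative consequences of the validation above: a store created with
 *
 *    glBufferStorage(GL_ARRAY_BUFFER, size, NULL, GL_MAP_WRITE_BIT);
 *
 * cannot be updated via glBufferSubData (GL_INVALID_OPERATION) because
 * GL_DYNAMIC_STORAGE_BIT was not requested, while frequent glBufferSubData
 * calls on a GL_STATIC_DRAW/GL_STATIC_COPY buffer only trigger the
 * performance warning, not an error.
 */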
2621
2622 /**
2623 * Implementation for glBufferSubData and glNamedBufferSubData.
2624 *
2625 * \param ctx GL context.
2626 * \param bufObj The buffer object.
2627 * \param offset Offset of the first byte of the subdata range.
2628 * \param size Size, in bytes, of the subdata range.
2629 * \param data The data store.
2631 *
2632 */
2633 void
2634 _mesa_buffer_sub_data(struct gl_context *ctx, struct gl_buffer_object *bufObj,
2635 GLintptr offset, GLsizeiptr size, const GLvoid *data)
2636 {
2637 if (size == 0)
2638 return;
2639
2640 bufObj->NumSubDataCalls++;
2641 bufObj->Written = GL_TRUE;
2642 bufObj->MinMaxCacheDirty = true;
2643
2644 _mesa_bufferobj_subdata(ctx, offset, size, data, bufObj);
2645 }
2646
2647
2648 static ALWAYS_INLINE void
2649 buffer_sub_data(GLenum target, GLuint buffer, GLintptr offset,
2650 GLsizeiptr size, const GLvoid *data,
2651 bool dsa, bool no_error, const char *func)
2652 {
2653 GET_CURRENT_CONTEXT(ctx);
2654 struct gl_buffer_object *bufObj;
2655
2656 if (dsa) {
2657 if (no_error) {
2658 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2659 } else {
2660 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer, func);
2661 if (!bufObj)
2662 return;
2663 }
2664 } else {
2665 if (no_error) {
2666 struct gl_buffer_object **bufObjPtr = get_buffer_target(ctx, target);
2667 bufObj = *bufObjPtr;
2668 } else {
2669 bufObj = get_buffer(ctx, func, target, GL_INVALID_OPERATION);
2670 if (!bufObj)
2671 return;
2672 }
2673 }
2674
2675 if (no_error || validate_buffer_sub_data(ctx, bufObj, offset, size, func))
2676 _mesa_buffer_sub_data(ctx, bufObj, offset, size, data);
2677 }
2678
2679
2680 void GLAPIENTRY
2681 _mesa_BufferSubData_no_error(GLenum target, GLintptr offset,
2682 GLsizeiptr size, const GLvoid *data)
2683 {
2684 buffer_sub_data(target, 0, offset, size, data, false, true,
2685 "glBufferSubData");
2686 }
2687
2688
2689 void GLAPIENTRY
2690 _mesa_BufferSubData(GLenum target, GLintptr offset,
2691 GLsizeiptr size, const GLvoid *data)
2692 {
2693 buffer_sub_data(target, 0, offset, size, data, false, false,
2694 "glBufferSubData");
2695 }
2696
2697 void GLAPIENTRY
2698 _mesa_NamedBufferSubData_no_error(GLuint buffer, GLintptr offset,
2699 GLsizeiptr size, const GLvoid *data)
2700 {
2701 buffer_sub_data(0, buffer, offset, size, data, true, true,
2702 "glNamedBufferSubData");
2703 }
2704
2705 void GLAPIENTRY
2706 _mesa_NamedBufferSubData(GLuint buffer, GLintptr offset,
2707 GLsizeiptr size, const GLvoid *data)
2708 {
2709 buffer_sub_data(0, buffer, offset, size, data, true, false,
2710 "glNamedBufferSubData");
2711 }
2712
2713 void GLAPIENTRY
2714 _mesa_NamedBufferSubDataEXT(GLuint buffer, GLintptr offset,
2715 GLsizeiptr size, const GLvoid *data)
2716 {
2717 GET_CURRENT_CONTEXT(ctx);
2718 struct gl_buffer_object *bufObj;
2719
2720 if (!buffer) {
2721 _mesa_error(ctx, GL_INVALID_OPERATION,
2722 "glNamedBufferSubDataEXT(buffer=0)");
2723 return;
2724 }
2725
2726 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2727 if (!_mesa_handle_bind_buffer_gen(ctx, buffer,
2728 &bufObj, "glNamedBufferSubDataEXT", false))
2729 return;
2730
2731 if (validate_buffer_sub_data(ctx, bufObj, offset, size,
2732 "glNamedBufferSubDataEXT")) {
2733 _mesa_buffer_sub_data(ctx, bufObj, offset, size, data);
2734 }
2735 }
2736
2737
2738 void GLAPIENTRY
2739 _mesa_GetBufferSubData(GLenum target, GLintptr offset,
2740 GLsizeiptr size, GLvoid *data)
2741 {
2742 GET_CURRENT_CONTEXT(ctx);
2743 struct gl_buffer_object *bufObj;
2744
2745 bufObj = get_buffer(ctx, "glGetBufferSubData", target,
2746 GL_INVALID_OPERATION);
2747 if (!bufObj)
2748 return;
2749
2750 if (!buffer_object_subdata_range_good(ctx, bufObj, offset, size, false,
2751 "glGetBufferSubData")) {
2752 return;
2753 }
2754
2755 bufferobj_get_subdata(ctx, offset, size, data, bufObj);
2756 }
2757
2758 void GLAPIENTRY
2759 _mesa_GetNamedBufferSubData(GLuint buffer, GLintptr offset,
2760 GLsizeiptr size, GLvoid *data)
2761 {
2762 GET_CURRENT_CONTEXT(ctx);
2763 struct gl_buffer_object *bufObj;
2764
2765 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer,
2766 "glGetNamedBufferSubData");
2767 if (!bufObj)
2768 return;
2769
2770 if (!buffer_object_subdata_range_good(ctx, bufObj, offset, size, false,
2771 "glGetNamedBufferSubData")) {
2772 return;
2773 }
2774
2775 bufferobj_get_subdata(ctx, offset, size, data, bufObj);
2776 }
2777
2778
2779 void GLAPIENTRY
2780 _mesa_GetNamedBufferSubDataEXT(GLuint buffer, GLintptr offset,
2781 GLsizeiptr size, GLvoid *data)
2782 {
2783 GET_CURRENT_CONTEXT(ctx);
2784 struct gl_buffer_object *bufObj;
2785
2786 if (!buffer) {
2787 _mesa_error(ctx, GL_INVALID_OPERATION,
2788 "glGetNamedBufferSubDataEXT(buffer=0)");
2789 return;
2790 }
2791
2792 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2793 if (!_mesa_handle_bind_buffer_gen(ctx, buffer,
2794 &bufObj, "glGetNamedBufferSubDataEXT", false))
2795 return;
2796
2797 if (!buffer_object_subdata_range_good(ctx, bufObj, offset, size, false,
2798 "glGetNamedBufferSubDataEXT")) {
2799 return;
2800 }
2801
2802 bufferobj_get_subdata(ctx, offset, size, data, bufObj);
2803 }
2804
2805 /**
2806 * \param subdata true if caller is *SubData, false if *Data
2807 */
2808 static ALWAYS_INLINE void
2809 clear_buffer_sub_data(struct gl_context *ctx, struct gl_buffer_object *bufObj,
2810 GLenum internalformat, GLintptr offset, GLsizeiptr size,
2811 GLenum format, GLenum type, const GLvoid *data,
2812 const char *func, bool subdata, bool no_error)
2813 {
2814 mesa_format mesaFormat;
2815 GLubyte clearValue[MAX_PIXEL_BYTES];
2816 GLsizeiptr clearValueSize;
2817
2818 /* This checks for disallowed mappings. */
2819 if (!no_error && !buffer_object_subdata_range_good(ctx, bufObj, offset, size,
2820 subdata, func)) {
2821 return;
2822 }
2823
2824 if (no_error) {
2825 mesaFormat = _mesa_get_texbuffer_format(ctx, internalformat);
2826 } else {
2827 mesaFormat = validate_clear_buffer_format(ctx, internalformat,
2828 format, type, func);
2829 }
2830
2831 if (mesaFormat == MESA_FORMAT_NONE)
2832 return;
2833
2834 clearValueSize = _mesa_get_format_bytes(mesaFormat);
2835 if (!no_error &&
2836 (offset % clearValueSize != 0 || size % clearValueSize != 0)) {
2837 _mesa_error(ctx, GL_INVALID_VALUE,
2838 "%s(offset or size is not a multiple of "
2839 "internalformat size)", func);
2840 return;
2841 }
2842
2843 /* Bail early. Negative size has already been checked. */
2844 if (size == 0)
2845 return;
2846
2847 bufObj->MinMaxCacheDirty = true;
2848
2849 if (!ctx->pipe->clear_buffer) {
2850 clear_buffer_subdata_sw(ctx, offset, size,
2851 data, clearValueSize, bufObj);
2852 return;
2853 }
2854
2855 if (!data)
2856 memset(clearValue, 0, MAX_PIXEL_BYTES);
2857 else if (!convert_clear_buffer_data(ctx, mesaFormat, clearValue,
2858 format, type, data, func)) {
2859 return;
2860 }
2861
2862 ctx->pipe->clear_buffer(ctx->pipe, bufObj->buffer, offset, size,
2863 clearValue, clearValueSize);
2864 }
2865
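/* Example call satisfying the alignment rule above (illustrative; offset and
 * size stand for application-chosen values that are multiples of 4):
 *
 *    const GLuint zero = 0;
 *    glClearBufferSubData(GL_SHADER_STORAGE_BUFFER, GL_R32UI, offset, size,
 *                         GL_RED_INTEGER, GL_UNSIGNED_INT, &zero);
 *
 * Passing data == NULL clears the range to zeros via the memset path above.
 */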
2866 static void
2867 clear_buffer_sub_data_error(struct gl_context *ctx,
2868 struct gl_buffer_object *bufObj,
2869 GLenum internalformat, GLintptr offset,
2870 GLsizeiptr size, GLenum format, GLenum type,
2871 const GLvoid *data, const char *func, bool subdata)
2872 {
2873 clear_buffer_sub_data(ctx, bufObj, internalformat, offset, size, format,
2874 type, data, func, subdata, false);
2875 }
2876
2877
2878 static void
2879 clear_buffer_sub_data_no_error(struct gl_context *ctx,
2880 struct gl_buffer_object *bufObj,
2881 GLenum internalformat, GLintptr offset,
2882 GLsizeiptr size, GLenum format, GLenum type,
2883 const GLvoid *data, const char *func,
2884 bool subdata)
2885 {
2886 clear_buffer_sub_data(ctx, bufObj, internalformat, offset, size, format,
2887 type, data, func, subdata, true);
2888 }
2889
2890
2891 void GLAPIENTRY
2892 _mesa_ClearBufferData_no_error(GLenum target, GLenum internalformat,
2893 GLenum format, GLenum type, const GLvoid *data)
2894 {
2895 GET_CURRENT_CONTEXT(ctx);
2896
2897 struct gl_buffer_object **bufObj = get_buffer_target(ctx, target);
2898 clear_buffer_sub_data_no_error(ctx, *bufObj, internalformat, 0,
2899 (*bufObj)->Size, format, type, data,
2900 "glClearBufferData", false);
2901 }
2902
2903
2904 void GLAPIENTRY
2905 _mesa_ClearBufferData(GLenum target, GLenum internalformat, GLenum format,
2906 GLenum type, const GLvoid *data)
2907 {
2908 GET_CURRENT_CONTEXT(ctx);
2909 struct gl_buffer_object *bufObj;
2910
2911 bufObj = get_buffer(ctx, "glClearBufferData", target, GL_INVALID_VALUE);
2912 if (!bufObj)
2913 return;
2914
2915 clear_buffer_sub_data_error(ctx, bufObj, internalformat, 0, bufObj->Size,
2916 format, type, data, "glClearBufferData", false);
2917 }
2918
2919
2920 void GLAPIENTRY
2921 _mesa_ClearNamedBufferData_no_error(GLuint buffer, GLenum internalformat,
2922 GLenum format, GLenum type,
2923 const GLvoid *data)
2924 {
2925 GET_CURRENT_CONTEXT(ctx);
2926
2927 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2928 clear_buffer_sub_data_no_error(ctx, bufObj, internalformat, 0, bufObj->Size,
2929 format, type, data, "glClearNamedBufferData",
2930 false);
2931 }
2932
2933
2934 void GLAPIENTRY
2935 _mesa_ClearNamedBufferData(GLuint buffer, GLenum internalformat,
2936 GLenum format, GLenum type, const GLvoid *data)
2937 {
2938 GET_CURRENT_CONTEXT(ctx);
2939 struct gl_buffer_object *bufObj;
2940
2941 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer, "glClearNamedBufferData");
2942 if (!bufObj)
2943 return;
2944
2945 clear_buffer_sub_data_error(ctx, bufObj, internalformat, 0, bufObj->Size,
2946 format, type, data, "glClearNamedBufferData",
2947 false);
2948 }
2949
2950
2951 void GLAPIENTRY
2952 _mesa_ClearNamedBufferDataEXT(GLuint buffer, GLenum internalformat,
2953 GLenum format, GLenum type, const GLvoid *data)
2954 {
2955 GET_CURRENT_CONTEXT(ctx);
2956 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2957 if (!_mesa_handle_bind_buffer_gen(ctx, buffer,
2958 &bufObj, "glClearNamedBufferDataEXT", false))
2959 return;
2960
2961 clear_buffer_sub_data_error(ctx, bufObj, internalformat, 0, bufObj->Size,
2962 format, type, data, "glClearNamedBufferDataEXT",
2963 false);
2964 }
2965
2966
2967 void GLAPIENTRY
2968 _mesa_ClearBufferSubData_no_error(GLenum target, GLenum internalformat,
2969 GLintptr offset, GLsizeiptr size,
2970 GLenum format, GLenum type,
2971 const GLvoid *data)
2972 {
2973 GET_CURRENT_CONTEXT(ctx);
2974
2975 struct gl_buffer_object **bufObj = get_buffer_target(ctx, target);
2976 clear_buffer_sub_data_no_error(ctx, *bufObj, internalformat, offset, size,
2977 format, type, data, "glClearBufferSubData",
2978 true);
2979 }
2980
2981
2982 void GLAPIENTRY
2983 _mesa_ClearBufferSubData(GLenum target, GLenum internalformat,
2984 GLintptr offset, GLsizeiptr size,
2985 GLenum format, GLenum type,
2986 const GLvoid *data)
2987 {
2988 GET_CURRENT_CONTEXT(ctx);
2989 struct gl_buffer_object *bufObj;
2990
2991 bufObj = get_buffer(ctx, "glClearBufferSubData", target, GL_INVALID_VALUE);
2992 if (!bufObj)
2993 return;
2994
2995 clear_buffer_sub_data_error(ctx, bufObj, internalformat, offset, size,
2996 format, type, data, "glClearBufferSubData",
2997 true);
2998 }
2999
3000
3001 void GLAPIENTRY
3002 _mesa_ClearNamedBufferSubData_no_error(GLuint buffer, GLenum internalformat,
3003 GLintptr offset, GLsizeiptr size,
3004 GLenum format, GLenum type,
3005 const GLvoid *data)
3006 {
3007 GET_CURRENT_CONTEXT(ctx);
3008
3009 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
3010 clear_buffer_sub_data_no_error(ctx, bufObj, internalformat, offset, size,
3011 format, type, data,
3012 "glClearNamedBufferSubData", true);
3013 }
3014
3015
3016 void GLAPIENTRY
3017 _mesa_ClearNamedBufferSubData(GLuint buffer, GLenum internalformat,
3018 GLintptr offset, GLsizeiptr size,
3019 GLenum format, GLenum type,
3020 const GLvoid *data)
3021 {
3022 GET_CURRENT_CONTEXT(ctx);
3023 struct gl_buffer_object *bufObj;
3024
3025 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer,
3026 "glClearNamedBufferSubData");
3027 if (!bufObj)
3028 return;
3029
3030 clear_buffer_sub_data_error(ctx, bufObj, internalformat, offset, size,
3031 format, type, data, "glClearNamedBufferSubData",
3032 true);
3033 }
3034
3035 void GLAPIENTRY
3036 _mesa_ClearNamedBufferSubDataEXT(GLuint buffer, GLenum internalformat,
3037 GLintptr offset, GLsizeiptr size,
3038 GLenum format, GLenum type,
3039 const GLvoid *data)
3040 {
3041 GET_CURRENT_CONTEXT(ctx);
3042 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
3043 if (!_mesa_handle_bind_buffer_gen(ctx, buffer,
3044 &bufObj, "glClearNamedBufferSubDataEXT", false))
3045 return;
3046
3047 clear_buffer_sub_data_error(ctx, bufObj, internalformat, offset, size,
3048 format, type, data, "glClearNamedBufferSubDataEXT",
3049 true);
3050 }
3051
3052 static GLboolean
3053 unmap_buffer(struct gl_context *ctx, struct gl_buffer_object *bufObj)
3054 {
3055 GLboolean status = _mesa_bufferobj_unmap(ctx, bufObj, MAP_USER);
3056 bufObj->Mappings[MAP_USER].AccessFlags = 0;
3057 assert(bufObj->Mappings[MAP_USER].Pointer == NULL);
3058 assert(bufObj->Mappings[MAP_USER].Offset == 0);
3059 assert(bufObj->Mappings[MAP_USER].Length == 0);
3060
3061 return status;
3062 }
3063
3064 static GLboolean
3065 validate_and_unmap_buffer(struct gl_context *ctx,
3066 struct gl_buffer_object *bufObj,
3067 const char *func)
3068 {
3069 ASSERT_OUTSIDE_BEGIN_END_WITH_RETVAL(ctx, GL_FALSE);
3070
3071 if (!_mesa_bufferobj_mapped(bufObj, MAP_USER)) {
3072 _mesa_error(ctx, GL_INVALID_OPERATION,
3073 "%s(buffer is not mapped)", func);
3074 return GL_FALSE;
3075 }
3076
3077 #ifdef BOUNDS_CHECK
3078 if (bufObj->Mappings[MAP_USER].AccessFlags != GL_READ_ONLY_ARB) {
3079 GLubyte *buf = (GLubyte *) bufObj->Mappings[MAP_USER].Pointer;
3080 GLuint i;
3081 /* check that last 100 bytes are still = magic value */
3082 for (i = 0; i < 100; i++) {
3083 GLuint pos = bufObj->Size - i - 1;
3084 if (buf[pos] != 123) {
3085 _mesa_warning(ctx, "Out of bounds buffer object write detected"
3086 " at position %d (value = %u)\n",
3087 pos, buf[pos]);
3088 }
3089 }
3090 }
3091 #endif
3092
3093 #ifdef VBO_DEBUG
3094 if (bufObj->Mappings[MAP_USER].AccessFlags & GL_MAP_WRITE_BIT) {
3095 GLuint i, unchanged = 0;
3096 GLubyte *b = (GLubyte *) bufObj->Mappings[MAP_USER].Pointer;
3097 GLint pos = -1;
3098 /* check which bytes changed */
3099 for (i = 0; i < bufObj->Size - 1; i++) {
3100 if (b[i] == (i & 0xff) && b[i+1] == ((i+1) & 0xff)) {
3101 unchanged++;
3102 if (pos == -1)
3103 pos = i;
3104 }
3105 }
3106 if (unchanged) {
3107 printf("glUnmapBufferARB(%u): %u of %ld unchanged, starting at %d\n",
3108 bufObj->Name, unchanged, bufObj->Size, pos);
3109 }
3110 }
3111 #endif
3112
3113 return unmap_buffer(ctx, bufObj);
3114 }
3115
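/* Typical map/modify/unmap sequence relative to the validation above
 * (sketch; size and data are application-provided):
 *
 *    void *p = glMapBufferRange(GL_ARRAY_BUFFER, 0, size, GL_MAP_WRITE_BIT);
 *    memcpy(p, data, size);
 *    glUnmapBuffer(GL_ARRAY_BUFFER);
 *
 * Calling glUnmapBuffer on a buffer that is not currently mapped is the
 * GL_INVALID_OPERATION case rejected above.
 */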
3116 GLboolean GLAPIENTRY
3117 _mesa_UnmapBuffer_no_error(GLenum target)
3118 {
3119 GET_CURRENT_CONTEXT(ctx);
3120 struct gl_buffer_object **bufObjPtr = get_buffer_target(ctx, target);
3121 struct gl_buffer_object *bufObj = *bufObjPtr;
3122
3123 return unmap_buffer(ctx, bufObj);
3124 }
3125
3126 GLboolean GLAPIENTRY
3127 _mesa_UnmapBuffer(GLenum target)
3128 {
3129 GET_CURRENT_CONTEXT(ctx);
3130 struct gl_buffer_object *bufObj;
3131
3132 bufObj = get_buffer(ctx, "glUnmapBuffer", target, GL_INVALID_OPERATION);
3133 if (!bufObj)
3134 return GL_FALSE;
3135
3136 return validate_and_unmap_buffer(ctx, bufObj, "glUnmapBuffer");
3137 }
3138
3139 GLboolean GLAPIENTRY
3140 _mesa_UnmapNamedBufferEXT_no_error(GLuint buffer)
3141 {
3142 GET_CURRENT_CONTEXT(ctx);
3143 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
3144
3145 return unmap_buffer(ctx, bufObj);
3146 }
3147
3148 GLboolean GLAPIENTRY
3149 _mesa_UnmapNamedBufferEXT(GLuint buffer)
3150 {
3151 GET_CURRENT_CONTEXT(ctx);
3152 struct gl_buffer_object *bufObj;
3153
3154 if (!buffer) {
3155 _mesa_error(ctx, GL_INVALID_OPERATION,
3156 "glUnmapNamedBufferEXT(buffer=0)");
3157 return GL_FALSE;
3158 }
3159
3160 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer, "glUnmapNamedBuffer");
3161 if (!bufObj)
3162 return GL_FALSE;
3163
3164 return validate_and_unmap_buffer(ctx, bufObj, "glUnmapNamedBuffer");
3165 }
3166
3167
3168 static bool
3169 get_buffer_parameter(struct gl_context *ctx,
3170 struct gl_buffer_object *bufObj, GLenum pname,
3171 GLint64 *params, const char *func)
3172 {
3173 switch (pname) {
3174 case GL_BUFFER_SIZE_ARB:
3175 *params = bufObj->Size;
3176 break;
3177 case GL_BUFFER_USAGE_ARB:
3178 *params = bufObj->Usage;
3179 break;
3180 case GL_BUFFER_ACCESS_ARB:
3181 *params = simplified_access_mode(ctx,
3182 bufObj->Mappings[MAP_USER].AccessFlags);
3183 break;
3184 case GL_BUFFER_MAPPED_ARB:
3185 *params = _mesa_bufferobj_mapped(bufObj, MAP_USER);
3186 break;
3187 case GL_BUFFER_ACCESS_FLAGS:
3188 if (!ctx->Extensions.ARB_map_buffer_range)
3189 goto invalid_pname;
3190 *params = bufObj->Mappings[MAP_USER].AccessFlags;
3191 break;
3192 case GL_BUFFER_MAP_OFFSET:
3193 if (!ctx->Extensions.ARB_map_buffer_range)
3194 goto invalid_pname;
3195 *params = bufObj->Mappings[MAP_USER].Offset;
3196 break;
3197 case GL_BUFFER_MAP_LENGTH:
3198 if (!ctx->Extensions.ARB_map_buffer_range)
3199 goto invalid_pname;
3200 *params = bufObj->Mappings[MAP_USER].Length;
3201 break;
3202 case GL_BUFFER_IMMUTABLE_STORAGE:
3203 if (!ctx->Extensions.ARB_buffer_storage)
3204 goto invalid_pname;
3205 *params = bufObj->Immutable;
3206 break;
3207 case GL_BUFFER_STORAGE_FLAGS:
3208 if (!ctx->Extensions.ARB_buffer_storage)
3209 goto invalid_pname;
3210 *params = bufObj->StorageFlags;
3211 break;
3212 default:
3213 goto invalid_pname;
3214 }
3215
3216 return true;
3217
3218 invalid_pname:
3219 _mesa_error(ctx, GL_INVALID_ENUM, "%s(invalid pname: %s)", func,
3220 _mesa_enum_to_string(pname));
3221 return false;
3222 }
3223
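/* Illustrative query going through the helper above:
 *
 *    GLint bufSize = 0;
 *    glGetBufferParameteriv(GL_ARRAY_BUFFER, GL_BUFFER_SIZE, &bufSize);
 *
 * Sizes that may exceed 32 bits are better queried with
 * glGetBufferParameteri64v, since the i* variant truncates the GLint64
 * value to GLint.
 */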
3224 void GLAPIENTRY
3225 _mesa_GetBufferParameteriv(GLenum target, GLenum pname, GLint *params)
3226 {
3227 GET_CURRENT_CONTEXT(ctx);
3228 struct gl_buffer_object *bufObj;
3229 GLint64 parameter;
3230
3231 bufObj = get_buffer(ctx, "glGetBufferParameteriv", target,
3232 GL_INVALID_OPERATION);
3233 if (!bufObj)
3234 return;
3235
3236 if (!get_buffer_parameter(ctx, bufObj, pname, &parameter,
3237 "glGetBufferParameteriv"))
3238 return; /* Error already recorded. */
3239
3240 *params = (GLint) parameter;
3241 }
3242
3243 void GLAPIENTRY
3244 _mesa_GetBufferParameteri64v(GLenum target, GLenum pname, GLint64 *params)
3245 {
3246 GET_CURRENT_CONTEXT(ctx);
3247 struct gl_buffer_object *bufObj;
3248 GLint64 parameter;
3249
3250 bufObj = get_buffer(ctx, "glGetBufferParameteri64v", target,
3251 GL_INVALID_OPERATION);
3252 if (!bufObj)
3253 return;
3254
3255 if (!get_buffer_parameter(ctx, bufObj, pname, &parameter,
3256 "glGetBufferParameteri64v"))
3257 return; /* Error already recorded. */
3258
3259 *params = parameter;
3260 }
3261
3262 void GLAPIENTRY
3263 _mesa_GetNamedBufferParameteriv(GLuint buffer, GLenum pname, GLint *params)
3264 {
3265 GET_CURRENT_CONTEXT(ctx);
3266 struct gl_buffer_object *bufObj;
3267 GLint64 parameter;
3268
3269 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer,
3270 "glGetNamedBufferParameteriv");
3271 if (!bufObj)
3272 return;
3273
3274 if (!get_buffer_parameter(ctx, bufObj, pname, &parameter,
3275 "glGetNamedBufferParameteriv"))
3276 return; /* Error already recorded. */
3277
3278 *params = (GLint) parameter;
3279 }
3280
3281 void GLAPIENTRY
3282 _mesa_GetNamedBufferParameterivEXT(GLuint buffer, GLenum pname, GLint *params)
3283 {
3284 GET_CURRENT_CONTEXT(ctx);
3285 struct gl_buffer_object *bufObj;
3286 GLint64 parameter;
3287
3288 if (!buffer) {
3289 _mesa_error(ctx, GL_INVALID_OPERATION,
3290 "glGetNamedBufferParameterivEXT: buffer=0");
3291 return;
3292 }
3293
3294 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
3295 if (!_mesa_handle_bind_buffer_gen(ctx, buffer,
3296 &bufObj, "glGetNamedBufferParameterivEXT", false))
3297 return;
3298
3299 if (!get_buffer_parameter(ctx, bufObj, pname, &parameter,
3300 "glGetNamedBufferParameterivEXT"))
3301 return; /* Error already recorded. */
3302
3303 *params = (GLint) parameter;
3304 }
3305
3306 void GLAPIENTRY
3307 _mesa_GetNamedBufferParameteri64v(GLuint buffer, GLenum pname,
3308 GLint64 *params)
3309 {
3310 GET_CURRENT_CONTEXT(ctx);
3311 struct gl_buffer_object *bufObj;
3312 GLint64 parameter;
3313
3314 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer,
3315 "glGetNamedBufferParameteri64v");
3316 if (!bufObj)
3317 return;
3318
3319 if (!get_buffer_parameter(ctx, bufObj, pname, &parameter,
3320 "glGetNamedBufferParameteri64v"))
3321 return; /* Error already recorded. */
3322
3323 *params = parameter;
3324 }
3325
3326
3327 void GLAPIENTRY
3328 _mesa_GetBufferPointerv(GLenum target, GLenum pname, GLvoid **params)
3329 {
3330 GET_CURRENT_CONTEXT(ctx);
3331 struct gl_buffer_object *bufObj;
3332
3333 if (pname != GL_BUFFER_MAP_POINTER) {
3334 _mesa_error(ctx, GL_INVALID_ENUM, "glGetBufferPointerv(pname != "
3335 "GL_BUFFER_MAP_POINTER)");
3336 return;
3337 }
3338
3339 bufObj = get_buffer(ctx, "glGetBufferPointerv", target,
3340 GL_INVALID_OPERATION);
3341 if (!bufObj)
3342 return;
3343
3344 *params = bufObj->Mappings[MAP_USER].Pointer;
3345 }
3346
3347 void GLAPIENTRY
3348 _mesa_GetNamedBufferPointerv(GLuint buffer, GLenum pname, GLvoid **params)
3349 {
3350 GET_CURRENT_CONTEXT(ctx);
3351 struct gl_buffer_object *bufObj;
3352
3353 if (pname != GL_BUFFER_MAP_POINTER) {
3354 _mesa_error(ctx, GL_INVALID_ENUM, "glGetNamedBufferPointerv(pname != "
3355 "GL_BUFFER_MAP_POINTER)");
3356 return;
3357 }
3358
3359 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer,
3360 "glGetNamedBufferPointerv");
3361 if (!bufObj)
3362 return;
3363
3364 *params = bufObj->Mappings[MAP_USER].Pointer;
3365 }
3366
3367 void GLAPIENTRY
3368 _mesa_GetNamedBufferPointervEXT(GLuint buffer, GLenum pname, GLvoid **params)
3369 {
3370 GET_CURRENT_CONTEXT(ctx);
3371 struct gl_buffer_object *bufObj;
3372
3373 if (!buffer) {
3374 _mesa_error(ctx, GL_INVALID_OPERATION,
3375 "glGetNamedBufferPointervEXT(buffer=0)");
3376 return;
3377 }
3378 if (pname != GL_BUFFER_MAP_POINTER) {
3379 _mesa_error(ctx, GL_INVALID_ENUM, "glGetNamedBufferPointervEXT(pname != "
3380 "GL_BUFFER_MAP_POINTER)");
3381 return;
3382 }
3383
3384 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
3385 if (!_mesa_handle_bind_buffer_gen(ctx, buffer,
3386 &bufObj, "glGetNamedBufferPointervEXT", false))
3387 return;
3388
3389 *params = bufObj->Mappings[MAP_USER].Pointer;
3390 }
3391
3392 static void
3393 copy_buffer_sub_data(struct gl_context *ctx, struct gl_buffer_object *src,
3394 struct gl_buffer_object *dst, GLintptr readOffset,
3395 GLintptr writeOffset, GLsizeiptr size, const char *func)
3396 {
3397 if (_mesa_check_disallowed_mapping(src)) {
3398 _mesa_error(ctx, GL_INVALID_OPERATION,
3399 "%s(readBuffer is mapped)", func);
3400 return;
3401 }
3402
3403 if (_mesa_check_disallowed_mapping(dst)) {
3404 _mesa_error(ctx, GL_INVALID_OPERATION,
3405 "%s(writeBuffer is mapped)", func);
3406 return;
3407 }
3408
3409 if (readOffset < 0) {
3410 _mesa_error(ctx, GL_INVALID_VALUE,
3411 "%s(readOffset %d < 0)", func, (int) readOffset);
3412 return;
3413 }
3414
3415 if (writeOffset < 0) {
3416 _mesa_error(ctx, GL_INVALID_VALUE,
3417 "%s(writeOffset %d < 0)", func, (int) writeOffset);
3418 return;
3419 }
3420
3421 if (size < 0) {
3422 _mesa_error(ctx, GL_INVALID_VALUE,
3423 "%s(size %d < 0)", func, (int) size);
3424 return;
3425 }
3426
3427 if (readOffset + size > src->Size) {
3428 _mesa_error(ctx, GL_INVALID_VALUE,
3429 "%s(readOffset %d + size %d > src_buffer_size %d)", func,
3430 (int) readOffset, (int) size, (int) src->Size);
3431 return;
3432 }
3433
3434 if (writeOffset + size > dst->Size) {
3435 _mesa_error(ctx, GL_INVALID_VALUE,
3436 "%s(writeOffset %d + size %d > dst_buffer_size %d)", func,
3437 (int) writeOffset, (int) size, (int) dst->Size);
3438 return;
3439 }
3440
3441 if (src == dst) {
3442 if (readOffset + size <= writeOffset) {
3443 /* OK */
3444 }
3445 else if (writeOffset + size <= readOffset) {
3446 /* OK */
3447 }
3448 else {
3449 /* overlapping src/dst is illegal */
3450 _mesa_error(ctx, GL_INVALID_VALUE,
3451 "%s(overlapping src/dst)", func);
3452 return;
3453 }
3454 }
3455
3456 bufferobj_copy_subdata(ctx, src, dst, readOffset, writeOffset, size);
3457 }
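
/* A hedged client-side sketch of the overlap rule checked above: when the
 * read and write buffers are the same object, the two ranges must not
 * overlap.  "buf" is a hypothetical buffer of at least 256 bytes:
 *
 *    glBindBuffer(GL_COPY_READ_BUFFER, buf);
 *    glBindBuffer(GL_COPY_WRITE_BUFFER, buf);
 *    // OK: [0,128) and [128,256) are disjoint
 *    glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER,
 *                        0, 128, 128);
 *    // GL_INVALID_VALUE: [0,128) and [64,192) overlap within one buffer
 *    glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER,
 *                        0, 64, 128);
 */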
3458
3459 void GLAPIENTRY
3460 _mesa_CopyBufferSubData_no_error(GLenum readTarget, GLenum writeTarget,
3461 GLintptr readOffset, GLintptr writeOffset,
3462 GLsizeiptr size)
3463 {
3464 GET_CURRENT_CONTEXT(ctx);
3465
3466 struct gl_buffer_object **src_ptr = get_buffer_target(ctx, readTarget);
3467 struct gl_buffer_object *src = *src_ptr;
3468
3469 struct gl_buffer_object **dst_ptr = get_buffer_target(ctx, writeTarget);
3470 struct gl_buffer_object *dst = *dst_ptr;
3471
3472 bufferobj_copy_subdata(ctx, src, dst, readOffset, writeOffset,
3473 size);
3474 }
3475
3476 void GLAPIENTRY
3477 _mesa_CopyBufferSubData(GLenum readTarget, GLenum writeTarget,
3478 GLintptr readOffset, GLintptr writeOffset,
3479 GLsizeiptr size)
3480 {
3481 GET_CURRENT_CONTEXT(ctx);
3482 struct gl_buffer_object *src, *dst;
3483
3484 src = get_buffer(ctx, "glCopyBufferSubData", readTarget,
3485 GL_INVALID_OPERATION);
3486 if (!src)
3487 return;
3488
3489 dst = get_buffer(ctx, "glCopyBufferSubData", writeTarget,
3490 GL_INVALID_OPERATION);
3491 if (!dst)
3492 return;
3493
3494 copy_buffer_sub_data(ctx, src, dst, readOffset, writeOffset, size,
3495 "glCopyBufferSubData");
3496 }
3497
3498 void GLAPIENTRY
3499 _mesa_NamedCopyBufferSubDataEXT(GLuint readBuffer, GLuint writeBuffer,
3500 GLintptr readOffset, GLintptr writeOffset,
3501 GLsizeiptr size)
3502 {
3503 GET_CURRENT_CONTEXT(ctx);
3504 struct gl_buffer_object *src, *dst;
3505
3506 src = _mesa_lookup_bufferobj(ctx, readBuffer);
3507 if (!_mesa_handle_bind_buffer_gen(ctx, readBuffer,
3508 &src,
3509 "glNamedCopyBufferSubDataEXT", false))
3510 return;
3511
3512 dst = _mesa_lookup_bufferobj(ctx, writeBuffer);
3513 if (!_mesa_handle_bind_buffer_gen(ctx, writeBuffer,
3514 &dst,
3515 "glNamedCopyBufferSubDataEXT", false))
3516 return;
3517
3518 copy_buffer_sub_data(ctx, src, dst, readOffset, writeOffset, size,
3519 "glNamedCopyBufferSubDataEXT");
3520 }
3521
3522 void GLAPIENTRY
3523 _mesa_CopyNamedBufferSubData_no_error(GLuint readBuffer, GLuint writeBuffer,
3524 GLintptr readOffset,
3525 GLintptr writeOffset, GLsizeiptr size)
3526 {
3527 GET_CURRENT_CONTEXT(ctx);
3528
3529 struct gl_buffer_object *src = _mesa_lookup_bufferobj(ctx, readBuffer);
3530 struct gl_buffer_object *dst = _mesa_lookup_bufferobj(ctx, writeBuffer);
3531
3532 bufferobj_copy_subdata(ctx, src, dst, readOffset, writeOffset,
3533 size);
3534 }
3535
3536 void GLAPIENTRY
3537 _mesa_CopyNamedBufferSubData(GLuint readBuffer, GLuint writeBuffer,
3538 GLintptr readOffset, GLintptr writeOffset,
3539 GLsizeiptr size)
3540 {
3541 GET_CURRENT_CONTEXT(ctx);
3542 struct gl_buffer_object *src, *dst;
3543
3544 src = _mesa_lookup_bufferobj_err(ctx, readBuffer,
3545 "glCopyNamedBufferSubData");
3546 if (!src)
3547 return;
3548
3549 dst = _mesa_lookup_bufferobj_err(ctx, writeBuffer,
3550 "glCopyNamedBufferSubData");
3551 if (!dst)
3552 return;
3553
3554 copy_buffer_sub_data(ctx, src, dst, readOffset, writeOffset, size,
3555 "glCopyNamedBufferSubData");
3556 }
3557
3558 void GLAPIENTRY
3559 _mesa_InternalBufferSubDataCopyMESA(GLintptr srcBuffer, GLuint srcOffset,
3560 GLuint dstTargetOrName, GLintptr dstOffset,
3561 GLsizeiptr size, GLboolean named,
3562 GLboolean ext_dsa)
3563 {
3564 GET_CURRENT_CONTEXT(ctx);
3565 struct gl_buffer_object *src = (struct gl_buffer_object *)srcBuffer;
3566 struct gl_buffer_object *dst;
3567 const char *func;
3568
3569 /* Handle behavior for all 3 variants. */
3570 if (named && ext_dsa) {
3571 func = "glNamedBufferSubDataEXT";
3572 dst = _mesa_lookup_bufferobj(ctx, dstTargetOrName);
3573 if (!_mesa_handle_bind_buffer_gen(ctx, dstTargetOrName, &dst, func, false))
3574 goto done;
3575 } else if (named) {
3576 func = "glNamedBufferSubData";
3577 dst = _mesa_lookup_bufferobj_err(ctx, dstTargetOrName, func);
3578 if (!dst)
3579 goto done;
3580 } else {
3581 assert(!ext_dsa);
3582 func = "glBufferSubData";
3583 dst = get_buffer(ctx, func, dstTargetOrName, GL_INVALID_OPERATION);
3584 if (!dst)
3585 goto done;
3586 }
3587
3588 if (!validate_buffer_sub_data(ctx, dst, dstOffset, size, func))
3589 goto done; /* the error is already set */
3590
3591 bufferobj_copy_subdata(ctx, src, dst, srcOffset, dstOffset, size);
3592
3593 done:
3594 /* The caller passes the reference to this function, so unreference it. */
3595 _mesa_reference_buffer_object(ctx, &src, NULL);
3596 }
3597
3598 static bool
3599 validate_map_buffer_range(struct gl_context *ctx,
3600 struct gl_buffer_object *bufObj, GLintptr offset,
3601 GLsizeiptr length, GLbitfield access,
3602 const char *func)
3603 {
3604 GLbitfield allowed_access;
3605
3606 ASSERT_OUTSIDE_BEGIN_END_WITH_RETVAL(ctx, false);
3607
3608 if (offset < 0) {
3609 _mesa_error(ctx, GL_INVALID_VALUE,
3610 "%s(offset %ld < 0)", func, (long) offset);
3611 return false;
3612 }
3613
3614 if (length < 0) {
3615 _mesa_error(ctx, GL_INVALID_VALUE,
3616 "%s(length %ld < 0)", func, (long) length);
3617 return false;
3618 }
3619
3620 /* Page 38 of the PDF of the OpenGL ES 3.0 spec says:
3621 *
3622 * "An INVALID_OPERATION error is generated for any of the following
3623 * conditions:
3624 *
3625 * * <length> is zero."
3626 *
3627 * Additionally, page 94 of the PDF of the OpenGL 4.5 core spec
3628 * (30.10.2014) also says this, so it's no longer allowed for desktop GL,
3629 * either.
3630 */
3631 if (length == 0) {
3632 _mesa_error(ctx, GL_INVALID_OPERATION, "%s(length = 0)", func);
3633 return false;
3634 }
3635
3636 allowed_access = GL_MAP_READ_BIT |
3637 GL_MAP_WRITE_BIT |
3638 GL_MAP_INVALIDATE_RANGE_BIT |
3639 GL_MAP_INVALIDATE_BUFFER_BIT |
3640 GL_MAP_FLUSH_EXPLICIT_BIT |
3641 GL_MAP_UNSYNCHRONIZED_BIT;
3642
3643 if (ctx->Extensions.ARB_buffer_storage) {
3644 allowed_access |= GL_MAP_PERSISTENT_BIT |
3645 GL_MAP_COHERENT_BIT;
3646 }
3647
3648 if (access & ~allowed_access) {
3649 /* generate an error if any bits other than those allowed are set */
3650 _mesa_error(ctx, GL_INVALID_VALUE,
3651 "%s(access has undefined bits set)", func);
3652 return false;
3653 }
3654
3655 if ((access & (GL_MAP_READ_BIT | GL_MAP_WRITE_BIT)) == 0) {
3656 _mesa_error(ctx, GL_INVALID_OPERATION,
3657 "%s(access indicates neither read or write)", func);
3658 return false;
3659 }
3660
3661 if ((access & GL_MAP_READ_BIT) &&
3662 (access & (GL_MAP_INVALIDATE_RANGE_BIT |
3663 GL_MAP_INVALIDATE_BUFFER_BIT |
3664 GL_MAP_UNSYNCHRONIZED_BIT))) {
3665 _mesa_error(ctx, GL_INVALID_OPERATION,
3666 "%s(read access with disallowed bits)", func);
3667 return false;
3668 }
3669
3670 if ((access & GL_MAP_FLUSH_EXPLICIT_BIT) &&
3671 ((access & GL_MAP_WRITE_BIT) == 0)) {
3672 _mesa_error(ctx, GL_INVALID_OPERATION,
3673 "%s(access has flush explicit without write)", func);
3674 return false;
3675 }
3676
3677 if (access & GL_MAP_READ_BIT &&
3678 !(bufObj->StorageFlags & GL_MAP_READ_BIT)) {
3679 _mesa_error(ctx, GL_INVALID_OPERATION,
3680 "%s(buffer does not allow read access)", func);
3681 return false;
3682 }
3683
3684 if (access & GL_MAP_WRITE_BIT &&
3685 !(bufObj->StorageFlags & GL_MAP_WRITE_BIT)) {
3686 _mesa_error(ctx, GL_INVALID_OPERATION,
3687 "%s(buffer does not allow write access)", func);
3688 return false;
3689 }
3690
3691 if (access & GL_MAP_COHERENT_BIT &&
3692 !(bufObj->StorageFlags & GL_MAP_COHERENT_BIT)) {
3693 _mesa_error(ctx, GL_INVALID_OPERATION,
3694 "%s(buffer does not allow coherent access)", func);
3695 return false;
3696 }
3697
3698 if (access & GL_MAP_PERSISTENT_BIT &&
3699 !(bufObj->StorageFlags & GL_MAP_PERSISTENT_BIT)) {
3700 _mesa_error(ctx, GL_INVALID_OPERATION,
3701 "%s(buffer does not allow persistent access)", func);
3702 return false;
3703 }
3704
3705 if (offset + length > bufObj->Size) {
3706 _mesa_error(ctx, GL_INVALID_VALUE,
3707 "%s(offset %lu + length %lu > buffer_size %lu)", func,
3708 (unsigned long) offset, (unsigned long) length,
3709 (unsigned long) bufObj->Size);
3710 return false;
3711 }
3712
3713 if (_mesa_bufferobj_mapped(bufObj, MAP_USER)) {
3714 _mesa_error(ctx, GL_INVALID_OPERATION,
3715 "%s(buffer already mapped)", func);
3716 return false;
3717 }
3718
3719 if (access & GL_MAP_WRITE_BIT) {
3720 bufObj->NumMapBufferWriteCalls++;
3721 if ((bufObj->Usage == GL_STATIC_DRAW ||
3722 bufObj->Usage == GL_STATIC_COPY) &&
3723 bufObj->NumMapBufferWriteCalls >= BUFFER_WARNING_CALL_COUNT) {
3724 BUFFER_USAGE_WARNING(ctx,
3725 "using %s(buffer %u, offset %u, length %u) to "
3726 "update a %s buffer",
3727 func, bufObj->Name, offset, length,
3728 _mesa_enum_to_string(bufObj->Usage));
3729 }
3730 }
3731
3732 return true;
3733 }
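
/* A hedged client-side sketch of the access-flag rules validated above;
 * "buf", "size" and "data" are hypothetical application values:
 *
 *    glBindBuffer(GL_ARRAY_BUFFER, buf);
 *    // OK: write-only mapping that discards the previous contents
 *    void *p = glMapBufferRange(GL_ARRAY_BUFFER, 0, size,
 *                               GL_MAP_WRITE_BIT |
 *                               GL_MAP_INVALIDATE_BUFFER_BIT);
 *    memcpy(p, data, size);
 *    glUnmapBuffer(GL_ARRAY_BUFFER);
 *
 *    // GL_INVALID_OPERATION: read access may not be combined with the
 *    // INVALIDATE_* or UNSYNCHRONIZED bits
 *    p = glMapBufferRange(GL_ARRAY_BUFFER, 0, size,
 *                         GL_MAP_READ_BIT | GL_MAP_INVALIDATE_RANGE_BIT);
 */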
3734
3735 static void *
3736 map_buffer_range(struct gl_context *ctx, struct gl_buffer_object *bufObj,
3737 GLintptr offset, GLsizeiptr length, GLbitfield access,
3738 const char *func)
3739 {
3740 if (!bufObj->Size) {
3741 _mesa_error(ctx, GL_OUT_OF_MEMORY, "%s(buffer size = 0)", func);
3742 return NULL;
3743 }
3744
3745 void *map = _mesa_bufferobj_map_range(ctx, offset, length, access, bufObj,
3746 MAP_USER);
3747 if (!map) {
3748 _mesa_error(ctx, GL_OUT_OF_MEMORY, "%s(map failed)", func);
3749 }
3750 else {
3751 /* The driver callback should have set all these fields.
3752 * This is important because other modules (like VBO) might call
3753 * the driver function directly.
3754 */
3755 assert(bufObj->Mappings[MAP_USER].Pointer == map);
3756 assert(bufObj->Mappings[MAP_USER].Length == length);
3757 assert(bufObj->Mappings[MAP_USER].Offset == offset);
3758 assert(bufObj->Mappings[MAP_USER].AccessFlags == access);
3759 }
3760
3761 if (access & GL_MAP_WRITE_BIT) {
3762 bufObj->Written = GL_TRUE;
3763 bufObj->MinMaxCacheDirty = true;
3764 }
3765
3766 #ifdef VBO_DEBUG
3767 if (strstr(func, "Range") == NULL) { /* If not MapRange */
3768 printf("glMapBuffer(%u, sz %ld, access 0x%x)\n",
3769 bufObj->Name, bufObj->Size, access);
3770 /* Access must be write only */
3771 if ((access & GL_MAP_WRITE_BIT) && (!(access & ~GL_MAP_WRITE_BIT))) {
3772 GLuint i;
3773 GLubyte *b = (GLubyte *) bufObj->Mappings[MAP_USER].Pointer;
3774 for (i = 0; i < bufObj->Size; i++)
3775 b[i] = i & 0xff;
3776 }
3777 }
3778 #endif
3779
3780 #ifdef BOUNDS_CHECK
3781 if (strstr(func, "Range") == NULL) { /* If not MapRange */
3782 GLubyte *buf = (GLubyte *) bufObj->Mappings[MAP_USER].Pointer;
3783 GLuint i;
3784 /* buffer is 100 bytes larger than requested, fill with magic value */
3785 for (i = 0; i < 100; i++) {
3786 buf[bufObj->Size - i - 1] = 123;
3787 }
3788 }
3789 #endif
3790
3791 return map;
3792 }
3793
3794 void * GLAPIENTRY
3795 _mesa_MapBufferRange_no_error(GLenum target, GLintptr offset,
3796 GLsizeiptr length, GLbitfield access)
3797 {
3798 GET_CURRENT_CONTEXT(ctx);
3799
3800 struct gl_buffer_object **bufObjPtr = get_buffer_target(ctx, target);
3801 struct gl_buffer_object *bufObj = *bufObjPtr;
3802
3803 return map_buffer_range(ctx, bufObj, offset, length, access,
3804 "glMapBufferRange");
3805 }
3806
3807 void * GLAPIENTRY
3808 _mesa_MapBufferRange(GLenum target, GLintptr offset, GLsizeiptr length,
3809 GLbitfield access)
3810 {
3811 GET_CURRENT_CONTEXT(ctx);
3812 struct gl_buffer_object *bufObj;
3813
3814 if (!ctx->Extensions.ARB_map_buffer_range) {
3815 _mesa_error(ctx, GL_INVALID_OPERATION,
3816 "glMapBufferRange(ARB_map_buffer_range not supported)");
3817 return NULL;
3818 }
3819
3820 bufObj = get_buffer(ctx, "glMapBufferRange", target, GL_INVALID_OPERATION);
3821 if (!bufObj)
3822 return NULL;
3823
3824 if (!validate_map_buffer_range(ctx, bufObj, offset, length, access,
3825 "glMapBufferRange"))
3826 return NULL;
3827
3828 return map_buffer_range(ctx, bufObj, offset, length, access,
3829 "glMapBufferRange");
3830 }
3831
3832 void * GLAPIENTRY
3833 _mesa_MapNamedBufferRange_no_error(GLuint buffer, GLintptr offset,
3834 GLsizeiptr length, GLbitfield access)
3835 {
3836 GET_CURRENT_CONTEXT(ctx);
3837 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
3838
3839 return map_buffer_range(ctx, bufObj, offset, length, access,
3840 "glMapNamedBufferRange");
3841 }
3842
3843 static void *
3844 map_named_buffer_range(GLuint buffer, GLintptr offset, GLsizeiptr length,
3845 GLbitfield access, bool dsa_ext, const char *func)
3846 {
3847 GET_CURRENT_CONTEXT(ctx);
3848 struct gl_buffer_object *bufObj = NULL;
3849
3850 if (!ctx->Extensions.ARB_map_buffer_range) {
3851 _mesa_error(ctx, GL_INVALID_OPERATION,
3852 "%s(ARB_map_buffer_range not supported)", func);
3853 return NULL;
3854 }
3855
3856 if (dsa_ext) {
3857 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
3858 if (!_mesa_handle_bind_buffer_gen(ctx, buffer, &bufObj, func, false))
3859 return NULL;
3860 } else {
3861 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer, func);
3862 if (!bufObj)
3863 return NULL;
3864 }
3865
3866 if (!validate_map_buffer_range(ctx, bufObj, offset, length, access, func))
3867 return NULL;
3868
3869 return map_buffer_range(ctx, bufObj, offset, length, access, func);
3870 }
3871
3872 void * GLAPIENTRY
3873 _mesa_MapNamedBufferRangeEXT(GLuint buffer, GLintptr offset, GLsizeiptr length,
3874 GLbitfield access)
3875 {
3876 GET_CURRENT_CONTEXT(ctx);
3877 if (!buffer) {
3878 _mesa_error(ctx, GL_INVALID_OPERATION,
3879 "glMapNamedBufferRangeEXT(buffer=0)");
3880 return NULL;
3881 }
3882 return map_named_buffer_range(buffer, offset, length, access, true,
3883 "glMapNamedBufferRangeEXT");
3884 }
3885
3886 void * GLAPIENTRY
3887 _mesa_MapNamedBufferRange(GLuint buffer, GLintptr offset, GLsizeiptr length,
3888 GLbitfield access)
3889 {
3890 return map_named_buffer_range(buffer, offset, length, access, false,
3891 "glMapNamedBufferRange");
3892 }
3893
3894 /**
3895 * Converts GLenum access from MapBuffer and MapNamedBuffer into
3896 * flags for input to map_buffer_range.
3897 *
3898 * \return true if the type of requested access is permissible.
3899 */
3900 static bool
3901 get_map_buffer_access_flags(struct gl_context *ctx, GLenum access,
3902 GLbitfield *flags)
3903 {
3904 switch (access) {
3905 case GL_READ_ONLY_ARB:
3906 *flags = GL_MAP_READ_BIT;
3907 return _mesa_is_desktop_gl(ctx);
3908 case GL_WRITE_ONLY_ARB:
3909 *flags = GL_MAP_WRITE_BIT;
3910 return true;
3911 case GL_READ_WRITE_ARB:
3912 *flags = GL_MAP_READ_BIT | GL_MAP_WRITE_BIT;
3913 return _mesa_is_desktop_gl(ctx);
3914 default:
3915 *flags = 0;
3916 return false;
3917 }
3918 }
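
/* For reference, a hedged sketch of how the legacy access enums behave at
 * the API level; "buf" is a hypothetical buffer name:
 *
 *    glBindBuffer(GL_PIXEL_PACK_BUFFER, buf);
 *    // maps the whole buffer; GL_READ_WRITE becomes
 *    // GL_MAP_READ_BIT | GL_MAP_WRITE_BIT internally
 *    void *p = glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_WRITE);
 *    glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
 *
 * On OpenGL ES only the write-only enum is accepted, which is why the read
 * variants above are gated on _mesa_is_desktop_gl().
 */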
3919
3920 void * GLAPIENTRY
3921 _mesa_MapBuffer_no_error(GLenum target, GLenum access)
3922 {
3923 GET_CURRENT_CONTEXT(ctx);
3924
3925 GLbitfield accessFlags;
3926 get_map_buffer_access_flags(ctx, access, &accessFlags);
3927
3928 struct gl_buffer_object **bufObjPtr = get_buffer_target(ctx, target);
3929 struct gl_buffer_object *bufObj = *bufObjPtr;
3930
3931 return map_buffer_range(ctx, bufObj, 0, bufObj->Size, accessFlags,
3932 "glMapBuffer");
3933 }
3934
3935 void * GLAPIENTRY
3936 _mesa_MapBuffer(GLenum target, GLenum access)
3937 {
3938 GET_CURRENT_CONTEXT(ctx);
3939 struct gl_buffer_object *bufObj;
3940 GLbitfield accessFlags;
3941
3942 if (!get_map_buffer_access_flags(ctx, access, &accessFlags)) {
3943 _mesa_error(ctx, GL_INVALID_ENUM, "glMapBuffer(invalid access)");
3944 return NULL;
3945 }
3946
3947 bufObj = get_buffer(ctx, "glMapBuffer", target, GL_INVALID_OPERATION);
3948 if (!bufObj)
3949 return NULL;
3950
3951 if (!validate_map_buffer_range(ctx, bufObj, 0, bufObj->Size, accessFlags,
3952 "glMapBuffer"))
3953 return NULL;
3954
3955 return map_buffer_range(ctx, bufObj, 0, bufObj->Size, accessFlags,
3956 "glMapBuffer");
3957 }
3958
3959 void * GLAPIENTRY
3960 _mesa_MapNamedBuffer_no_error(GLuint buffer, GLenum access)
3961 {
3962 GET_CURRENT_CONTEXT(ctx);
3963
3964 GLbitfield accessFlags;
3965 get_map_buffer_access_flags(ctx, access, &accessFlags);
3966
3967 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
3968
3969 return map_buffer_range(ctx, bufObj, 0, bufObj->Size, accessFlags,
3970 "glMapNamedBuffer");
3971 }
3972
3973 void * GLAPIENTRY
3974 _mesa_MapNamedBuffer(GLuint buffer, GLenum access)
3975 {
3976 GET_CURRENT_CONTEXT(ctx);
3977 struct gl_buffer_object *bufObj;
3978 GLbitfield accessFlags;
3979
3980 if (!get_map_buffer_access_flags(ctx, access, &accessFlags)) {
3981 _mesa_error(ctx, GL_INVALID_ENUM, "glMapNamedBuffer(invalid access)");
3982 return NULL;
3983 }
3984
3985 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer, "glMapNamedBuffer");
3986 if (!bufObj)
3987 return NULL;
3988
3989 if (!validate_map_buffer_range(ctx, bufObj, 0, bufObj->Size, accessFlags,
3990 "glMapNamedBuffer"))
3991 return NULL;
3992
3993 return map_buffer_range(ctx, bufObj, 0, bufObj->Size, accessFlags,
3994 "glMapNamedBuffer");
3995 }
3996
3997 void * GLAPIENTRY
3998 _mesa_MapNamedBufferEXT(GLuint buffer, GLenum access)
3999 {
4000 GET_CURRENT_CONTEXT(ctx);
4001
4002 GLbitfield accessFlags;
4003 if (!buffer) {
4004 _mesa_error(ctx, GL_INVALID_OPERATION,
4005 "glMapNamedBufferEXT(buffer=0)");
4006 return NULL;
4007 }
4008 if (!get_map_buffer_access_flags(ctx, access, &accessFlags)) {
4009 _mesa_error(ctx, GL_INVALID_ENUM, "glMapNamedBufferEXT(invalid access)");
4010 return NULL;
4011 }
4012
4013 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
4014 if (!_mesa_handle_bind_buffer_gen(ctx, buffer,
4015 &bufObj, "glMapNamedBufferEXT", false))
4016 return NULL;
4017
4018 if (!validate_map_buffer_range(ctx, bufObj, 0, bufObj->Size, accessFlags,
4019 "glMapNamedBufferEXT"))
4020 return NULL;
4021
4022 return map_buffer_range(ctx, bufObj, 0, bufObj->Size, accessFlags,
4023 "glMapNamedBufferEXT");
4024 }
4025
4026 static void
4027 flush_mapped_buffer_range(struct gl_context *ctx,
4028 struct gl_buffer_object *bufObj,
4029 GLintptr offset, GLsizeiptr length,
4030 const char *func)
4031 {
4032 if (!ctx->Extensions.ARB_map_buffer_range) {
4033 _mesa_error(ctx, GL_INVALID_OPERATION,
4034 "%s(ARB_map_buffer_range not supported)", func);
4035 return;
4036 }
4037
4038 if (offset < 0) {
4039 _mesa_error(ctx, GL_INVALID_VALUE,
4040 "%s(offset %ld < 0)", func, (long) offset);
4041 return;
4042 }
4043
4044 if (length < 0) {
4045 _mesa_error(ctx, GL_INVALID_VALUE,
4046 "%s(length %ld < 0)", func, (long) length);
4047 return;
4048 }
4049
4050 if (!_mesa_bufferobj_mapped(bufObj, MAP_USER)) {
4051 /* buffer is not mapped */
4052 _mesa_error(ctx, GL_INVALID_OPERATION,
4053 "%s(buffer is not mapped)", func);
4054 return;
4055 }
4056
4057 if ((bufObj->Mappings[MAP_USER].AccessFlags &
4058 GL_MAP_FLUSH_EXPLICIT_BIT) == 0) {
4059 _mesa_error(ctx, GL_INVALID_OPERATION,
4060 "%s(GL_MAP_FLUSH_EXPLICIT_BIT not set)", func);
4061 return;
4062 }
4063
4064 if (offset + length > bufObj->Mappings[MAP_USER].Length) {
4065 _mesa_error(ctx, GL_INVALID_VALUE,
4066 "%s(offset %ld + length %ld > mapped length %ld)", func,
4067 (long) offset, (long) length,
4068 (long) bufObj->Mappings[MAP_USER].Length);
4069 return;
4070 }
4071
4072 assert(bufObj->Mappings[MAP_USER].AccessFlags & GL_MAP_WRITE_BIT);
4073
4074 _mesa_bufferobj_flush_mapped_range(ctx, offset, length, bufObj,
4075 MAP_USER);
4076 }
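
/* A hedged client-side sketch of the explicit-flush protocol validated
 * above; "buf", "size" and "data" are hypothetical application values:
 *
 *    glBindBuffer(GL_ARRAY_BUFFER, buf);
 *    char *p = glMapBufferRange(GL_ARRAY_BUFFER, 0, size,
 *                               GL_MAP_WRITE_BIT |
 *                               GL_MAP_FLUSH_EXPLICIT_BIT);
 *    memcpy(p, data, 64);
 *    // offset is relative to the start of the mapped range, not the buffer
 *    glFlushMappedBufferRange(GL_ARRAY_BUFFER, 0, 64);
 *    glUnmapBuffer(GL_ARRAY_BUFFER);
 */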
4077
4078 void GLAPIENTRY
4079 _mesa_FlushMappedBufferRange_no_error(GLenum target, GLintptr offset,
4080 GLsizeiptr length)
4081 {
4082 GET_CURRENT_CONTEXT(ctx);
4083 struct gl_buffer_object **bufObjPtr = get_buffer_target(ctx, target);
4084 struct gl_buffer_object *bufObj = *bufObjPtr;
4085
4086 _mesa_bufferobj_flush_mapped_range(ctx, offset, length, bufObj,
4087 MAP_USER);
4088 }
4089
4090 void GLAPIENTRY
4091 _mesa_FlushMappedBufferRange(GLenum target, GLintptr offset,
4092 GLsizeiptr length)
4093 {
4094 GET_CURRENT_CONTEXT(ctx);
4095 struct gl_buffer_object *bufObj;
4096
4097 bufObj = get_buffer(ctx, "glFlushMappedBufferRange", target,
4098 GL_INVALID_OPERATION);
4099 if (!bufObj)
4100 return;
4101
4102 flush_mapped_buffer_range(ctx, bufObj, offset, length,
4103 "glFlushMappedBufferRange");
4104 }
4105
4106 void GLAPIENTRY
4107 _mesa_FlushMappedNamedBufferRange_no_error(GLuint buffer, GLintptr offset,
4108 GLsizeiptr length)
4109 {
4110 GET_CURRENT_CONTEXT(ctx);
4111 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
4112
4113 _mesa_bufferobj_flush_mapped_range(ctx, offset, length, bufObj,
4114 MAP_USER);
4115 }
4116
4117 void GLAPIENTRY
4118 _mesa_FlushMappedNamedBufferRange(GLuint buffer, GLintptr offset,
4119 GLsizeiptr length)
4120 {
4121 GET_CURRENT_CONTEXT(ctx);
4122 struct gl_buffer_object *bufObj;
4123
4124 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer,
4125 "glFlushMappedNamedBufferRange");
4126 if (!bufObj)
4127 return;
4128
4129 flush_mapped_buffer_range(ctx, bufObj, offset, length,
4130 "glFlushMappedNamedBufferRange");
4131 }
4132
4133 void GLAPIENTRY
4134 _mesa_FlushMappedNamedBufferRangeEXT(GLuint buffer, GLintptr offset,
4135 GLsizeiptr length)
4136 {
4137 GET_CURRENT_CONTEXT(ctx);
4138 struct gl_buffer_object *bufObj;
4139
4140 if (!buffer) {
4141 _mesa_error(ctx, GL_INVALID_OPERATION,
4142 "glFlushMappedNamedBufferRangeEXT(buffer=0)");
4143 return;
4144 }
4145
4146 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
4147 if (!_mesa_handle_bind_buffer_gen(ctx, buffer,
4148 &bufObj, "glFlushMappedNamedBufferRangeEXT", false))
4149 return;
4150
4151 flush_mapped_buffer_range(ctx, bufObj, offset, length,
4152 "glFlushMappedNamedBufferRangeEXT");
4153 }
4154
4155 static void
4156 bind_buffer_range_uniform_buffer(struct gl_context *ctx, GLuint index,
4157 struct gl_buffer_object *bufObj,
4158 GLintptr offset, GLsizeiptr size)
4159 {
4160 if (!bufObj) {
4161 offset = -1;
4162 size = -1;
4163 }
4164
4165 _mesa_reference_buffer_object(ctx, &ctx->UniformBuffer, bufObj);
4166 bind_uniform_buffer(ctx, index, bufObj, offset, size, GL_FALSE);
4167 }
4168
4169 /**
4170 * Bind a region of a buffer object to a uniform block binding point.
4171 * \param index the uniform buffer binding point index
4172 * \param bufObj the buffer object
4173 * \param offset offset to the start of buffer object region
4174 * \param size size of the buffer object region
4175 */
4176 static void
4177 bind_buffer_range_uniform_buffer_err(struct gl_context *ctx, GLuint index,
4178 struct gl_buffer_object *bufObj,
4179 GLintptr offset, GLsizeiptr size)
4180 {
4181 if (index >= ctx->Const.MaxUniformBufferBindings) {
4182 _mesa_error(ctx, GL_INVALID_VALUE, "glBindBufferRange(index=%d)", index);
4183 return;
4184 }
4185
4186 if (offset & (ctx->Const.UniformBufferOffsetAlignment - 1)) {
4187 _mesa_error(ctx, GL_INVALID_VALUE,
4188 "glBindBufferRange(offset misaligned %d/%d)", (int) offset,
4189 ctx->Const.UniformBufferOffsetAlignment);
4190 return;
4191 }
4192
4193 bind_buffer_range_uniform_buffer(ctx, index, bufObj, offset, size);
4194 }
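
/* A hedged client-side sketch of the alignment rule enforced above; "ubo"
 * and "block_size" are hypothetical application values:
 *
 *    GLint align = 0;
 *    glGetIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, &align);
 *    // the offset must be a multiple of the queried alignment
 *    glBindBufferRange(GL_UNIFORM_BUFFER, 0, ubo, align, block_size);
 */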
4195
4196 static void
4197 bind_buffer_range_shader_storage_buffer(struct gl_context *ctx,
4198 GLuint index,
4199 struct gl_buffer_object *bufObj,
4200 GLintptr offset,
4201 GLsizeiptr size)
4202 {
4203 if (!bufObj) {
4204 offset = -1;
4205 size = -1;
4206 }
4207
4208 _mesa_reference_buffer_object(ctx, &ctx->ShaderStorageBuffer, bufObj);
4209 bind_shader_storage_buffer(ctx, index, bufObj, offset, size, GL_FALSE);
4210 }
4211
4212 /**
4213 * Bind a region of a buffer object to a shader storage block binding point.
4214 * \param index the shader storage buffer binding point index
4215 * \param bufObj the buffer object
4216 * \param offset offset to the start of buffer object region
4217 * \param size size of the buffer object region
4218 */
4219 static void
4220 bind_buffer_range_shader_storage_buffer_err(struct gl_context *ctx,
4221 GLuint index,
4222 struct gl_buffer_object *bufObj,
4223 GLintptr offset, GLsizeiptr size)
4224 {
4225 if (index >= ctx->Const.MaxShaderStorageBufferBindings) {
4226 _mesa_error(ctx, GL_INVALID_VALUE, "glBindBufferRange(index=%d)", index);
4227 return;
4228 }
4229
4230 if (offset & (ctx->Const.ShaderStorageBufferOffsetAlignment - 1)) {
4231 _mesa_error(ctx, GL_INVALID_VALUE,
4232 "glBindBufferRange(offset misaligned %d/%d)", (int) offset,
4233 ctx->Const.ShaderStorageBufferOffsetAlignment);
4234 return;
4235 }
4236
4237 bind_buffer_range_shader_storage_buffer(ctx, index, bufObj, offset, size);
4238 }
4239
4240 static void
4241 bind_buffer_range_atomic_buffer(struct gl_context *ctx, GLuint index,
4242 struct gl_buffer_object *bufObj,
4243 GLintptr offset, GLsizeiptr size)
4244 {
4245 if (!bufObj) {
4246 offset = -1;
4247 size = -1;
4248 }
4249
4250 _mesa_reference_buffer_object(ctx, &ctx->AtomicBuffer, bufObj);
4251 bind_atomic_buffer(ctx, index, bufObj, offset, size, GL_FALSE);
4252 }
4253
4254 /**
4255 * Bind a region of a buffer object to an atomic storage block binding point.
4256  * \param index the atomic counter buffer binding point index
4257 * \param bufObj the buffer object
4258 * \param offset offset to the start of buffer object region
4259 * \param size size of the buffer object region
4260 */
4261 static void
4262 bind_buffer_range_atomic_buffer_err(struct gl_context *ctx,
4263 GLuint index,
4264 struct gl_buffer_object *bufObj,
4265 GLintptr offset, GLsizeiptr size)
4266 {
4267 if (index >= ctx->Const.MaxAtomicBufferBindings) {
4268 _mesa_error(ctx, GL_INVALID_VALUE, "glBindBufferRange(index=%d)", index);
4269 return;
4270 }
4271
4272 if (offset & (ATOMIC_COUNTER_SIZE - 1)) {
4273 _mesa_error(ctx, GL_INVALID_VALUE,
4274 "glBindBufferRange(offset misaligned %d/%d)", (int) offset,
4275 ATOMIC_COUNTER_SIZE);
4276 return;
4277 }
4278
4279 bind_buffer_range_atomic_buffer(ctx, index, bufObj, offset, size);
4280 }
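
/* A hedged client-side sketch of the atomic-counter rule above; "acbo" is
 * a hypothetical buffer name and ATOMIC_COUNTER_SIZE is 4 bytes:
 *
 *    // OK: offset 8 is a multiple of 4
 *    glBindBufferRange(GL_ATOMIC_COUNTER_BUFFER, 0, acbo,
 *                      8, 4 * sizeof(GLuint));
 *    // GL_INVALID_VALUE: offset 2 is not 4-byte aligned
 *    glBindBufferRange(GL_ATOMIC_COUNTER_BUFFER, 0, acbo,
 *                      2, 4 * sizeof(GLuint));
 */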
4281
4282 static inline bool
4283 bind_buffers_check_offset_and_size(struct gl_context *ctx,
4284 GLuint index,
4285 const GLintptr *offsets,
4286 const GLsizeiptr *sizes)
4287 {
4288 if (offsets[index] < 0) {
4289 /* The ARB_multi_bind spec says:
4290 *
4291 * "An INVALID_VALUE error is generated by BindBuffersRange if any
4292 * value in <offsets> is less than zero (per binding)."
4293 */
4294 _mesa_error(ctx, GL_INVALID_VALUE,
4295 "glBindBuffersRange(offsets[%u]=%" PRId64 " < 0)",
4296 index, (int64_t) offsets[index]);
4297 return false;
4298 }
4299
4300 if (sizes[index] <= 0) {
4301 /* The ARB_multi_bind spec says:
4302 *
4303 * "An INVALID_VALUE error is generated by BindBuffersRange if any
4304 * value in <sizes> is less than or equal to zero (per binding)."
4305 */
4306 _mesa_error(ctx, GL_INVALID_VALUE,
4307 "glBindBuffersRange(sizes[%u]=%" PRId64 " <= 0)",
4308 index, (int64_t) sizes[index]);
4309 return false;
4310 }
4311
4312 return true;
4313 }
4314
4315 static bool
4316 error_check_bind_uniform_buffers(struct gl_context *ctx,
4317 GLuint first, GLsizei count,
4318 const char *caller)
4319 {
4320 if (!ctx->Extensions.ARB_uniform_buffer_object) {
4321 _mesa_error(ctx, GL_INVALID_ENUM,
4322 "%s(target=GL_UNIFORM_BUFFER)", caller);
4323 return false;
4324 }
4325
4326    /* The ARB_multi_bind spec says:
4327 *
4328 * "An INVALID_OPERATION error is generated if <first> + <count> is
4329 * greater than the number of target-specific indexed binding points,
4330 * as described in section 6.7.1."
4331 */
4332 if (first + count > ctx->Const.MaxUniformBufferBindings) {
4333 _mesa_error(ctx, GL_INVALID_OPERATION,
4334 "%s(first=%u + count=%d > the value of "
4335 "GL_MAX_UNIFORM_BUFFER_BINDINGS=%u)",
4336 caller, first, count,
4337 ctx->Const.MaxUniformBufferBindings);
4338 return false;
4339 }
4340
4341 return true;
4342 }
4343
4344 static bool
4345 error_check_bind_shader_storage_buffers(struct gl_context *ctx,
4346 GLuint first, GLsizei count,
4347 const char *caller)
4348 {
4349 if (!ctx->Extensions.ARB_shader_storage_buffer_object) {
4350 _mesa_error(ctx, GL_INVALID_ENUM,
4351 "%s(target=GL_SHADER_STORAGE_BUFFER)", caller);
4352 return false;
4353 }
4354
4355    /* The ARB_multi_bind spec says:
4356 *
4357 * "An INVALID_OPERATION error is generated if <first> + <count> is
4358 * greater than the number of target-specific indexed binding points,
4359 * as described in section 6.7.1."
4360 */
4361 if (first + count > ctx->Const.MaxShaderStorageBufferBindings) {
4362 _mesa_error(ctx, GL_INVALID_OPERATION,
4363 "%s(first=%u + count=%d > the value of "
4364 "GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS=%u)",
4365 caller, first, count,
4366 ctx->Const.MaxShaderStorageBufferBindings);
4367 return false;
4368 }
4369
4370 return true;
4371 }
4372
4373 /**
4374 * Unbind all uniform buffers in the range
4375 * <first> through <first>+<count>-1
4376 */
4377 static void
4378 unbind_uniform_buffers(struct gl_context *ctx, GLuint first, GLsizei count)
4379 {
4380 for (int i = 0; i < count; i++)
4381 set_buffer_binding(ctx, &ctx->UniformBufferBindings[first + i],
4382 NULL, -1, -1, GL_TRUE, 0);
4383 }
4384
4385 /**
4386 * Unbind all shader storage buffers in the range
4387 * <first> through <first>+<count>-1
4388 */
4389 static void
4390 unbind_shader_storage_buffers(struct gl_context *ctx, GLuint first,
4391 GLsizei count)
4392 {
4393 for (int i = 0; i < count; i++)
4394 set_buffer_binding(ctx, &ctx->ShaderStorageBufferBindings[first + i],
4395 NULL, -1, -1, GL_TRUE, 0);
4396 }
4397
4398 static void
4399 bind_uniform_buffers(struct gl_context *ctx, GLuint first, GLsizei count,
4400 const GLuint *buffers,
4401 bool range,
4402 const GLintptr *offsets, const GLsizeiptr *sizes,
4403 const char *caller)
4404 {
4405 if (!error_check_bind_uniform_buffers(ctx, first, count, caller))
4406 return;
4407
4408 /* Assume that at least one binding will be changed */
4409 FLUSH_VERTICES(ctx, 0, 0);
4410 ctx->NewDriverState |= ST_NEW_UNIFORM_BUFFER;
4411
4412 if (!buffers) {
4413 /* The ARB_multi_bind spec says:
4414 *
4415 * "If <buffers> is NULL, all bindings from <first> through
4416 * <first>+<count>-1 are reset to their unbound (zero) state.
4417 * In this case, the offsets and sizes associated with the
4418 * binding points are set to default values, ignoring
4419 * <offsets> and <sizes>."
4420 */
4421 unbind_uniform_buffers(ctx, first, count);
4422 return;
4423 }
4424
4425 /* Note that the error semantics for multi-bind commands differ from
4426 * those of other GL commands.
4427 *
4428 * The Issues section in the ARB_multi_bind spec says:
4429 *
4430 * "(11) Typically, OpenGL specifies that if an error is generated by a
4431 * command, that command has no effect. This is somewhat
4432 * unfortunate for multi-bind commands, because it would require a
4433 * first pass to scan the entire list of bound objects for errors
4434 * and then a second pass to actually perform the bindings.
4435 * Should we have different error semantics?
4436 *
4437 * RESOLVED: Yes. In this specification, when the parameters for
4438 * one of the <count> binding points are invalid, that binding point
4439 * is not updated and an error will be generated. However, other
4440 * binding points in the same command will be updated if their
4441 * parameters are valid and no other error occurs."
4442 */
4443
4444 _mesa_HashLockMaybeLocked(ctx->Shared->BufferObjects,
4445 ctx->BufferObjectsLocked);
4446
4447 for (int i = 0; i < count; i++) {
4448 struct gl_buffer_binding *binding =
4449 &ctx->UniformBufferBindings[first + i];
4450 GLintptr offset = 0;
4451 GLsizeiptr size = 0;
4452
4453 if (range) {
4454 if (!bind_buffers_check_offset_and_size(ctx, i, offsets, sizes))
4455 continue;
4456
4457 /* The ARB_multi_bind spec says:
4458 *
4459 * "An INVALID_VALUE error is generated by BindBuffersRange if any
4460 * pair of values in <offsets> and <sizes> does not respectively
4461 * satisfy the constraints described for those parameters for the
4462 * specified target, as described in section 6.7.1 (per binding)."
4463 *
4464 * Section 6.7.1 refers to table 6.5, which says:
4465 *
4466 * "┌───────────────────────────────────────────────────────────────┐
4467 * │ Uniform buffer array bindings (see sec. 7.6) │
4468 * ├─────────────────────┬─────────────────────────────────────────┤
4469 * │ ... │ ... │
4470 * │ offset restriction │ multiple of value of UNIFORM_BUFFER_- │
4471 * │ │ OFFSET_ALIGNMENT │
4472 * │ ... │ ... │
4473 * │ size restriction │ none │
4474 * └─────────────────────┴─────────────────────────────────────────┘"
4475 */
4476 if (offsets[i] & (ctx->Const.UniformBufferOffsetAlignment - 1)) {
4477 _mesa_error(ctx, GL_INVALID_VALUE,
4478 "glBindBuffersRange(offsets[%u]=%" PRId64
4479 " is misaligned; it must be a multiple of the value of "
4480 "GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT=%u when "
4481 "target=GL_UNIFORM_BUFFER)",
4482 i, (int64_t) offsets[i],
4483 ctx->Const.UniformBufferOffsetAlignment);
4484 continue;
4485 }
4486
4487 offset = offsets[i];
4488 size = sizes[i];
4489 }
4490
4491 set_buffer_multi_binding(ctx, buffers, i, caller,
4492 binding, offset, size, range,
4493 USAGE_UNIFORM_BUFFER);
4494 }
4495
4496 _mesa_HashUnlockMaybeLocked(ctx->Shared->BufferObjects,
4497 ctx->BufferObjectsLocked);
4498 }
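
/* A hedged client-side sketch of the multi-bind behavior implemented above;
 * "ubos" is a hypothetical array of three buffer names, and the offsets
 * assume GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT divides 256:
 *
 *    GLintptr   offsets[3] = { 0, 256, 512 };
 *    GLsizeiptr sizes[3]   = { 256, 256, 256 };
 *    glBindBuffersRange(GL_UNIFORM_BUFFER, 0, 3, ubos, offsets, sizes);
 *
 *    // Passing NULL resets bindings 0..2 to zero and ignores <offsets>
 *    // and <sizes>, matching the unbind_uniform_buffers() path above.
 *    glBindBuffersRange(GL_UNIFORM_BUFFER, 0, 3, NULL, NULL, NULL);
 *
 * Note that a bad offset only skips that one binding point; the remaining
 * bindings in the same call are still updated, per the spec issue quoted
 * above.
 */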
4499
4500 static void
4501 bind_shader_storage_buffers(struct gl_context *ctx, GLuint first,
4502 GLsizei count, const GLuint *buffers,
4503 bool range,
4504 const GLintptr *offsets,
4505 const GLsizeiptr *sizes,
4506 const char *caller)
4507 {
4508 if (!error_check_bind_shader_storage_buffers(ctx, first, count, caller))
4509 return;
4510
4511 /* Assume that at least one binding will be changed */
4512 FLUSH_VERTICES(ctx, 0, 0);
4513 ctx->NewDriverState |= ST_NEW_STORAGE_BUFFER;
4514
4515 if (!buffers) {
4516 /* The ARB_multi_bind spec says:
4517 *
4518 * "If <buffers> is NULL, all bindings from <first> through
4519 * <first>+<count>-1 are reset to their unbound (zero) state.
4520 * In this case, the offsets and sizes associated with the
4521 * binding points are set to default values, ignoring
4522 * <offsets> and <sizes>."
4523 */
4524 unbind_shader_storage_buffers(ctx, first, count);
4525 return;
4526 }
4527
4528 /* Note that the error semantics for multi-bind commands differ from
4529 * those of other GL commands.
4530 *
4531 * The Issues section in the ARB_multi_bind spec says:
4532 *
4533 * "(11) Typically, OpenGL specifies that if an error is generated by a
4534 * command, that command has no effect. This is somewhat
4535 * unfortunate for multi-bind commands, because it would require a
4536 * first pass to scan the entire list of bound objects for errors
4537 * and then a second pass to actually perform the bindings.
4538 * Should we have different error semantics?
4539 *
4540 * RESOLVED: Yes. In this specification, when the parameters for
4541 * one of the <count> binding points are invalid, that binding point
4542 * is not updated and an error will be generated. However, other
4543 * binding points in the same command will be updated if their
4544 * parameters are valid and no other error occurs."
4545 */
4546
4547 _mesa_HashLockMaybeLocked(ctx->Shared->BufferObjects,
4548 ctx->BufferObjectsLocked);
4549
4550 for (int i = 0; i < count; i++) {
4551 struct gl_buffer_binding *binding =
4552 &ctx->ShaderStorageBufferBindings[first + i];
4553 GLintptr offset = 0;
4554 GLsizeiptr size = 0;
4555
4556 if (range) {
4557 if (!bind_buffers_check_offset_and_size(ctx, i, offsets, sizes))
4558 continue;
4559
4560 /* The ARB_multi_bind spec says:
4561 *
4562 * "An INVALID_VALUE error is generated by BindBuffersRange if any
4563 * pair of values in <offsets> and <sizes> does not respectively
4564 * satisfy the constraints described for those parameters for the
4565 * specified target, as described in section 6.7.1 (per binding)."
4566 *
4567 * Section 6.7.1 refers to table 6.5, which says:
4568 *
4569 * "┌───────────────────────────────────────────────────────────────┐
4570 * │ Shader storage buffer array bindings (see sec. 7.8) │
4571 * ├─────────────────────┬─────────────────────────────────────────┤
4572 * │ ... │ ... │
4573 * │ offset restriction │ multiple of value of SHADER_STORAGE_- │
4574 * │ │ BUFFER_OFFSET_ALIGNMENT │
4575 * │ ... │ ... │
4576 * │ size restriction │ none │
4577 * └─────────────────────┴─────────────────────────────────────────┘"
4578 */
4579 if (offsets[i] & (ctx->Const.ShaderStorageBufferOffsetAlignment - 1)) {
4580 _mesa_error(ctx, GL_INVALID_VALUE,
4581 "glBindBuffersRange(offsets[%u]=%" PRId64
4582 " is misaligned; it must be a multiple of the value of "
4583 "GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT=%u when "
4584 "target=GL_SHADER_STORAGE_BUFFER)",
4585 i, (int64_t) offsets[i],
4586 ctx->Const.ShaderStorageBufferOffsetAlignment);
4587 continue;
4588 }
4589
4590 offset = offsets[i];
4591 size = sizes[i];
4592 }
4593
4594 set_buffer_multi_binding(ctx, buffers, i, caller,
4595 binding, offset, size, range,
4596 USAGE_SHADER_STORAGE_BUFFER);
4597 }
4598
4599 _mesa_HashUnlockMaybeLocked(ctx->Shared->BufferObjects,
4600 ctx->BufferObjectsLocked);
4601 }
4602
4603 static bool
4604 error_check_bind_xfb_buffers(struct gl_context *ctx,
4605 struct gl_transform_feedback_object *tfObj,
4606 GLuint first, GLsizei count, const char *caller)
4607 {
4608 if (!ctx->Extensions.EXT_transform_feedback) {
4609 _mesa_error(ctx, GL_INVALID_ENUM,
4610 "%s(target=GL_TRANSFORM_FEEDBACK_BUFFER)", caller);
4611 return false;
4612 }
4613
4614 /* Page 398 of the PDF of the OpenGL 4.4 (Core Profile) spec says:
4615 *
4616 * "An INVALID_OPERATION error is generated :
4617 *
4618 * ...
4619 * • by BindBufferRange or BindBufferBase if target is TRANSFORM_-
4620 * FEEDBACK_BUFFER and transform feedback is currently active."
4621 *
4622 * We assume that this is also meant to apply to BindBuffersRange
4623 * and BindBuffersBase.
4624 */
4625 if (tfObj->Active) {
4626 _mesa_error(ctx, GL_INVALID_OPERATION,
4627 "%s(Changing transform feedback buffers while "
4628 "transform feedback is active)", caller);
4629 return false;
4630 }
4631
4632    /* The ARB_multi_bind spec says:
4633 *
4634 * "An INVALID_OPERATION error is generated if <first> + <count> is
4635 * greater than the number of target-specific indexed binding points,
4636 * as described in section 6.7.1."
4637 */
4638 if (first + count > ctx->Const.MaxTransformFeedbackBuffers) {
4639 _mesa_error(ctx, GL_INVALID_OPERATION,
4640 "%s(first=%u + count=%d > the value of "
4641 "GL_MAX_TRANSFORM_FEEDBACK_BUFFERS=%u)",
4642 caller, first, count,
4643 ctx->Const.MaxTransformFeedbackBuffers);
4644 return false;
4645 }
4646
4647 return true;
4648 }
4649
4650 /**
4651 * Unbind all transform feedback buffers in the range
4652 * <first> through <first>+<count>-1
4653 */
4654 static void
4655 unbind_xfb_buffers(struct gl_context *ctx,
4656 struct gl_transform_feedback_object *tfObj,
4657 GLuint first, GLsizei count)
4658 {
4659 for (int i = 0; i < count; i++)
4660 _mesa_set_transform_feedback_binding(ctx, tfObj, first + i,
4661 NULL, 0, 0);
4662 }
4663
4664 static void
4665 bind_xfb_buffers(struct gl_context *ctx,
4666 GLuint first, GLsizei count,
4667 const GLuint *buffers,
4668 bool range,
4669 const GLintptr *offsets,
4670 const GLsizeiptr *sizes,
4671 const char *caller)
4672 {
4673 struct gl_transform_feedback_object *tfObj =
4674 ctx->TransformFeedback.CurrentObject;
4675
4676 if (!error_check_bind_xfb_buffers(ctx, tfObj, first, count, caller))
4677 return;
4678
4679 /* Assume that at least one binding will be changed */
4680 FLUSH_VERTICES(ctx, 0, 0);
4681
4682 if (!buffers) {
4683 /* The ARB_multi_bind spec says:
4684 *
4685 * "If <buffers> is NULL, all bindings from <first> through
4686 * <first>+<count>-1 are reset to their unbound (zero) state.
4687 * In this case, the offsets and sizes associated with the
4688 * binding points are set to default values, ignoring
4689 * <offsets> and <sizes>."
4690 */
4691 unbind_xfb_buffers(ctx, tfObj, first, count);
4692 return;
4693 }
4694
4695 /* Note that the error semantics for multi-bind commands differ from
4696 * those of other GL commands.
4697 *
4698 * The Issues section in the ARB_multi_bind spec says:
4699 *
4700 * "(11) Typically, OpenGL specifies that if an error is generated by a
4701 * command, that command has no effect. This is somewhat
4702 * unfortunate for multi-bind commands, because it would require a
4703 * first pass to scan the entire list of bound objects for errors
4704 * and then a second pass to actually perform the bindings.
4705 * Should we have different error semantics?
4706 *
4707 * RESOLVED: Yes. In this specification, when the parameters for
4708 * one of the <count> binding points are invalid, that binding point
4709 * is not updated and an error will be generated. However, other
4710 * binding points in the same command will be updated if their
4711 * parameters are valid and no other error occurs."
4712 */
4713
4714 _mesa_HashLockMaybeLocked(ctx->Shared->BufferObjects,
4715 ctx->BufferObjectsLocked);
4716
4717 for (int i = 0; i < count; i++) {
4718 const GLuint index = first + i;
4719 struct gl_buffer_object * const boundBufObj = tfObj->Buffers[index];
4720 struct gl_buffer_object *bufObj;
4721 GLintptr offset = 0;
4722 GLsizeiptr size = 0;
4723
4724 if (range) {
4725 if (!bind_buffers_check_offset_and_size(ctx, i, offsets, sizes))
4726 continue;
4727
4728 /* The ARB_multi_bind spec says:
4729 *
4730 * "An INVALID_VALUE error is generated by BindBuffersRange if any
4731 * pair of values in <offsets> and <sizes> does not respectively
4732 * satisfy the constraints described for those parameters for the
4733 * specified target, as described in section 6.7.1 (per binding)."
4734 *
4735 * Section 6.7.1 refers to table 6.5, which says:
4736 *
4737 * "┌───────────────────────────────────────────────────────────────┐
4738 * │ Transform feedback array bindings (see sec. 13.2.2) │
4739 * ├───────────────────────┬───────────────────────────────────────┤
4740 * │ ... │ ... │
4741 * │ offset restriction │ multiple of 4 │
4742 * │ ... │ ... │
4743 * │ size restriction │ multiple of 4 │
4744 * └───────────────────────┴───────────────────────────────────────┘"
4745 */
4746 if (offsets[i] & 0x3) {
4747 _mesa_error(ctx, GL_INVALID_VALUE,
4748 "glBindBuffersRange(offsets[%u]=%" PRId64
4749 " is misaligned; it must be a multiple of 4 when "
4750 "target=GL_TRANSFORM_FEEDBACK_BUFFER)",
4751 i, (int64_t) offsets[i]);
4752 continue;
4753 }
4754
4755 if (sizes[i] & 0x3) {
4756 _mesa_error(ctx, GL_INVALID_VALUE,
4757 "glBindBuffersRange(sizes[%u]=%" PRId64
4758 " is misaligned; it must be a multiple of 4 when "
4759 "target=GL_TRANSFORM_FEEDBACK_BUFFER)",
4760 i, (int64_t) sizes[i]);
4761 continue;
4762 }
4763
4764 offset = offsets[i];
4765 size = sizes[i];
4766 }
4767
4768 if (boundBufObj && boundBufObj->Name == buffers[i])
4769 bufObj = boundBufObj;
4770 else {
4771 bool error;
4772 bufObj = _mesa_multi_bind_lookup_bufferobj(ctx, buffers, i, caller,
4773 &error);
4774 if (error)
4775 continue;
4776 }
4777
4778 _mesa_set_transform_feedback_binding(ctx, tfObj, index, bufObj,
4779 offset, size);
4780 }
4781
4782 _mesa_HashUnlockMaybeLocked(ctx->Shared->BufferObjects,
4783 ctx->BufferObjectsLocked);
4784 }
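
/* A hedged client-side sketch of the transform-feedback rules checked above;
 * "xfb_bufs" holds two hypothetical buffer names:
 *
 *    GLintptr   offsets[2] = { 0, 4096 };     // must be multiples of 4
 *    GLsizeiptr sizes[2]   = { 4096, 4096 };  // must be multiples of 4
 *    glBindBuffersRange(GL_TRANSFORM_FEEDBACK_BUFFER, 0, 2,
 *                       xfb_bufs, offsets, sizes);
 *
 * Issuing the same call between glBeginTransformFeedback() and
 * glEndTransformFeedback() generates GL_INVALID_OPERATION.
 */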
4785
4786 static bool
4787 error_check_bind_atomic_buffers(struct gl_context *ctx,
4788 GLuint first, GLsizei count,
4789 const char *caller)
4790 {
4791 if (!ctx->Extensions.ARB_shader_atomic_counters) {
4792 _mesa_error(ctx, GL_INVALID_ENUM,
4793 "%s(target=GL_ATOMIC_COUNTER_BUFFER)", caller);
4794 return false;
4795 }
4796
4797    /* The ARB_multi_bind spec says:
4798 *
4799 * "An INVALID_OPERATION error is generated if <first> + <count> is
4800 * greater than the number of target-specific indexed binding points,
4801 * as described in section 6.7.1."
4802 */
4803 if (first + count > ctx->Const.MaxAtomicBufferBindings) {
4804 _mesa_error(ctx, GL_INVALID_OPERATION,
4805 "%s(first=%u + count=%d > the value of "
4806 "GL_MAX_ATOMIC_BUFFER_BINDINGS=%u)",
4807 caller, first, count, ctx->Const.MaxAtomicBufferBindings);
4808 return false;
4809 }
4810
4811 return true;
4812 }
4813
4814 /**
4815 * Unbind all atomic counter buffers in the range
4816 * <first> through <first>+<count>-1
4817 */
4818 static void
4819 unbind_atomic_buffers(struct gl_context *ctx, GLuint first, GLsizei count)
4820 {
4821 for (int i = 0; i < count; i++)
4822 set_buffer_binding(ctx, &ctx->AtomicBufferBindings[first + i],
4823 NULL, -1, -1, GL_TRUE, 0);
4824 }
4825
4826 static void
4827 bind_atomic_buffers(struct gl_context *ctx,
4828 GLuint first,
4829 GLsizei count,
4830 const GLuint *buffers,
4831 bool range,
4832 const GLintptr *offsets,
4833 const GLsizeiptr *sizes,
4834 const char *caller)
4835 {
4836 if (!error_check_bind_atomic_buffers(ctx, first, count, caller))
4837 return;
4838
4839 /* Assume that at least one binding will be changed */
4840 FLUSH_VERTICES(ctx, 0, 0);
4841 ctx->NewDriverState |= ctx->DriverFlags.NewAtomicBuffer;
4842
4843 if (!buffers) {
4844 /* The ARB_multi_bind spec says:
4845 *
4846 * "If <buffers> is NULL, all bindings from <first> through
4847 * <first>+<count>-1 are reset to their unbound (zero) state.
4848 * In this case, the offsets and sizes associated with the
4849 * binding points are set to default values, ignoring
4850 * <offsets> and <sizes>."
4851 */
4852 unbind_atomic_buffers(ctx, first, count);
4853 return;
4854 }
4855
4856 /* Note that the error semantics for multi-bind commands differ from
4857 * those of other GL commands.
4858 *
4859 * The Issues section in the ARB_multi_bind spec says:
4860 *
4861 * "(11) Typically, OpenGL specifies that if an error is generated by a
4862 * command, that command has no effect. This is somewhat
4863 * unfortunate for multi-bind commands, because it would require a
4864 * first pass to scan the entire list of bound objects for errors
4865 * and then a second pass to actually perform the bindings.
4866 * Should we have different error semantics?
4867 *
4868 * RESOLVED: Yes. In this specification, when the parameters for
4869 * one of the <count> binding points are invalid, that binding point
4870 * is not updated and an error will be generated. However, other
4871 * binding points in the same command will be updated if their
4872 * parameters are valid and no other error occurs."
4873 */
4874
4875 _mesa_HashLockMaybeLocked(ctx->Shared->BufferObjects,
4876 ctx->BufferObjectsLocked);
4877
4878 for (int i = 0; i < count; i++) {
4879 struct gl_buffer_binding *binding =
4880 &ctx->AtomicBufferBindings[first + i];
4881 GLintptr offset = 0;
4882 GLsizeiptr size = 0;
4883
4884 if (range) {
4885 if (!bind_buffers_check_offset_and_size(ctx, i, offsets, sizes))
4886 continue;
4887
4888 /* The ARB_multi_bind spec says:
4889 *
4890 * "An INVALID_VALUE error is generated by BindBuffersRange if any
4891 * pair of values in <offsets> and <sizes> does not respectively
4892 * satisfy the constraints described for those parameters for the
4893 * specified target, as described in section 6.7.1 (per binding)."
4894 *
4895 * Section 6.7.1 refers to table 6.5, which says:
4896 *
4897 * "┌───────────────────────────────────────────────────────────────┐
4898 * │ Atomic counter array bindings (see sec. 7.7.2) │
4899 * ├───────────────────────┬───────────────────────────────────────┤
4900 * │ ... │ ... │
4901 * │ offset restriction │ multiple of 4 │
4902 * │ ... │ ... │
4903 * │ size restriction │ none │
4904 * └───────────────────────┴───────────────────────────────────────┘"
4905 */
4906 if (offsets[i] & (ATOMIC_COUNTER_SIZE - 1)) {
4907 _mesa_error(ctx, GL_INVALID_VALUE,
4908 "glBindBuffersRange(offsets[%u]=%" PRId64
4909 " is misaligned; it must be a multiple of %d when "
4910 "target=GL_ATOMIC_COUNTER_BUFFER)",
4911 i, (int64_t) offsets[i], ATOMIC_COUNTER_SIZE);
4912 continue;
4913 }
4914
4915 offset = offsets[i];
4916 size = sizes[i];
4917 }
4918
4919 set_buffer_multi_binding(ctx, buffers, i, caller,
4920 binding, offset, size, range,
4921 USAGE_ATOMIC_COUNTER_BUFFER);
4922 }
4923
4924 _mesa_HashUnlockMaybeLocked(ctx->Shared->BufferObjects,
4925 ctx->BufferObjectsLocked);
4926 }
4927
4928 static ALWAYS_INLINE void
4929 bind_buffer_range(GLenum target, GLuint index, GLuint buffer, GLintptr offset,
4930 GLsizeiptr size, bool no_error)
4931 {
4932 GET_CURRENT_CONTEXT(ctx);
4933 struct gl_buffer_object *bufObj;
4934
4935 if (MESA_VERBOSE & VERBOSE_API) {
4936 _mesa_debug(ctx, "glBindBufferRange(%s, %u, %u, %lu, %lu)\n",
4937 _mesa_enum_to_string(target), index, buffer,
4938 (unsigned long) offset, (unsigned long) size);
4939 }
4940
4941 if (buffer == 0) {
4942 bufObj = NULL;
4943 } else {
4944 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
4945 if (!_mesa_handle_bind_buffer_gen(ctx, buffer,
4946 &bufObj, "glBindBufferRange", false))
4947 return;
4948
4949 if (!no_error && !bufObj) {
4950 _mesa_error(ctx, GL_INVALID_OPERATION,
4951 "glBindBufferRange(invalid buffer=%u)", buffer);
4952 return;
4953 }
4954 }
4955
4956 if (no_error) {
4957 switch (target) {
4958 case GL_TRANSFORM_FEEDBACK_BUFFER:
4959 _mesa_bind_buffer_range_xfb(ctx, ctx->TransformFeedback.CurrentObject,
4960 index, bufObj, offset, size);
4961 return;
4962 case GL_UNIFORM_BUFFER:
4963 bind_buffer_range_uniform_buffer(ctx, index, bufObj, offset, size);
4964 return;
4965 case GL_SHADER_STORAGE_BUFFER:
4966 bind_buffer_range_shader_storage_buffer(ctx, index, bufObj, offset,
4967 size);
4968 return;
4969 case GL_ATOMIC_COUNTER_BUFFER:
4970 bind_buffer_range_atomic_buffer(ctx, index, bufObj, offset, size);
4971 return;
4972 default:
4973 unreachable("invalid BindBufferRange target with KHR_no_error");
4974 }
4975 } else {
4976 if (buffer != 0) {
4977 if (size <= 0) {
4978 _mesa_error(ctx, GL_INVALID_VALUE, "glBindBufferRange(size=%d)",
4979 (int) size);
4980 return;
4981 }
4982 }
4983
4984 switch (target) {
4985 case GL_TRANSFORM_FEEDBACK_BUFFER:
4986 if (!_mesa_validate_buffer_range_xfb(ctx,
4987 ctx->TransformFeedback.CurrentObject,
4988 index, bufObj, offset, size,
4989 false))
4990 return;
4991
4992 _mesa_bind_buffer_range_xfb(ctx, ctx->TransformFeedback.CurrentObject,
4993 index, bufObj, offset, size);
4994 return;
4995 case GL_UNIFORM_BUFFER:
4996 bind_buffer_range_uniform_buffer_err(ctx, index, bufObj, offset,
4997 size);
4998 return;
4999 case GL_SHADER_STORAGE_BUFFER:
5000 bind_buffer_range_shader_storage_buffer_err(ctx, index, bufObj,
5001 offset, size);
5002 return;
5003 case GL_ATOMIC_COUNTER_BUFFER:
5004 bind_buffer_range_atomic_buffer_err(ctx, index, bufObj,
5005 offset, size);
5006 return;
5007 default:
5008 _mesa_error(ctx, GL_INVALID_ENUM, "glBindBufferRange(target)");
5009 return;
5010 }
5011 }
5012 }
5013
5014 void GLAPIENTRY
5015 _mesa_BindBufferRange_no_error(GLenum target, GLuint index, GLuint buffer,
5016 GLintptr offset, GLsizeiptr size)
5017 {
5018 bind_buffer_range(target, index, buffer, offset, size, true);
5019 }
5020
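/* Illustrative client-side sketch (not part of this file): for
 * GL_UNIFORM_BUFFER the offset passed to glBindBufferRange must be a
 * multiple of the implementation's GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT.
 * The buffer name "ubo" and the 256-byte size below are hypothetical:
 *
 *    GLint align = 0;
 *    glGetIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, &align);
 *    glBindBufferRange(GL_UNIFORM_BUFFER, 0, ubo, align, 256);
 *
 * A misaligned offset is expected to be rejected with GL_INVALID_VALUE by
 * the validated (non-no_error) path below.
 */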
5021 void GLAPIENTRY
5022 _mesa_BindBufferRange(GLenum target, GLuint index,
5023 GLuint buffer, GLintptr offset, GLsizeiptr size)
5024 {
5025 bind_buffer_range(target, index, buffer, offset, size, false);
5026 }
5027
5028 void GLAPIENTRY
5029 _mesa_BindBufferBase(GLenum target, GLuint index, GLuint buffer)
5030 {
5031 GET_CURRENT_CONTEXT(ctx);
5032 struct gl_buffer_object *bufObj;
5033
5034 if (MESA_VERBOSE & VERBOSE_API) {
5035 _mesa_debug(ctx, "glBindBufferBase(%s, %u, %u)\n",
5036 _mesa_enum_to_string(target), index, buffer);
5037 }
5038
5039 if (buffer == 0) {
5040 bufObj = NULL;
5041 } else {
5042 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
5043 if (!_mesa_handle_bind_buffer_gen(ctx, buffer,
5044 &bufObj, "glBindBufferBase", false))
5045 return;
5046
5047 if (!bufObj) {
5048 _mesa_error(ctx, GL_INVALID_OPERATION,
5049 "glBindBufferBase(invalid buffer=%u)", buffer);
5050 return;
5051 }
5052 }
5053
5054 /* Note that there's some oddness in the GL 3.1-GL 3.3 specifications with
5055 * regard to BindBufferBase. It says (GL 3.1 core spec, page 63):
5056 *
5057 * "BindBufferBase is equivalent to calling BindBufferRange with offset
5058 * zero and size equal to the size of buffer."
5059 *
5060 * but it says for glGetIntegeri_v (GL 3.1 core spec, page 230):
5061 *
5062 * "If the parameter (starting offset or size) was not specified when the
5063 * buffer object was bound, zero is returned."
5064 *
5065 * What happens if the size of the buffer changes? Does the size of the
5066 * buffer at the moment glBindBufferBase was called still play a role, like
5067 * the first quote would imply, or is the size meaningless in the
5068 * glBindBufferBase case like the second quote would suggest? The GL 4.1
5069 * core spec page 45 says:
5070 *
5071 * "It is equivalent to calling BindBufferRange with offset zero, while
5072 * size is determined by the size of the bound buffer at the time the
5073 * binding is used."
5074 *
5075 * My interpretation is that the GL 4.1 spec was a clarification of the
5076 * behavior, not a change. In particular, this choice will only make
5077 * rendering work in cases where it would have had undefined results.
5078 */
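/* Illustrative client-side sketch (not part of this file), following the
 * GL 4.1 wording quoted above:
 *
 *    glBindBufferBase(GL_UNIFORM_BUFFER, 0, ubo);   // size left unspecified
 *    glBufferData(GL_UNIFORM_BUFFER, new_size, data, GL_DYNAMIC_DRAW);
 *
 * Binding point 0 now covers the buffer's new size the next time it is used,
 * whereas a glBindBufferRange binding would keep its explicit size.  The
 * names ubo/new_size/data are hypothetical.
 */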
5079
5080 switch (target) {
5081 case GL_TRANSFORM_FEEDBACK_BUFFER:
5082 _mesa_bind_buffer_base_transform_feedback(ctx,
5083 ctx->TransformFeedback.CurrentObject,
5084 index, bufObj, false);
5085 return;
5086 case GL_UNIFORM_BUFFER:
5087 bind_buffer_base_uniform_buffer(ctx, index, bufObj);
5088 return;
5089 case GL_SHADER_STORAGE_BUFFER:
5090 bind_buffer_base_shader_storage_buffer(ctx, index, bufObj);
5091 return;
5092 case GL_ATOMIC_COUNTER_BUFFER:
5093 bind_buffer_base_atomic_buffer(ctx, index, bufObj);
5094 return;
5095 default:
5096 _mesa_error(ctx, GL_INVALID_ENUM, "glBindBufferBase(target)");
5097 return;
5098 }
5099 }
5100
5101 void GLAPIENTRY
5102 _mesa_BindBuffersRange(GLenum target, GLuint first, GLsizei count,
5103 const GLuint *buffers,
5104 const GLintptr *offsets, const GLsizeiptr *sizes)
5105 {
5106 GET_CURRENT_CONTEXT(ctx);
5107
5108 if (MESA_VERBOSE & VERBOSE_API) {
5109 _mesa_debug(ctx, "glBindBuffersRange(%s, %u, %d, %p, %p, %p)\n",
5110 _mesa_enum_to_string(target), first, count,
5111 buffers, offsets, sizes);
5112 }
5113
5114 switch (target) {
5115 case GL_TRANSFORM_FEEDBACK_BUFFER:
5116 bind_xfb_buffers(ctx, first, count, buffers, true, offsets, sizes,
5117 "glBindBuffersRange");
5118 return;
5119 case GL_UNIFORM_BUFFER:
5120 bind_uniform_buffers(ctx, first, count, buffers, true, offsets, sizes,
5121 "glBindBuffersRange");
5122 return;
5123 case GL_SHADER_STORAGE_BUFFER:
5124 bind_shader_storage_buffers(ctx, first, count, buffers, true, offsets, sizes,
5125 "glBindBuffersRange");
5126 return;
5127 case GL_ATOMIC_COUNTER_BUFFER:
5128 bind_atomic_buffers(ctx, first, count, buffers, true, offsets, sizes,
5129 "glBindBuffersRange");
5130 return;
5131 default:
5132 _mesa_error(ctx, GL_INVALID_ENUM, "glBindBuffersRange(target=%s)",
5133 _mesa_enum_to_string(target));
5134 break;
5135 }
5136 }
5137
5138 void GLAPIENTRY
5139 _mesa_BindBuffersBase(GLenum target, GLuint first, GLsizei count,
5140 const GLuint *buffers)
5141 {
5142 GET_CURRENT_CONTEXT(ctx);
5143
5144 if (MESA_VERBOSE & VERBOSE_API) {
5145 _mesa_debug(ctx, "glBindBuffersBase(%s, %u, %d, %p)\n",
5146 _mesa_enum_to_string(target), first, count, buffers);
5147 }
5148
5149 switch (target) {
5150 case GL_TRANSFORM_FEEDBACK_BUFFER:
5151 bind_xfb_buffers(ctx, first, count, buffers, false, NULL, NULL,
5152 "glBindBuffersBase");
5153 return;
5154 case GL_UNIFORM_BUFFER:
5155 bind_uniform_buffers(ctx, first, count, buffers, false, NULL, NULL,
5156 "glBindBuffersBase");
5157 return;
5158 case GL_SHADER_STORAGE_BUFFER:
5159 bind_shader_storage_buffers(ctx, first, count, buffers, false, NULL, NULL,
5160 "glBindBuffersBase");
5161 return;
5162 case GL_ATOMIC_COUNTER_BUFFER:
5163 bind_atomic_buffers(ctx, first, count, buffers, false, NULL, NULL,
5164 "glBindBuffersBase");
5165 return;
5166 default:
5167 _mesa_error(ctx, GL_INVALID_ENUM, "glBindBuffersBase(target=%s)",
5168 _mesa_enum_to_string(target));
5169 break;
5170 }
5171 }
5172
5173 /**
5174 * Called via glInvalidateBuffer(Sub)Data.
5175 */
5176 static void
5177 bufferobj_invalidate(struct gl_context *ctx,
5178 struct gl_buffer_object *obj,
5179 GLintptr offset,
5180 GLsizeiptr size)
5181 {
5182 struct pipe_context *pipe = ctx->pipe;
5183
5184 /* We ignore partial invalidates. */
5185 if (offset != 0 || size != obj->Size)
5186 return;
5187
5188 /* If the buffer is mapped, we can't invalidate it. */
5189 if (!obj->buffer || _mesa_bufferobj_mapped(obj, MAP_USER))
5190 return;
5191
5192 pipe->invalidate_resource(pipe, obj->buffer);
5193 }
5194
5195 static ALWAYS_INLINE void
5196 invalidate_buffer_subdata(struct gl_context *ctx,
5197 struct gl_buffer_object *bufObj, GLintptr offset,
5198 GLsizeiptr length)
5199 {
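/* Invalidation is only a hint: glInvalidateBuffer(Sub)Data merely makes the
 * affected contents undefined, so doing nothing when the driver can't
 * invalidate (ctx->has_invalidate_buffer is false) is still conformant.
 */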
5200 if (ctx->has_invalidate_buffer)
5201 bufferobj_invalidate(ctx, bufObj, offset, length);
5202 }
5203
5204 void GLAPIENTRY
5205 _mesa_InvalidateBufferSubData_no_error(GLuint buffer, GLintptr offset,
5206 GLsizeiptr length)
5207 {
5208 GET_CURRENT_CONTEXT(ctx);
5209
5210 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
5211 invalidate_buffer_subdata(ctx, bufObj, offset, length);
5212 }
5213
5214 void GLAPIENTRY
5215 _mesa_InvalidateBufferSubData(GLuint buffer, GLintptr offset,
5216 GLsizeiptr length)
5217 {
5218 GET_CURRENT_CONTEXT(ctx);
5219 struct gl_buffer_object *bufObj;
5220 const GLintptr end = offset + length;
5221
5222 /* Section 6.5 (Invalidating Buffer Data) of the OpenGL 4.5 (Compatibility
5223 * Profile) spec says:
5224 *
5225 * "An INVALID_VALUE error is generated if buffer is zero or is not the
5226 * name of an existing buffer object."
5227 */
5228 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
5229 if (!bufObj || bufObj == &DummyBufferObject) {
5230 _mesa_error(ctx, GL_INVALID_VALUE,
5231 "glInvalidateBufferSubData(name = %u) invalid object",
5232 buffer);
5233 return;
5234 }
5235
5236 /* The GL_ARB_invalidate_subdata spec says:
5237 *
5238 * "An INVALID_VALUE error is generated if <offset> or <length> is
5239 * negative, or if <offset> + <length> is greater than the value of
5240 * BUFFER_SIZE."
5241 */
5242 if (offset < 0 || length < 0 || end > bufObj->Size) {
5243 _mesa_error(ctx, GL_INVALID_VALUE,
5244 "glInvalidateBufferSubData(invalid offset or length)");
5245 return;
5246 }
5247
5248 /* The OpenGL 4.4 (Core Profile) spec says:
5249 *
5250 * "An INVALID_OPERATION error is generated if buffer is currently
5251 * mapped by MapBuffer or if the invalidate range intersects the range
5252 * currently mapped by MapBufferRange, unless it was mapped
5253 * with MAP_PERSISTENT_BIT set in the MapBufferRange access flags."
5254 */
5255 if (!(bufObj->Mappings[MAP_USER].AccessFlags & GL_MAP_PERSISTENT_BIT) &&
5256 bufferobj_range_mapped(bufObj, offset, length)) {
5257 _mesa_error(ctx, GL_INVALID_OPERATION,
5258 "glInvalidateBufferSubData(intersection with mapped "
5259 "range)");
5260 return;
5261 }
5262
5263 invalidate_buffer_subdata(ctx, bufObj, offset, length);
5264 }
5265
5266 void GLAPIENTRY
5267 _mesa_InvalidateBufferData_no_error(GLuint buffer)
5268 {
5269 GET_CURRENT_CONTEXT(ctx);
5270
5271 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
5272 invalidate_buffer_subdata(ctx, bufObj, 0, bufObj->Size);
5273 }
5274
5275 void GLAPIENTRY
5276 _mesa_InvalidateBufferData(GLuint buffer)
5277 {
5278 GET_CURRENT_CONTEXT(ctx);
5279 struct gl_buffer_object *bufObj;
5280
5281 /* Section 6.5 (Invalidating Buffer Data) of the OpenGL 4.5 (Compatibility
5282 * Profile) spec says:
5283 *
5284 * "An INVALID_VALUE error is generated if buffer is zero or is not the
5285 * name of an existing buffer object."
5286 */
5287 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
5288 if (!bufObj || bufObj == &DummyBufferObject) {
5289 _mesa_error(ctx, GL_INVALID_VALUE,
5290 "glInvalidateBufferData(name = %u) invalid object",
5291 buffer);
5292 return;
5293 }
5294
5295 /* The OpenGL 4.4 (Core Profile) spec says:
5296 *
5297 * "An INVALID_OPERATION error is generated if buffer is currently
5298 * mapped by MapBuffer or if the invalidate range intersects the range
5299 * currently mapped by MapBufferRange, unless it was mapped
5300 * with MAP_PERSISTENT_BIT set in the MapBufferRange access flags."
5301 */
5302 if (_mesa_check_disallowed_mapping(bufObj)) {
5303 _mesa_error(ctx, GL_INVALID_OPERATION,
5304 "glInvalidateBufferData(intersection with mapped "
5305 "range)");
5306 return;
5307 }
5308
5309 invalidate_buffer_subdata(ctx, bufObj, 0, bufObj->Size);
5310 }
5311
5312 static void
5313 buffer_page_commitment(struct gl_context *ctx,
5314 struct gl_buffer_object *bufferObj,
5315 GLintptr offset, GLsizeiptr size,
5316 GLboolean commit, const char *func)
5317 {
5318 if (!(bufferObj->StorageFlags & GL_SPARSE_STORAGE_BIT_ARB)) {
5319 _mesa_error(ctx, GL_INVALID_OPERATION, "%s(not a sparse buffer object)",
5320 func);
5321 return;
5322 }
5323
5324 if (size < 0 || size > bufferObj->Size ||
5325 offset < 0 || offset > bufferObj->Size - size) {
5326 _mesa_error(ctx, GL_INVALID_VALUE, "%s(out of bounds)",
5327 func);
5328 return;
5329 }
5330
5331 /* The GL_ARB_sparse_buffer extension specification says:
5332 *
5333 * "INVALID_VALUE is generated by BufferPageCommitmentARB if <offset> is
5334 * not an integer multiple of SPARSE_BUFFER_PAGE_SIZE_ARB, or if <size>
5335 * is not an integer multiple of SPARSE_BUFFER_PAGE_SIZE_ARB and does
5336 * not extend to the end of the buffer's data store."
5337 */
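/* Example with hypothetical values: given a page size of 65536 and a
 * 100000-byte sparse buffer, committing offset=65536, size=34464 is legal
 * (the unaligned size reaches the end of the data store), while offset=1000,
 * or an unaligned size that ends mid-buffer, is rejected below.
 */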
5338 if (offset % ctx->Const.SparseBufferPageSize != 0) {
5339 _mesa_error(ctx, GL_INVALID_VALUE, "%s(offset not aligned to page size)",
5340 func);
5341 return;
5342 }
5343
5344 if (size % ctx->Const.SparseBufferPageSize != 0 &&
5345 offset + size != bufferObj->Size) {
5346 _mesa_error(ctx, GL_INVALID_VALUE, "%s(size not aligned to page size)",
5347 func);
5348 return;
5349 }
5350
5351 struct pipe_context *pipe = ctx->pipe;
5352 struct pipe_box box;
5353
5354 u_box_1d(offset, size, &box);
5355
5356 if (!pipe->resource_commit(pipe, bufferObj->buffer, 0, &box, commit)) {
5357 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glBufferPageCommitmentARB(out of memory)");
5358 }
5359 }
5360
5361 void GLAPIENTRY
5362 _mesa_BufferPageCommitmentARB(GLenum target, GLintptr offset, GLsizeiptr size,
5363 GLboolean commit)
5364 {
5365 GET_CURRENT_CONTEXT(ctx);
5366 struct gl_buffer_object *bufferObj;
5367
5368 bufferObj = get_buffer(ctx, "glBufferPageCommitmentARB", target,
5369 GL_INVALID_ENUM);
5370 if (!bufferObj)
5371 return;
5372
5373 buffer_page_commitment(ctx, bufferObj, offset, size, commit,
5374 "glBufferPageCommitmentARB");
5375 }
5376
5377 void GLAPIENTRY
5378 _mesa_NamedBufferPageCommitmentARB(GLuint buffer, GLintptr offset,
5379 GLsizeiptr size, GLboolean commit)
5380 {
5381 GET_CURRENT_CONTEXT(ctx);
5382 struct gl_buffer_object *bufferObj;
5383
5384 bufferObj = _mesa_lookup_bufferobj(ctx, buffer);
5385 if (!bufferObj || bufferObj == &DummyBufferObject) {
5386 /* Note: the extension spec is not clear about the expected error value. */
5387 _mesa_error(ctx, GL_INVALID_VALUE,
5388 "glNamedBufferPageCommitmentARB(name = %u) invalid object",
5389 buffer);
5390 return;
5391 }
5392
5393 buffer_page_commitment(ctx, bufferObj, offset, size, commit,
5394 "glNamedBufferPageCommitmentARB");
5395 }
5396
5397 void GLAPIENTRY
5398 _mesa_NamedBufferPageCommitmentEXT(GLuint buffer, GLintptr offset,
5399 GLsizeiptr size, GLboolean commit)
5400 {
5401 GET_CURRENT_CONTEXT(ctx);
5402 struct gl_buffer_object *bufferObj;
5403
5404 /* Use NamedBuffer* functions logic from EXT_direct_state_access */
5405 if (buffer != 0) {
5406 bufferObj = _mesa_lookup_bufferobj(ctx, buffer);
5407 if (!_mesa_handle_bind_buffer_gen(ctx, buffer, &bufferObj,
5408 "glNamedBufferPageCommitmentEXT", false))
5409 return;
5410 } else {
5411 /* GL_EXT_direct_state_access says about NamedBuffer* functions:
5412 *
5413 * There is no buffer corresponding to the name zero, these commands
5414 * generate the INVALID_OPERATION error if the buffer parameter is
5415 * zero.
5416 */
5417 _mesa_error(ctx, GL_INVALID_OPERATION,
5418 "glNamedBufferPageCommitmentEXT(buffer = 0)");
5419 return;
5420 }
5421 buffer_page_commitment(ctx, bufferObj, offset, size, commit,
5422 "glNamedBufferPageCommitmentEXT");
5423 }
5424