1 /*
2 * Mesa 3-D graphics library
3 *
4 * Copyright (C) 1999-2008 Brian Paul All Rights Reserved.
5 * Copyright (C) 2009 VMware, Inc. All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included
15 * in all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26
27 /**
28 * \file bufferobj.c
29 * \brief Functions for the GL_ARB_vertex/pixel_buffer_object extensions.
30 * \author Brian Paul, Ian Romanick
31 */
32
33 #include <stdbool.h>
34 #include <inttypes.h> /* for PRId64 macro */
35 #include "util/u_debug.h"
36 #include "util/glheader.h"
37 #include "enums.h"
38 #include "hash.h"
39 #include "context.h"
40 #include "bufferobj.h"
41 #include "externalobjects.h"
42 #include "mtypes.h"
43 #include "teximage.h"
44 #include "glformats.h"
45 #include "texstore.h"
46 #include "transformfeedback.h"
47 #include "varray.h"
48 #include "util/u_atomic.h"
49 #include "util/u_memory.h"
50 #include "api_exec_decl.h"
51 #include "util/set.h"
52
53 #include "state_tracker/st_debug.h"
54 #include "state_tracker/st_atom.h"
55 #include "frontend/api.h"
56
57 #include "util/u_inlines.h"
58 /* Debug flags */
59 /*#define VBO_DEBUG*/
60 /*#define BOUNDS_CHECK*/
61
62
63 /**
64 * We count the number of buffer modification calls to check for
65 * inefficient buffer use. This is the number of such calls before we
66 * issue a warning.
67 */
68 #define BUFFER_WARNING_CALL_COUNT 4
69
70
71 /**
72 * Replace data in a subrange of buffer object. If the data range
73 * specified by size + offset extends beyond the end of the buffer or
74 * if data is NULL, no copy is performed.
75 * Called via glBufferSubDataARB().
76 */
77 void
_mesa_bufferobj_subdata(struct gl_context * ctx,GLintptrARB offset,GLsizeiptrARB size,const void * data,struct gl_buffer_object * obj)78 _mesa_bufferobj_subdata(struct gl_context *ctx,
79 GLintptrARB offset,
80 GLsizeiptrARB size,
81 const void *data, struct gl_buffer_object *obj)
82 {
83 /* we may be called from VBO code, so double-check params here */
84 assert(offset >= 0);
85 assert(size >= 0);
86 assert(offset + size <= obj->Size);
87
88 if (!size)
89 return;
90
91 /*
92 * According to ARB_vertex_buffer_object specification, if data is null,
93 * then the contents of the buffer object's data store is undefined. We just
94 * ignore, and leave it unchanged.
95 */
96 if (!data)
97 return;
98
99 if (!obj->buffer) {
100 /* we probably ran out of memory during buffer allocation */
101 return;
102 }
103
104 /* Now that transfers are per-context, we don't have to figure out
105 * flushing here. Usually drivers won't need to flush in this case
106 * even if the buffer is currently referenced by hardware - they
107 * just queue the upload as dma rather than mapping the underlying
108 * buffer directly.
109 *
110 * If the buffer is mapped, suppress implicit buffer range invalidation
111 * by using PIPE_MAP_DIRECTLY.
112 */
113 struct pipe_context *pipe = ctx->pipe;
114
115 pipe->buffer_subdata(pipe, obj->buffer,
116 _mesa_bufferobj_mapped(obj, MAP_USER) ?
117 PIPE_MAP_DIRECTLY : 0,
118 offset, size, data);
119 }
120
121
122 /**
123 * Called via glGetBufferSubDataARB().
124 */
125 static void
bufferobj_get_subdata(struct gl_context * ctx,GLintptrARB offset,GLsizeiptrARB size,void * data,struct gl_buffer_object * obj)126 bufferobj_get_subdata(struct gl_context *ctx,
127 GLintptrARB offset,
128 GLsizeiptrARB size,
129 void *data, struct gl_buffer_object *obj)
130 {
131 /* we may be called from VBO code, so double-check params here */
132 assert(offset >= 0);
133 assert(size >= 0);
134 assert(offset + size <= obj->Size);
135
136 if (!size)
137 return;
138
139 if (!obj->buffer) {
140 /* we probably ran out of memory during buffer allocation */
141 return;
142 }
143
144 pipe_buffer_read(ctx->pipe, obj->buffer,
145 offset, size, data);
146 }
147
/**
 * Public entry point for reading back a buffer subrange; thin wrapper
 * around bufferobj_get_subdata().
 */
void
_mesa_bufferobj_get_subdata(struct gl_context *ctx,
                            GLintptrARB offset,
                            GLsizeiptrARB size,
                            void *data, struct gl_buffer_object *obj)
{
   bufferobj_get_subdata(ctx, offset, size, data, obj);
}
156
157 /**
158 * Return bitmask of PIPE_BIND_x flags corresponding a GL buffer target.
159 */
160 static unsigned
buffer_target_to_bind_flags(GLenum target)161 buffer_target_to_bind_flags(GLenum target)
162 {
163 switch (target) {
164 case GL_PIXEL_PACK_BUFFER_ARB:
165 case GL_PIXEL_UNPACK_BUFFER_ARB:
166 return PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW;
167 case GL_ARRAY_BUFFER_ARB:
168 return PIPE_BIND_VERTEX_BUFFER;
169 case GL_ELEMENT_ARRAY_BUFFER_ARB:
170 return PIPE_BIND_INDEX_BUFFER;
171 case GL_TEXTURE_BUFFER:
172 return PIPE_BIND_SAMPLER_VIEW;
173 case GL_TRANSFORM_FEEDBACK_BUFFER:
174 return PIPE_BIND_STREAM_OUTPUT;
175 case GL_UNIFORM_BUFFER:
176 return PIPE_BIND_CONSTANT_BUFFER;
177 case GL_DRAW_INDIRECT_BUFFER:
178 case GL_PARAMETER_BUFFER_ARB:
179 return PIPE_BIND_COMMAND_ARGS_BUFFER;
180 case GL_ATOMIC_COUNTER_BUFFER:
181 case GL_SHADER_STORAGE_BUFFER:
182 return PIPE_BIND_SHADER_BUFFER;
183 case GL_QUERY_BUFFER:
184 return PIPE_BIND_QUERY_BUFFER;
185 default:
186 return 0;
187 }
188 }
189
190
191 /**
192 * Return bitmask of PIPE_RESOURCE_x flags corresponding to GL_MAP_x flags.
193 */
194 static unsigned
storage_flags_to_buffer_flags(GLbitfield storageFlags)195 storage_flags_to_buffer_flags(GLbitfield storageFlags)
196 {
197 unsigned flags = 0;
198 if (storageFlags & GL_MAP_PERSISTENT_BIT)
199 flags |= PIPE_RESOURCE_FLAG_MAP_PERSISTENT;
200 if (storageFlags & GL_MAP_COHERENT_BIT)
201 flags |= PIPE_RESOURCE_FLAG_MAP_COHERENT;
202 if (storageFlags & GL_SPARSE_STORAGE_BIT_ARB)
203 flags |= PIPE_RESOURCE_FLAG_SPARSE;
204 return flags;
205 }
206
207
208 /**
209 * From a buffer object's target, immutability flag, storage flags and
210 * usage hint, return a pipe_resource_usage value (PIPE_USAGE_DYNAMIC,
211 * STREAM, etc).
212 */
213 static enum pipe_resource_usage
buffer_usage(GLenum target,GLboolean immutable,GLbitfield storageFlags,GLenum usage)214 buffer_usage(GLenum target, GLboolean immutable,
215 GLbitfield storageFlags, GLenum usage)
216 {
217 /* "immutable" means that "storageFlags" was set by the user and "usage"
218 * was guessed by Mesa. Otherwise, "usage" was set by the user and
219 * storageFlags was guessed by Mesa.
220 *
221 * Therefore, use storageFlags with immutable, else use "usage".
222 */
223 if (immutable) {
224 /* BufferStorage */
225 if (storageFlags & GL_MAP_READ_BIT)
226 return PIPE_USAGE_STAGING;
227 else if (storageFlags & GL_CLIENT_STORAGE_BIT)
228 return PIPE_USAGE_STREAM;
229 else
230 return PIPE_USAGE_DEFAULT;
231 }
232 else {
233 /* These are often read by the CPU, so enable CPU caches. */
234 if (target == GL_PIXEL_PACK_BUFFER ||
235 target == GL_PIXEL_UNPACK_BUFFER)
236 return PIPE_USAGE_STAGING;
237
238 /* BufferData */
239 switch (usage) {
240 case GL_DYNAMIC_DRAW:
241 case GL_DYNAMIC_COPY:
242 return PIPE_USAGE_DYNAMIC;
243 case GL_STREAM_DRAW:
244 case GL_STREAM_COPY:
245 return PIPE_USAGE_STREAM;
246 case GL_STATIC_READ:
247 case GL_DYNAMIC_READ:
248 case GL_STREAM_READ:
249 return PIPE_USAGE_STAGING;
250 case GL_STATIC_DRAW:
251 case GL_STATIC_COPY:
252 default:
253 return PIPE_USAGE_DEFAULT;
254 }
255 }
256 }
257
258
/**
 * Common implementation behind glBufferData / glBufferStorage and the
 * external-memory variants: (re)allocate the backing pipe_resource for a
 * buffer object and optionally upload initial data.
 *
 * \param ctx           the GL context
 * \param target        GL buffer target; used to derive pipe bind flags
 * \param size          size of the data store in bytes
 * \param data          initial data, or NULL to leave contents undefined
 * \param memObj        external memory object to import from, or NULL
 * \param offset        byte offset into \p memObj
 * \param usage         GL usage hint (GL_STATIC_DRAW etc.)
 * \param storageFlags  GL_MAP_x and storage bits
 * \param obj           the buffer object being (re)allocated
 * \return GL_TRUE on success, GL_FALSE on out-of-memory / oversize
 */
static ALWAYS_INLINE GLboolean
bufferobj_data(struct gl_context *ctx,
               GLenum target,
               GLsizeiptrARB size,
               const void *data,
               struct gl_memory_object *memObj,
               GLuint64 offset,
               GLenum usage,
               GLbitfield storageFlags,
               struct gl_buffer_object *obj)
{
   struct pipe_context *pipe = ctx->pipe;
   struct pipe_screen *screen = pipe->screen;
   bool is_mapped = _mesa_bufferobj_mapped(obj, MAP_USER);

   if (size > UINT32_MAX || offset > UINT32_MAX) {
      /* pipe_resource.width0 is 32 bits only and increasing it
       * to 64 bits doesn't make much sense since hw support
       * for > 4GB resources is limited.
       */
      obj->Size = 0;
      return GL_FALSE;
   }

   /* Fast path: if an identically-sized/usaged resource already exists,
    * reuse it instead of reallocating.
    */
   if (target != GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD &&
       size && obj->buffer &&
       obj->Size == size &&
       obj->Usage == usage &&
       obj->StorageFlags == storageFlags) {
      if (data) {
         /* Just discard the old contents and write new data.
          * This should be the same as creating a new buffer, but we avoid
          * a lot of validation in Mesa.
          *
          * If the buffer is mapped, we can't discard it.
          *
          * PIPE_MAP_DIRECTLY supresses implicit buffer range
          * invalidation.
          */
         pipe->buffer_subdata(pipe, obj->buffer,
                              is_mapped ? PIPE_MAP_DIRECTLY :
                                          PIPE_MAP_DISCARD_WHOLE_RESOURCE,
                              0, size, data);
         return GL_TRUE;
      } else if (is_mapped) {
         return GL_TRUE; /* can't reallocate, nothing to do */
      } else if (screen->caps.invalidate_buffer) {
         pipe->invalidate_resource(pipe, obj->buffer);
         return GL_TRUE;
      }
   }

   obj->Size = size;
   obj->Usage = usage;
   obj->StorageFlags = storageFlags;

   /* Drop the old backing storage (and its private refcounts) before
    * allocating the new one.
    */
   _mesa_bufferobj_release_buffer(obj);

   unsigned bindings = buffer_target_to_bind_flags(target);

   if (storageFlags & MESA_GALLIUM_VERTEX_STATE_STORAGE)
      bindings |= PIPE_BIND_VERTEX_STATE;

   if (ST_DEBUG & DEBUG_BUFFER) {
      debug_printf("Create buffer size %" PRId64 " bind 0x%x\n",
                   (int64_t) size, bindings);
   }

   if (size != 0) {
      struct pipe_resource buffer;

      memset(&buffer, 0, sizeof buffer);
      buffer.target = PIPE_BUFFER;
      buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
      buffer.bind = bindings;
      buffer.usage =
         buffer_usage(target, obj->Immutable, storageFlags, usage);
      buffer.flags = storage_flags_to_buffer_flags(storageFlags);
      buffer.width0 = size;
      buffer.height0 = 1;
      buffer.depth0 = 1;
      buffer.array_size = 1;

      if (memObj) {
         /* Import from an external memory object (EXT_memory_object). */
         obj->buffer = screen->resource_from_memobj(screen, &buffer,
                                                    memObj->memory,
                                                    offset);
      }
      else if (target == GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD) {
         /* AMD_pinned_memory: wrap user memory directly. */
         obj->buffer =
            screen->resource_from_user_memory(screen, &buffer, (void*)data);
      }
      else {
         obj->buffer = screen->resource_create(screen, &buffer);

         if (obj->buffer && data)
            pipe_buffer_write(pipe, obj->buffer, 0, size, data);
      }

      if (!obj->buffer) {
         /* out of memory */
         obj->Size = 0;
         return GL_FALSE;
      }

      obj->private_refcount_ctx = ctx;
   }

   /* The current buffer may be bound, so we have to revalidate all atoms that
    * might be using it.
    */
   if (obj->UsageHistory & USAGE_ARRAY_BUFFER)
      ctx->NewDriverState |= ST_NEW_VERTEX_ARRAYS;
   if (obj->UsageHistory & USAGE_UNIFORM_BUFFER)
      ctx->NewDriverState |= ST_NEW_UNIFORM_BUFFER;
   if (obj->UsageHistory & USAGE_SHADER_STORAGE_BUFFER)
      ctx->NewDriverState |= ST_NEW_STORAGE_BUFFER;
   if (obj->UsageHistory & USAGE_TEXTURE_BUFFER)
      ctx->NewDriverState |= ST_NEW_SAMPLER_VIEWS | ST_NEW_IMAGE_UNITS;
   if (obj->UsageHistory & USAGE_ATOMIC_COUNTER_BUFFER)
      ctx->NewDriverState |= ctx->DriverFlags.NewAtomicBuffer;

   return GL_TRUE;
}
383
/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Thin wrapper over bufferobj_data() with no external memory object.
 * Called via ctx->Driver.BufferData().
 * \return GL_TRUE for success, GL_FALSE if out of memory
 */
GLboolean
_mesa_bufferobj_data(struct gl_context *ctx,
                     GLenum target,
                     GLsizeiptrARB size,
                     const void *data,
                     GLenum usage,
                     GLbitfield storageFlags,
                     struct gl_buffer_object *obj)
{
   return bufferobj_data(ctx, target, size, data, NULL, 0, usage, storageFlags, obj);
}
402
/**
 * Allocate buffer storage imported from an external memory object
 * (EXT_memory_object); no initial data upload.  GL_DYNAMIC_STORAGE_BIT is
 * used as the storage flags so the store remains updatable.
 */
static GLboolean
bufferobj_data_mem(struct gl_context *ctx,
                   GLenum target,
                   GLsizeiptrARB size,
                   struct gl_memory_object *memObj,
                   GLuint64 offset,
                   GLenum usage,
                   struct gl_buffer_object *bufObj)
{
   return bufferobj_data(ctx, target, size, NULL, memObj, offset, usage, GL_DYNAMIC_STORAGE_BIT, bufObj);
}
414
415 /**
416 * Convert GLbitfield of GL_MAP_x flags to gallium pipe_map_flags flags.
417 * \param wholeBuffer is the whole buffer being mapped?
418 */
419 enum pipe_map_flags
_mesa_access_flags_to_transfer_flags(GLbitfield access,bool wholeBuffer)420 _mesa_access_flags_to_transfer_flags(GLbitfield access, bool wholeBuffer)
421 {
422 enum pipe_map_flags flags = 0;
423
424 if (access & GL_MAP_WRITE_BIT)
425 flags |= PIPE_MAP_WRITE;
426
427 if (access & GL_MAP_READ_BIT)
428 flags |= PIPE_MAP_READ;
429
430 if (access & GL_MAP_FLUSH_EXPLICIT_BIT)
431 flags |= PIPE_MAP_FLUSH_EXPLICIT;
432
433 if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
434 flags |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
435 }
436 else if (access & GL_MAP_INVALIDATE_RANGE_BIT) {
437 if (wholeBuffer)
438 flags |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
439 else
440 flags |= PIPE_MAP_DISCARD_RANGE;
441 }
442
443 if (access & GL_MAP_UNSYNCHRONIZED_BIT)
444 flags |= PIPE_MAP_UNSYNCHRONIZED;
445
446 if (access & GL_MAP_PERSISTENT_BIT)
447 flags |= PIPE_MAP_PERSISTENT;
448
449 if (access & GL_MAP_COHERENT_BIT)
450 flags |= PIPE_MAP_COHERENT;
451
452 /* ... other flags ...
453 */
454
455 if (access & MESA_MAP_NOWAIT_BIT)
456 flags |= PIPE_MAP_DONTBLOCK;
457 if (access & MESA_MAP_THREAD_SAFE_BIT)
458 flags |= PIPE_MAP_THREAD_SAFE;
459 if (access & MESA_MAP_ONCE)
460 flags |= PIPE_MAP_ONCE;
461
462 return flags;
463 }
464
465
/**
 * Map a range of a buffer object into client space and record the mapping
 * in obj->Mappings[index].  Called via glMapBufferRange().
 *
 * \param index  which mapping slot to use (MAP_USER for API mappings,
 *               MAP_INTERNAL for Mesa-internal ones)
 * \return the mapped pointer, or NULL on failure
 */
void *
_mesa_bufferobj_map_range(struct gl_context *ctx,
                          GLintptr offset, GLsizeiptr length, GLbitfield access,
                          struct gl_buffer_object *obj,
                          gl_map_buffer_index index)
{
   struct pipe_context *pipe = ctx->pipe;

   assert(offset >= 0);
   assert(length >= 0);
   assert(offset < obj->Size);
   assert(offset + length <= obj->Size);

   enum pipe_map_flags transfer_flags =
      _mesa_access_flags_to_transfer_flags(access,
                                           offset == 0 && length == obj->Size);

   /* Sometimes games do silly things like MapBufferRange(UNSYNC|DISCARD_RANGE)
    * In this case, the UNSYNC is a bit redundant, but the games rely
    * on the driver rebinding/replacing the backing storage rather than
    * going down the UNSYNC path (ie. honoring DISCARD_x first before UNSYNC).
    */
   if (unlikely(ctx->st_opts->ignore_map_unsynchronized)) {
      if (transfer_flags & (PIPE_MAP_DISCARD_RANGE | PIPE_MAP_DISCARD_WHOLE_RESOURCE))
         transfer_flags &= ~PIPE_MAP_UNSYNCHRONIZED;
   }

   /* Driconf/driver option to force synchronized maps. */
   if (ctx->Const.ForceMapBufferSynchronized)
      transfer_flags &= ~PIPE_MAP_UNSYNCHRONIZED;

   obj->Mappings[index].Pointer = pipe_buffer_map_range(pipe,
                                                        obj->buffer,
                                                        offset, length,
                                                        transfer_flags,
                                                        &obj->transfer[index]);
   if (obj->Mappings[index].Pointer) {
      /* Record the mapping so later FlushMappedRange/Unmap calls can
       * reference it.
       */
      obj->Mappings[index].Offset = offset;
      obj->Mappings[index].Length = length;
      obj->Mappings[index].AccessFlags = access;
   }
   else {
      obj->transfer[index] = NULL;
   }

   return obj->Mappings[index].Pointer;
}
515
516
517 void
_mesa_bufferobj_flush_mapped_range(struct gl_context * ctx,GLintptr offset,GLsizeiptr length,struct gl_buffer_object * obj,gl_map_buffer_index index)518 _mesa_bufferobj_flush_mapped_range(struct gl_context *ctx,
519 GLintptr offset, GLsizeiptr length,
520 struct gl_buffer_object *obj,
521 gl_map_buffer_index index)
522 {
523 struct pipe_context *pipe = ctx->pipe;
524
525 /* Subrange is relative to mapped range */
526 assert(offset >= 0);
527 assert(length >= 0);
528 assert(offset + length <= obj->Mappings[index].Length);
529 assert(obj->Mappings[index].Pointer);
530
531 if (!length)
532 return;
533
534 pipe_buffer_flush_mapped_range(pipe, obj->transfer[index],
535 obj->Mappings[index].Offset + offset,
536 length);
537 }
538
539
540 /**
541 * Called via glUnmapBufferARB().
542 */
543 GLboolean
_mesa_bufferobj_unmap(struct gl_context * ctx,struct gl_buffer_object * obj,gl_map_buffer_index index)544 _mesa_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj,
545 gl_map_buffer_index index)
546 {
547 struct pipe_context *pipe = ctx->pipe;
548
549 if (obj->Mappings[index].Length)
550 pipe_buffer_unmap(pipe, obj->transfer[index]);
551
552 obj->transfer[index] = NULL;
553 obj->Mappings[index].Pointer = NULL;
554 obj->Mappings[index].Offset = 0;
555 obj->Mappings[index].Length = 0;
556 return GL_TRUE;
557 }
558
559
560 /**
561 * Called via glCopyBufferSubData().
562 */
563 static void
bufferobj_copy_subdata(struct gl_context * ctx,struct gl_buffer_object * src,struct gl_buffer_object * dst,GLintptr readOffset,GLintptr writeOffset,GLsizeiptr size)564 bufferobj_copy_subdata(struct gl_context *ctx,
565 struct gl_buffer_object *src,
566 struct gl_buffer_object *dst,
567 GLintptr readOffset, GLintptr writeOffset,
568 GLsizeiptr size)
569 {
570 struct pipe_context *pipe = ctx->pipe;
571 struct pipe_box box;
572
573 dst->MinMaxCacheDirty = true;
574 if (!size)
575 return;
576
577 /* buffer should not already be mapped */
578 assert(!_mesa_check_disallowed_mapping(src));
579 /* dst can be mapped, just not the same range as the target range */
580
581 u_box_1d(readOffset, size, &box);
582
583 pipe->resource_copy_region(pipe, dst->buffer, 0, writeOffset, 0, 0,
584 src->buffer, 0, &box);
585 }
586
587 static void
clear_buffer_subdata_sw(struct gl_context * ctx,GLintptr offset,GLsizeiptr size,const GLvoid * clearValue,GLsizeiptr clearValueSize,struct gl_buffer_object * bufObj)588 clear_buffer_subdata_sw(struct gl_context *ctx,
589 GLintptr offset, GLsizeiptr size,
590 const GLvoid *clearValue,
591 GLsizeiptr clearValueSize,
592 struct gl_buffer_object *bufObj)
593 {
594 GLsizeiptr i;
595 GLubyte *dest;
596
597 dest = _mesa_bufferobj_map_range(ctx, offset, size,
598 GL_MAP_WRITE_BIT |
599 GL_MAP_INVALIDATE_RANGE_BIT,
600 bufObj, MAP_INTERNAL);
601
602 if (!dest) {
603 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glClearBuffer[Sub]Data");
604 return;
605 }
606
607 if (clearValue == NULL) {
608 /* Clear with zeros, per the spec */
609 memset(dest, 0, size);
610 _mesa_bufferobj_unmap(ctx, bufObj, MAP_INTERNAL);
611 return;
612 }
613
614 for (i = 0; i < size/clearValueSize; ++i) {
615 memcpy(dest, clearValue, clearValueSize);
616 dest += clearValueSize;
617 }
618
619 _mesa_bufferobj_unmap(ctx, bufObj, MAP_INTERNAL);
620 }
621
/**
 * Helper to warn of possible performance issues, such as frequently
 * updating a buffer created with GL_STATIC_DRAW.  Emits a medium-severity
 * KHR_debug performance message.  Called via the macro below.
 *
 * \param id  pointer to a static message id so repeated warnings from the
 *            same call site share one debug-output id
 */
static void
buffer_usage_warning(struct gl_context *ctx, GLuint *id, const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   _mesa_gl_vdebugf(ctx, id,
                    MESA_DEBUG_SOURCE_API,
                    MESA_DEBUG_TYPE_PERFORMANCE,
                    MESA_DEBUG_SEVERITY_MEDIUM,
                    fmt, args);
   va_end(args);
}

/* Per-call-site wrapper: the static id gives each expansion site its own
 * stable debug-message id.
 */
#define BUFFER_USAGE_WARNING(CTX, FMT, ...) \
   do { \
      static GLuint id = 0; \
      buffer_usage_warning(CTX, &id, FMT, ##__VA_ARGS__); \
   } while (0)


/**
 * Used as a placeholder for buffer objects between glGenBuffers() and
 * glBindBuffer() so that glIsBuffer() can work correctly.
 * The huge RefCount ensures it is never deleted.
 */
static struct gl_buffer_object DummyBufferObject = {
   .MinMaxCacheMutex = SIMPLE_MTX_INITIALIZER,
   .RefCount = 1000*1000*1000, /* never delete */
};
656
657
/**
 * Return pointer to address of a buffer object target.
 * Targets gated by an extension return NULL when the extension is not
 * enabled (unless \p no_error skips the checks entirely).
 *
 * \param ctx       the GL context
 * \param target    the buffer object target to be retrieved.
 * \param no_error  skip API-version/extension validation (KHR_no_error path)
 * \return pointer to pointer to the buffer object bound to \c target in the
 *         specified context or \c NULL if \c target is invalid.
 */
static ALWAYS_INLINE struct gl_buffer_object **
get_buffer_target(struct gl_context *ctx, GLenum target, bool no_error)
{
   /* Only the four classic targets exist in GLES 1/2; other targets are
    * only supported in desktop OpenGL and OpenGL ES 3.0+.
    */
   if (!no_error && !_mesa_is_desktop_gl(ctx) && !_mesa_is_gles3(ctx)) {
      switch (target) {
      case GL_ARRAY_BUFFER:
      case GL_ELEMENT_ARRAY_BUFFER:
      case GL_PIXEL_PACK_BUFFER:
      case GL_PIXEL_UNPACK_BUFFER:
         break;
      default:
         return NULL;
      }
   }

   switch (target) {
   case GL_ARRAY_BUFFER_ARB:
      return &ctx->Array.ArrayBufferObj;
   case GL_ELEMENT_ARRAY_BUFFER_ARB:
      /* Element array binding lives in the VAO, not the context. */
      return &ctx->Array.VAO->IndexBufferObj;
   case GL_PIXEL_PACK_BUFFER_EXT:
      return &ctx->Pack.BufferObj;
   case GL_PIXEL_UNPACK_BUFFER_EXT:
      return &ctx->Unpack.BufferObj;
   case GL_COPY_READ_BUFFER:
      return &ctx->CopyReadBuffer;
   case GL_COPY_WRITE_BUFFER:
      return &ctx->CopyWriteBuffer;
   case GL_QUERY_BUFFER:
      if (no_error || _mesa_has_ARB_query_buffer_object(ctx))
         return &ctx->QueryBuffer;
      break;
   case GL_DRAW_INDIRECT_BUFFER:
      if (no_error ||
          (_mesa_is_desktop_gl(ctx) && ctx->Extensions.ARB_draw_indirect) ||
          _mesa_is_gles31(ctx)) {
         return &ctx->DrawIndirectBuffer;
      }
      break;
   case GL_PARAMETER_BUFFER_ARB:
      if (no_error || _mesa_has_ARB_indirect_parameters(ctx)) {
         return &ctx->ParameterBuffer;
      }
      break;
   case GL_DISPATCH_INDIRECT_BUFFER:
      if (no_error || _mesa_has_compute_shaders(ctx)) {
         return &ctx->DispatchIndirectBuffer;
      }
      break;
   case GL_TRANSFORM_FEEDBACK_BUFFER:
      if (no_error || ctx->Extensions.EXT_transform_feedback) {
         return &ctx->TransformFeedback.CurrentBuffer;
      }
      break;
   case GL_TEXTURE_BUFFER:
      if (no_error ||
          _mesa_has_ARB_texture_buffer_object(ctx) ||
          _mesa_has_OES_texture_buffer(ctx)) {
         return &ctx->Texture.BufferObject;
      }
      break;
   case GL_UNIFORM_BUFFER:
      if (no_error || ctx->Extensions.ARB_uniform_buffer_object) {
         return &ctx->UniformBuffer;
      }
      break;
   case GL_SHADER_STORAGE_BUFFER:
      if (no_error ||
          ctx->Extensions.ARB_shader_storage_buffer_object ||
          _mesa_is_gles31(ctx)) {
         return &ctx->ShaderStorageBuffer;
      }
      break;
   case GL_ATOMIC_COUNTER_BUFFER:
      if (no_error ||
          ctx->Extensions.ARB_shader_atomic_counters || _mesa_is_gles31(ctx)) {
         return &ctx->AtomicBuffer;
      }
      break;
   case GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD:
      if (no_error || ctx->Extensions.AMD_pinned_memory) {
         return &ctx->ExternalVirtualMemoryBuffer;
      }
      break;
   }
   return NULL;
}
753
754
755 /**
756 * Get the buffer object bound to the specified target in a GL context.
757 * \param ctx the GL context
758 * \param target the buffer object target to be retrieved.
759 * \param error the GL error to record if target is illegal.
760 * \return pointer to the buffer object bound to \c target in the
761 * specified context or \c NULL if \c target is invalid.
762 */
763 static inline struct gl_buffer_object *
get_buffer(struct gl_context * ctx,const char * func,GLenum target,GLenum error)764 get_buffer(struct gl_context *ctx, const char *func, GLenum target,
765 GLenum error)
766 {
767 struct gl_buffer_object **bufObj = get_buffer_target(ctx, target, false);
768
769 if (!bufObj) {
770 _mesa_error(ctx, GL_INVALID_ENUM, "%s(target)", func);
771 return NULL;
772 }
773
774 if (!*bufObj) {
775 _mesa_error(ctx, error, "%s(no buffer bound)", func);
776 return NULL;
777 }
778
779 return *bufObj;
780 }
781
782
783 /**
784 * Convert a GLbitfield describing the mapped buffer access flags
785 * into one of GL_READ_WRITE, GL_READ_ONLY, or GL_WRITE_ONLY.
786 */
787 static GLenum
simplified_access_mode(struct gl_context * ctx,GLbitfield access)788 simplified_access_mode(struct gl_context *ctx, GLbitfield access)
789 {
790 const GLbitfield rwFlags = GL_MAP_READ_BIT | GL_MAP_WRITE_BIT;
791 if ((access & rwFlags) == rwFlags)
792 return GL_READ_WRITE;
793 if ((access & GL_MAP_READ_BIT) == GL_MAP_READ_BIT)
794 return GL_READ_ONLY;
795 if ((access & GL_MAP_WRITE_BIT) == GL_MAP_WRITE_BIT)
796 return GL_WRITE_ONLY;
797
798 /* Otherwise, AccessFlags is zero (the default state).
799 *
800 * Table 2.6 on page 31 (page 44 of the PDF) of the OpenGL 1.5 spec says:
801 *
802 * Name Type Initial Value Legal Values
803 * ... ... ... ...
804 * BUFFER_ACCESS enum READ_WRITE READ_ONLY, WRITE_ONLY
805 * READ_WRITE
806 *
807 * However, table 6.8 in the GL_OES_mapbuffer extension says:
808 *
809 * Get Value Type Get Command Value Description
810 * --------- ---- ----------- ----- -----------
811 * BUFFER_ACCESS_OES Z1 GetBufferParameteriv WRITE_ONLY_OES buffer map flag
812 *
813 * The difference is because GL_OES_mapbuffer only supports mapping buffers
814 * write-only.
815 */
816 assert(access == 0);
817
818 return _mesa_is_gles(ctx) ? GL_WRITE_ONLY : GL_READ_WRITE;
819 }
820
821
822 /**
823 * Test if the buffer is mapped, and if so, if the mapped range overlaps the
824 * given range.
825 * The regions do not overlap if and only if the end of the given
826 * region is before the mapped region or the start of the given region
827 * is after the mapped region.
828 *
829 * \param obj Buffer object target on which to operate.
830 * \param offset Offset of the first byte of the subdata range.
831 * \param size Size, in bytes, of the subdata range.
832 * \return true if ranges overlap, false otherwise
833 *
834 */
835 static bool
bufferobj_range_mapped(const struct gl_buffer_object * obj,GLintptr offset,GLsizeiptr size)836 bufferobj_range_mapped(const struct gl_buffer_object *obj,
837 GLintptr offset, GLsizeiptr size)
838 {
839 if (_mesa_bufferobj_mapped(obj, MAP_USER)) {
840 const GLintptr end = offset + size;
841 const GLintptr mapEnd = obj->Mappings[MAP_USER].Offset +
842 obj->Mappings[MAP_USER].Length;
843
844 if (!(end <= obj->Mappings[MAP_USER].Offset || offset >= mapEnd)) {
845 return true;
846 }
847 }
848 return false;
849 }
850
851
852 /**
853 * Tests the subdata range parameters and sets the GL error code for
854 * \c glBufferSubDataARB, \c glGetBufferSubDataARB and
855 * \c glClearBufferSubData.
856 *
857 * \param ctx GL context.
858 * \param bufObj The buffer object.
859 * \param offset Offset of the first byte of the subdata range.
860 * \param size Size, in bytes, of the subdata range.
861 * \param mappedRange If true, checks if an overlapping range is mapped.
862 * If false, checks if buffer is mapped.
863 * \param caller Name of calling function for recording errors.
864 * \return false if error, true otherwise
865 *
866 * \sa glBufferSubDataARB, glGetBufferSubDataARB, glClearBufferSubData
867 */
868 static bool
buffer_object_subdata_range_good(struct gl_context * ctx,const struct gl_buffer_object * bufObj,GLintptr offset,GLsizeiptr size,bool mappedRange,const char * caller)869 buffer_object_subdata_range_good(struct gl_context *ctx,
870 const struct gl_buffer_object *bufObj,
871 GLintptr offset, GLsizeiptr size,
872 bool mappedRange, const char *caller)
873 {
874 if (size < 0) {
875 _mesa_error(ctx, GL_INVALID_VALUE, "%s(size < 0)", caller);
876 return false;
877 }
878
879 if (offset < 0) {
880 _mesa_error(ctx, GL_INVALID_VALUE, "%s(offset < 0)", caller);
881 return false;
882 }
883
884 if (offset + size > bufObj->Size) {
885 _mesa_error(ctx, GL_INVALID_VALUE,
886 "%s(offset %lu + size %lu > buffer size %lu)", caller,
887 (unsigned long) offset,
888 (unsigned long) size,
889 (unsigned long) bufObj->Size);
890 return false;
891 }
892
893 if (bufObj->Mappings[MAP_USER].AccessFlags & GL_MAP_PERSISTENT_BIT)
894 return true;
895
896 if (mappedRange) {
897 if (bufferobj_range_mapped(bufObj, offset, size)) {
898 _mesa_error(ctx, GL_INVALID_OPERATION,
899 "%s(range is mapped without persistent bit)",
900 caller);
901 return false;
902 }
903 }
904 else {
905 if (_mesa_bufferobj_mapped(bufObj, MAP_USER)) {
906 _mesa_error(ctx, GL_INVALID_OPERATION,
907 "%s(buffer is mapped without persistent bit)",
908 caller);
909 return false;
910 }
911 }
912
913 return true;
914 }
915
916
917 /**
918 * Test the format and type parameters and set the GL error code for
919 * \c glClearBufferData, \c glClearNamedBufferData, \c glClearBufferSubData
920 * and \c glClearNamedBufferSubData.
921 *
922 * \param ctx GL context.
923 * \param internalformat Format to which the data is to be converted.
924 * \param format Format of the supplied data.
925 * \param type Type of the supplied data.
926 * \param caller Name of calling function for recording errors.
927 * \return If internalformat, format and type are legal the mesa_format
928 * corresponding to internalformat, otherwise MESA_FORMAT_NONE.
929 *
930 * \sa glClearBufferData, glClearNamedBufferData, glClearBufferSubData and
931 * glClearNamedBufferSubData.
932 */
933 static mesa_format
validate_clear_buffer_format(struct gl_context * ctx,GLenum internalformat,GLenum format,GLenum type,const char * caller)934 validate_clear_buffer_format(struct gl_context *ctx,
935 GLenum internalformat,
936 GLenum format, GLenum type,
937 const char *caller)
938 {
939 mesa_format mesaFormat;
940 GLenum errorFormatType;
941
942 mesaFormat = _mesa_validate_texbuffer_format(ctx, internalformat);
943 if (mesaFormat == MESA_FORMAT_NONE) {
944 _mesa_error(ctx, GL_INVALID_ENUM,
945 "%s(invalid internalformat)", caller);
946 return MESA_FORMAT_NONE;
947 }
948
949 /* NOTE: not mentioned in ARB_clear_buffer_object but according to
950 * EXT_texture_integer there is no conversion between integer and
951 * non-integer formats
952 */
953 if (_mesa_is_enum_format_signed_int(format) !=
954 _mesa_is_format_integer_color(mesaFormat)) {
955 _mesa_error(ctx, GL_INVALID_OPERATION,
956 "%s(integer vs non-integer)", caller);
957 return MESA_FORMAT_NONE;
958 }
959
960 if (!_mesa_is_color_format(format)) {
961 _mesa_error(ctx, GL_INVALID_VALUE,
962 "%s(format is not a color format)", caller);
963 return MESA_FORMAT_NONE;
964 }
965
966 errorFormatType = _mesa_error_check_format_and_type(ctx, format, type);
967 if (errorFormatType != GL_NO_ERROR) {
968 _mesa_error(ctx, GL_INVALID_VALUE,
969 "%s(invalid format or type)", caller);
970 return MESA_FORMAT_NONE;
971 }
972
973 return mesaFormat;
974 }
975
976
977 /**
978 * Convert user-specified clear value to the specified internal format.
979 *
980 * \param ctx GL context.
981 * \param internalformat Format to which the data is converted.
982 * \param clearValue Points to the converted clear value.
983 * \param format Format of the supplied data.
984 * \param type Type of the supplied data.
985 * \param data Data which is to be converted to internalformat.
986 * \param caller Name of calling function for recording errors.
987 * \return true if data could be converted, false otherwise.
988 *
989 * \sa glClearBufferData, glClearBufferSubData
990 */
991 static bool
convert_clear_buffer_data(struct gl_context * ctx,mesa_format internalformat,GLubyte * clearValue,GLenum format,GLenum type,const GLvoid * data,const char * caller)992 convert_clear_buffer_data(struct gl_context *ctx,
993 mesa_format internalformat,
994 GLubyte *clearValue, GLenum format, GLenum type,
995 const GLvoid *data, const char *caller)
996 {
997 GLenum internalformatBase = _mesa_get_format_base_format(internalformat);
998 struct gl_pixelstore_attrib packing = {.Alignment = 1};
999
1000 if (_mesa_texstore(ctx, 1, internalformatBase, internalformat,
1001 0, &clearValue, 1, 1, 1,
1002 format, type, data, &packing)) {
1003 return true;
1004 }
1005 else {
1006 _mesa_error(ctx, GL_OUT_OF_MEMORY, "%s", caller);
1007 return false;
1008 }
1009 }
1010
1011 void
_mesa_bufferobj_release_buffer(struct gl_buffer_object * obj)1012 _mesa_bufferobj_release_buffer(struct gl_buffer_object *obj)
1013 {
1014 if (!obj->buffer)
1015 return;
1016
1017 /* Subtract the remaining private references before unreferencing
1018 * the buffer. See the header file for explanation.
1019 */
1020 if (obj->private_refcount) {
1021 assert(obj->private_refcount > 0);
1022 p_atomic_add(&obj->buffer->reference.count,
1023 -obj->private_refcount);
1024 obj->private_refcount = 0;
1025 }
1026 obj->private_refcount_ctx = NULL;
1027
1028 pipe_resource_reference(&obj->buffer, NULL);
1029 }
1030
1031 /**
1032 * Delete a buffer object.
1033 *
1034 * Default callback for the \c dd_function_table::DeleteBuffer() hook.
1035 */
1036 void
_mesa_delete_buffer_object(struct gl_context * ctx,struct gl_buffer_object * bufObj)1037 _mesa_delete_buffer_object(struct gl_context *ctx,
1038 struct gl_buffer_object *bufObj)
1039 {
1040 assert(bufObj->RefCount == 0);
1041 _mesa_buffer_unmap_all_mappings(ctx, bufObj);
1042 _mesa_bufferobj_release_buffer(bufObj);
1043
1044 vbo_delete_minmax_cache(bufObj);
1045
1046 /* assign strange values here to help w/ debugging */
1047 bufObj->RefCount = -1000;
1048 bufObj->Name = ~0;
1049
1050 simple_mtx_destroy(&bufObj->MinMaxCacheMutex);
1051 free(bufObj->Label);
1052 free(bufObj);
1053 }
1054
1055
1056 /**
1057 * Get the value of MESA_NO_MINMAX_CACHE.
1058 */
1059 static bool
get_no_minmax_cache()1060 get_no_minmax_cache()
1061 {
1062 static bool read = false;
1063 static bool disable = false;
1064
1065 if (!read) {
1066 disable = debug_get_bool_option("MESA_NO_MINMAX_CACHE", false);
1067 read = true;
1068 }
1069
1070 return disable;
1071 }
1072
1073 /**
1074 * Callback called from _mesa_HashWalk()
1075 */
1076 static void
count_buffer_size(void * data,void * userData)1077 count_buffer_size(void *data, void *userData)
1078 {
1079 const struct gl_buffer_object *bufObj =
1080 (const struct gl_buffer_object *) data;
1081 GLuint *total = (GLuint *) userData;
1082
1083 *total = *total + bufObj->Size;
1084 }
1085
1086
1087 /**
1088 * Initialize the state associated with buffer objects
1089 */
1090 void
_mesa_init_buffer_objects(struct gl_context * ctx)1091 _mesa_init_buffer_objects( struct gl_context *ctx )
1092 {
1093 GLuint i;
1094
1095 for (i = 0; i < MAX_COMBINED_UNIFORM_BUFFERS; i++) {
1096 _mesa_reference_buffer_object(ctx,
1097 &ctx->UniformBufferBindings[i].BufferObject,
1098 NULL);
1099 ctx->UniformBufferBindings[i].Offset = -1;
1100 ctx->UniformBufferBindings[i].Size = -1;
1101 }
1102
1103 for (i = 0; i < MAX_COMBINED_SHADER_STORAGE_BUFFERS; i++) {
1104 _mesa_reference_buffer_object(ctx,
1105 &ctx->ShaderStorageBufferBindings[i].BufferObject,
1106 NULL);
1107 ctx->ShaderStorageBufferBindings[i].Offset = -1;
1108 ctx->ShaderStorageBufferBindings[i].Size = -1;
1109 }
1110
1111 for (i = 0; i < MAX_COMBINED_ATOMIC_BUFFERS; i++) {
1112 _mesa_reference_buffer_object(ctx,
1113 &ctx->AtomicBufferBindings[i].BufferObject,
1114 NULL);
1115 ctx->AtomicBufferBindings[i].Offset = 0;
1116 ctx->AtomicBufferBindings[i].Size = 0;
1117 }
1118 }
1119
/**
 * Detach the context from the buffer to re-enable buffer reference counting
 * for this context.
 *
 * While attached, the creating context tracks its own references in the
 * non-atomic CtxRefCount and additionally holds one global reference for
 * the lifetime of the buffer ID; both are undone here so the buffer can be
 * managed purely through the atomic RefCount afterwards.
 */
static void
detach_ctx_from_buffer(struct gl_context *ctx, struct gl_buffer_object *buf)
{
   assert(buf->Ctx == ctx);

   /* Move private non-atomic context references to the global ref count. */
   p_atomic_add(&buf->RefCount, buf->CtxRefCount);
   buf->CtxRefCount = 0;
   buf->Ctx = NULL;

   /* Remove the context reference where the context holds one
    * reference for the lifetime of the buffer ID to skip refcount
    * atomics instead of each binding point holding the reference.
    */
   _mesa_reference_buffer_object(ctx, &buf, NULL);
}
1140
/**
 * Zombie buffers are buffers that were created by one context and deleted
 * by another context. The creating context holds a global reference for each
 * buffer it created that can't be unreferenced when another context deletes
 * it. Such a buffer becomes a zombie, which means that it's no longer usable
 * by OpenGL, but the creating context still holds its global reference of
 * the buffer. Only the creating context can remove the reference, which is
 * what this function does.
 *
 * For all zombie buffers, decrement the reference count if the current
 * context owns the buffer.
 */
static void
unreference_zombie_buffers_for_ctx(struct gl_context *ctx)
{
   /* It's assumed that the mutex of Shared->BufferObjects is locked. */
   set_foreach(ctx->Shared->ZombieBufferObjects, entry) {
      struct gl_buffer_object *buf = (struct gl_buffer_object *)entry->key;

      if (buf->Ctx == ctx) {
         /* Removing the current entry is safe while iterating with
          * set_foreach; detaching drops this context's references.
          */
         _mesa_set_remove(ctx->Shared->ZombieBufferObjects, entry);
         detach_ctx_from_buffer(ctx, buf);
      }
   }
}
1166
1167 /**
1168 * When a context creates buffers, it holds a global buffer reference count
1169 * for each buffer and doesn't update their RefCount. When the context is
1170 * destroyed before the buffers are destroyed, the context must remove
1171 * its global reference from the buffers, so that the buffers can live
1172 * on their own.
1173 *
1174 * At this point, the buffers shouldn't be bound in any bounding point owned
1175 * by the context. (it would crash if they did)
1176 */
1177 static void
detach_unrefcounted_buffer_from_ctx(void * data,void * userData)1178 detach_unrefcounted_buffer_from_ctx(void *data, void *userData)
1179 {
1180 struct gl_context *ctx = (struct gl_context *)userData;
1181 struct gl_buffer_object *buf = (struct gl_buffer_object *)data;
1182
1183 if (buf->Ctx == ctx) {
1184 /* Detach the current context from live objects. There should be no
1185 * bound buffer in the context at this point, therefore we can just
1186 * unreference the global reference. Other contexts and texture objects
1187 * might still be using the buffer.
1188 */
1189 assert(buf->CtxRefCount == 0);
1190 buf->Ctx = NULL;
1191 _mesa_reference_buffer_object(ctx, &buf, NULL);
1192 }
1193 }
1194
1195 void
_mesa_free_buffer_objects(struct gl_context * ctx)1196 _mesa_free_buffer_objects( struct gl_context *ctx )
1197 {
1198 GLuint i;
1199
1200 _mesa_reference_buffer_object(ctx, &ctx->Array.ArrayBufferObj, NULL);
1201
1202 _mesa_reference_buffer_object(ctx, &ctx->CopyReadBuffer, NULL);
1203 _mesa_reference_buffer_object(ctx, &ctx->CopyWriteBuffer, NULL);
1204
1205 _mesa_reference_buffer_object(ctx, &ctx->UniformBuffer, NULL);
1206
1207 _mesa_reference_buffer_object(ctx, &ctx->ShaderStorageBuffer, NULL);
1208
1209 _mesa_reference_buffer_object(ctx, &ctx->AtomicBuffer, NULL);
1210
1211 _mesa_reference_buffer_object(ctx, &ctx->DrawIndirectBuffer, NULL);
1212
1213 _mesa_reference_buffer_object(ctx, &ctx->ParameterBuffer, NULL);
1214
1215 _mesa_reference_buffer_object(ctx, &ctx->DispatchIndirectBuffer, NULL);
1216
1217 _mesa_reference_buffer_object(ctx, &ctx->QueryBuffer, NULL);
1218
1219 for (i = 0; i < MAX_COMBINED_UNIFORM_BUFFERS; i++) {
1220 _mesa_reference_buffer_object(ctx,
1221 &ctx->UniformBufferBindings[i].BufferObject,
1222 NULL);
1223 }
1224
1225 for (i = 0; i < MAX_COMBINED_SHADER_STORAGE_BUFFERS; i++) {
1226 _mesa_reference_buffer_object(ctx,
1227 &ctx->ShaderStorageBufferBindings[i].BufferObject,
1228 NULL);
1229 }
1230
1231 for (i = 0; i < MAX_COMBINED_ATOMIC_BUFFERS; i++) {
1232 _mesa_reference_buffer_object(ctx,
1233 &ctx->AtomicBufferBindings[i].BufferObject,
1234 NULL);
1235 }
1236
1237 _mesa_HashLockMutex(&ctx->Shared->BufferObjects);
1238 unreference_zombie_buffers_for_ctx(ctx);
1239 _mesa_HashWalkLocked(&ctx->Shared->BufferObjects,
1240 detach_unrefcounted_buffer_from_ctx, ctx);
1241 _mesa_HashUnlockMutex(&ctx->Shared->BufferObjects);
1242 }
1243
1244 struct gl_buffer_object *
_mesa_bufferobj_alloc(struct gl_context * ctx,GLuint id)1245 _mesa_bufferobj_alloc(struct gl_context *ctx, GLuint id)
1246 {
1247 struct gl_buffer_object *buf = CALLOC_STRUCT(gl_buffer_object);
1248 if (!buf)
1249 return NULL;
1250
1251 buf->RefCount = 1;
1252 buf->Name = id;
1253 buf->Usage = GL_STATIC_DRAW_ARB;
1254
1255 simple_mtx_init(&buf->MinMaxCacheMutex, mtx_plain);
1256 if (get_no_minmax_cache())
1257 buf->UsageHistory |= USAGE_DISABLE_MINMAX_CACHE;
1258 return buf;
1259 }
1260 /**
1261 * Create a buffer object that will be backed by an OpenGL buffer ID
1262 * where the creating context will hold one global buffer reference instead
1263 * of updating buffer RefCount for every binding point.
1264 *
1265 * This shouldn't be used for internal buffers.
1266 */
1267 static struct gl_buffer_object *
new_gl_buffer_object(struct gl_context * ctx,GLuint id)1268 new_gl_buffer_object(struct gl_context *ctx, GLuint id)
1269 {
1270 struct gl_buffer_object *buf = _mesa_bufferobj_alloc(ctx, id);
1271
1272 buf->Ctx = ctx;
1273 buf->RefCount++; /* global buffer reference held by the context */
1274 return buf;
1275 }
1276
1277 static ALWAYS_INLINE bool
handle_bind_buffer_gen(struct gl_context * ctx,GLuint buffer,struct gl_buffer_object ** buf_handle,const char * caller,bool no_error)1278 handle_bind_buffer_gen(struct gl_context *ctx,
1279 GLuint buffer,
1280 struct gl_buffer_object **buf_handle,
1281 const char *caller, bool no_error)
1282 {
1283 struct gl_buffer_object *buf = *buf_handle;
1284
1285 if (unlikely(!no_error && !buf && _mesa_is_desktop_gl_core(ctx))) {
1286 _mesa_error(ctx, GL_INVALID_OPERATION, "%s(non-gen name)", caller);
1287 return false;
1288 }
1289
1290 if (unlikely(!buf || buf == &DummyBufferObject)) {
1291 /* If this is a new buffer object id, or one which was generated but
1292 * never used before, allocate a buffer object now.
1293 */
1294 *buf_handle = new_gl_buffer_object(ctx, buffer);
1295 if (!no_error && !*buf_handle) {
1296 _mesa_error(ctx, GL_OUT_OF_MEMORY, "%s", caller);
1297 return false;
1298 }
1299 _mesa_HashLockMaybeLocked(&ctx->Shared->BufferObjects,
1300 ctx->BufferObjectsLocked);
1301 _mesa_HashInsertLocked(&ctx->Shared->BufferObjects, buffer,
1302 *buf_handle);
1303 /* If one context only creates buffers and another context only deletes
1304 * buffers, buffers don't get released because it only produces zombie
1305 * buffers. Only the context that has created the buffers can release
1306 * them. Thus, when we create buffers, we prune the list of zombie
1307 * buffers.
1308 */
1309 unreference_zombie_buffers_for_ctx(ctx);
1310 _mesa_HashUnlockMaybeLocked(&ctx->Shared->BufferObjects,
1311 ctx->BufferObjectsLocked);
1312 }
1313
1314 return true;
1315 }
1316
/**
 * Public wrapper around handle_bind_buffer_gen() for callers outside this
 * file; the inline helper itself stays static so hot paths can inline it.
 */
bool
_mesa_handle_bind_buffer_gen(struct gl_context *ctx,
                             GLuint buffer,
                             struct gl_buffer_object **buf_handle,
                             const char *caller, bool no_error)
{
   return handle_bind_buffer_gen(ctx, buffer, buf_handle, caller, no_error);
}
1325
/**
 * Bind the specified target to buffer for the specified context.
 * Called by glBindBuffer() and other functions.
 *
 * \param bindTarget  address of the context binding slot to update
 * \param buffer      buffer name; 0 unbinds the target
 * \param no_error    skip error checking (KHR_no_error path)
 */
static void
bind_buffer_object(struct gl_context *ctx,
                   struct gl_buffer_object **bindTarget, GLuint buffer,
                   bool no_error)
{
   struct gl_buffer_object *oldBufObj;
   struct gl_buffer_object *newBufObj;

   assert(bindTarget);

   /* Fast path that unbinds. It's better when NULL is a literal, so that
    * the compiler can simplify this code after inlining.
    */
   if (buffer == 0) {
      _mesa_reference_buffer_object(ctx, bindTarget, NULL);
      return;
   }

   /* Get pointer to old buffer object (to be unbound) */
   oldBufObj = *bindTarget;
   /* A deleted buffer's name may have been reissued, so a DeletePending
    * match must not be treated as "already bound" (ABA problem).
    */
   GLuint old_name = oldBufObj && !oldBufObj->DeletePending ? oldBufObj->Name : 0;
   if (unlikely(old_name == buffer))
      return;   /* rebinding the same buffer object- no change */

   newBufObj = _mesa_lookup_bufferobj(ctx, buffer);
   /* Get a new buffer object if it hasn't been created. */
   if (unlikely(!handle_bind_buffer_gen(ctx, buffer, &newBufObj, "glBindBuffer",
                                        no_error)))
      return;

   /* At this point, the compiler should deduce that newBufObj is non-NULL if
    * everything has been inlined, so the compiler should simplify this.
    */
   _mesa_reference_buffer_object(ctx, bindTarget, newBufObj);
}
1365
1366
1367 /**
1368 * Update the default buffer objects in the given context to reference those
1369 * specified in the shared state and release those referencing the old
1370 * shared state.
1371 */
1372 void
_mesa_update_default_objects_buffer_objects(struct gl_context * ctx)1373 _mesa_update_default_objects_buffer_objects(struct gl_context *ctx)
1374 {
1375 /* Bind 0 to remove references to those in the shared context hash table. */
1376 bind_buffer_object(ctx, &ctx->Array.ArrayBufferObj, 0, false);
1377 bind_buffer_object(ctx, &ctx->Array.VAO->IndexBufferObj, 0, false);
1378 bind_buffer_object(ctx, &ctx->Pack.BufferObj, 0, false);
1379 bind_buffer_object(ctx, &ctx->Unpack.BufferObj, 0, false);
1380 }
1381
1382
1383
1384 /**
1385 * Return the gl_buffer_object for the given ID.
1386 * Always return NULL for ID 0.
1387 */
1388 struct gl_buffer_object *
_mesa_lookup_bufferobj(struct gl_context * ctx,GLuint buffer)1389 _mesa_lookup_bufferobj(struct gl_context *ctx, GLuint buffer)
1390 {
1391 if (buffer == 0)
1392 return NULL;
1393 else
1394 return (struct gl_buffer_object *)
1395 _mesa_HashLookupMaybeLocked(&ctx->Shared->BufferObjects, buffer,
1396 ctx->BufferObjectsLocked);
1397 }
1398
1399
1400 struct gl_buffer_object *
_mesa_lookup_bufferobj_locked(struct gl_context * ctx,GLuint buffer)1401 _mesa_lookup_bufferobj_locked(struct gl_context *ctx, GLuint buffer)
1402 {
1403 if (buffer == 0)
1404 return NULL;
1405 else
1406 return (struct gl_buffer_object *)
1407 _mesa_HashLookupLocked(&ctx->Shared->BufferObjects, buffer);
1408 }
1409
1410 /**
1411 * A convenience function for direct state access functions that throws
1412 * GL_INVALID_OPERATION if buffer is not the name of an existing
1413 * buffer object.
1414 */
1415 struct gl_buffer_object *
_mesa_lookup_bufferobj_err(struct gl_context * ctx,GLuint buffer,const char * caller)1416 _mesa_lookup_bufferobj_err(struct gl_context *ctx, GLuint buffer,
1417 const char *caller)
1418 {
1419 struct gl_buffer_object *bufObj;
1420
1421 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
1422 if (!bufObj || bufObj == &DummyBufferObject) {
1423 _mesa_error(ctx, GL_INVALID_OPERATION,
1424 "%s(non-existent buffer object %u)", caller, buffer);
1425 return NULL;
1426 }
1427
1428 return bufObj;
1429 }
1430
1431
1432 /**
1433 * Look up a buffer object for a multi-bind function.
1434 *
1435 * Unlike _mesa_lookup_bufferobj(), this function also takes care
1436 * of generating an error if the buffer ID is not zero or the name
1437 * of an existing buffer object.
1438 *
1439 * If the buffer ID refers to an existing buffer object, a pointer
1440 * to the buffer object is returned. If the ID is zero, NULL is returned.
1441 * If the ID is not zero and does not refer to a valid buffer object, this
1442 * function returns NULL.
1443 *
1444 * This function assumes that the caller has already locked the
1445 * hash table mutex by calling
1446 * _mesa_HashLockMutex(&ctx->Shared->BufferObjects).
1447 */
1448 struct gl_buffer_object *
_mesa_multi_bind_lookup_bufferobj(struct gl_context * ctx,const GLuint * buffers,GLuint index,const char * caller,bool * error)1449 _mesa_multi_bind_lookup_bufferobj(struct gl_context *ctx,
1450 const GLuint *buffers,
1451 GLuint index, const char *caller,
1452 bool *error)
1453 {
1454 struct gl_buffer_object *bufObj = NULL;
1455
1456 *error = false;
1457
1458 if (buffers[index] != 0) {
1459 bufObj = _mesa_lookup_bufferobj_locked(ctx, buffers[index]);
1460
1461 /* The multi-bind functions don't create the buffer objects
1462 when they don't exist. */
1463 if (bufObj == &DummyBufferObject)
1464 bufObj = NULL;
1465
1466 if (!bufObj) {
1467 /* The ARB_multi_bind spec says:
1468 *
1469 * "An INVALID_OPERATION error is generated if any value
1470 * in <buffers> is not zero or the name of an existing
1471 * buffer object (per binding)."
1472 */
1473 _mesa_error(ctx, GL_INVALID_OPERATION,
1474 "%s(buffers[%u]=%u is not zero or the name "
1475 "of an existing buffer object)",
1476 caller, index, buffers[index]);
1477 *error = true;
1478 }
1479 }
1480
1481 return bufObj;
1482 }
1483
1484
1485 /**
1486 * If *ptr points to obj, set ptr = the Null/default buffer object.
1487 * This is a helper for buffer object deletion.
1488 * The GL spec says that deleting a buffer object causes it to get
1489 * unbound from all arrays in the current context.
1490 */
1491 static void
unbind(struct gl_context * ctx,struct gl_vertex_array_object * vao,unsigned index,struct gl_buffer_object * obj)1492 unbind(struct gl_context *ctx,
1493 struct gl_vertex_array_object *vao, unsigned index,
1494 struct gl_buffer_object *obj)
1495 {
1496 if (vao->BufferBinding[index].BufferObj == obj) {
1497 _mesa_bind_vertex_buffer(ctx, vao, index, NULL,
1498 vao->BufferBinding[index].Offset,
1499 vao->BufferBinding[index].Stride, true, false);
1500 }
1501 }
1502
1503 void
_mesa_buffer_unmap_all_mappings(struct gl_context * ctx,struct gl_buffer_object * bufObj)1504 _mesa_buffer_unmap_all_mappings(struct gl_context *ctx,
1505 struct gl_buffer_object *bufObj)
1506 {
1507 for (int i = 0; i < MAP_COUNT; i++) {
1508 if (_mesa_bufferobj_mapped(bufObj, i)) {
1509 _mesa_bufferobj_unmap(ctx, bufObj, i);
1510 assert(bufObj->Mappings[i].Pointer == NULL);
1511 bufObj->Mappings[i].AccessFlags = 0;
1512 }
1513 }
1514 }
1515
1516
1517 /**********************************************************************/
1518 /* API Functions */
1519 /**********************************************************************/
1520
1521 void GLAPIENTRY
_mesa_BindBuffer_no_error(GLenum target,GLuint buffer)1522 _mesa_BindBuffer_no_error(GLenum target, GLuint buffer)
1523 {
1524 GET_CURRENT_CONTEXT(ctx);
1525
1526 struct gl_buffer_object **bindTarget = get_buffer_target(ctx, target, true);
1527 bind_buffer_object(ctx, bindTarget, buffer, true);
1528 }
1529
1530
1531 void GLAPIENTRY
_mesa_BindBuffer(GLenum target,GLuint buffer)1532 _mesa_BindBuffer(GLenum target, GLuint buffer)
1533 {
1534 GET_CURRENT_CONTEXT(ctx);
1535
1536 if (MESA_VERBOSE & VERBOSE_API) {
1537 _mesa_debug(ctx, "glBindBuffer(%s, %u)\n",
1538 _mesa_enum_to_string(target), buffer);
1539 }
1540
1541 struct gl_buffer_object **bindTarget = get_buffer_target(ctx, target, false);
1542 if (!bindTarget) {
1543 _mesa_error(ctx, GL_INVALID_ENUM, "glBindBufferARB(target %s)",
1544 _mesa_enum_to_string(target));
1545 return;
1546 }
1547
1548 bind_buffer_object(ctx, bindTarget, buffer, false);
1549 }
1550
1551 /**
1552 * Binds a buffer object to a binding point.
1553 *
1554 * The caller is responsible for validating the offset,
1555 * flushing the vertices and updating NewDriverState.
1556 */
1557 static void
set_buffer_binding(struct gl_context * ctx,struct gl_buffer_binding * binding,struct gl_buffer_object * bufObj,GLintptr offset,GLsizeiptr size,bool autoSize,gl_buffer_usage usage)1558 set_buffer_binding(struct gl_context *ctx,
1559 struct gl_buffer_binding *binding,
1560 struct gl_buffer_object *bufObj,
1561 GLintptr offset,
1562 GLsizeiptr size,
1563 bool autoSize, gl_buffer_usage usage)
1564 {
1565 _mesa_reference_buffer_object(ctx, &binding->BufferObject, bufObj);
1566
1567 binding->Offset = offset;
1568 binding->Size = size;
1569 binding->AutomaticSize = autoSize;
1570
1571 /* If this is a real buffer object, mark it has having been used
1572 * at some point as an atomic counter buffer.
1573 */
1574 if (size >= 0)
1575 bufObj->UsageHistory |= usage;
1576 }
1577
1578 static void
set_buffer_multi_binding(struct gl_context * ctx,const GLuint * buffers,int idx,const char * caller,struct gl_buffer_binding * binding,GLintptr offset,GLsizeiptr size,bool range,gl_buffer_usage usage)1579 set_buffer_multi_binding(struct gl_context *ctx,
1580 const GLuint *buffers,
1581 int idx,
1582 const char *caller,
1583 struct gl_buffer_binding *binding,
1584 GLintptr offset,
1585 GLsizeiptr size,
1586 bool range,
1587 gl_buffer_usage usage)
1588 {
1589 struct gl_buffer_object *bufObj;
1590
1591 if (binding->BufferObject && binding->BufferObject->Name == buffers[idx])
1592 bufObj = binding->BufferObject;
1593 else {
1594 bool error;
1595 bufObj = _mesa_multi_bind_lookup_bufferobj(ctx, buffers, idx, caller,
1596 &error);
1597 if (error)
1598 return;
1599 }
1600
1601 if (!bufObj)
1602 set_buffer_binding(ctx, binding, bufObj, -1, -1, !range, usage);
1603 else
1604 set_buffer_binding(ctx, binding, bufObj, offset, size, !range, usage);
1605 }
1606
1607 static void
bind_buffer(struct gl_context * ctx,struct gl_buffer_binding * binding,struct gl_buffer_object * bufObj,GLintptr offset,GLsizeiptr size,GLboolean autoSize,uint64_t driver_state,gl_buffer_usage usage)1608 bind_buffer(struct gl_context *ctx,
1609 struct gl_buffer_binding *binding,
1610 struct gl_buffer_object *bufObj,
1611 GLintptr offset,
1612 GLsizeiptr size,
1613 GLboolean autoSize,
1614 uint64_t driver_state,
1615 gl_buffer_usage usage)
1616 {
1617 if (binding->BufferObject == bufObj &&
1618 binding->Offset == offset &&
1619 binding->Size == size &&
1620 binding->AutomaticSize == autoSize) {
1621 return;
1622 }
1623
1624 FLUSH_VERTICES(ctx, 0, 0);
1625 ctx->NewDriverState |= driver_state;
1626
1627 set_buffer_binding(ctx, binding, bufObj, offset, size, autoSize, usage);
1628 }
1629
/**
 * Binds a buffer object to a uniform buffer binding point.
 *
 * Unlike set_buffer_binding(), this function also flushes vertices
 * and updates NewDriverState. It also checks if the binding
 * has actually changed before updating it.
 */
static void
bind_uniform_buffer(struct gl_context *ctx,
                    GLuint index,
                    struct gl_buffer_object *bufObj,
                    GLintptr offset,
                    GLsizeiptr size,
                    GLboolean autoSize)
{
   bind_buffer(ctx, &ctx->UniformBufferBindings[index],
               bufObj, offset, size, autoSize,
               ST_NEW_UNIFORM_BUFFER,
               USAGE_UNIFORM_BUFFER);
}
1650
/**
 * Binds a buffer object to a shader storage buffer binding point.
 *
 * Unlike set_buffer_binding(), this function also flushes vertices
 * and updates NewDriverState. It also checks if the binding
 * has actually changed before updating it.
 */
static void
bind_shader_storage_buffer(struct gl_context *ctx,
                           GLuint index,
                           struct gl_buffer_object *bufObj,
                           GLintptr offset,
                           GLsizeiptr size,
                           GLboolean autoSize)
{
   bind_buffer(ctx, &ctx->ShaderStorageBufferBindings[index],
               bufObj, offset, size, autoSize,
               ST_NEW_STORAGE_BUFFER,
               USAGE_SHADER_STORAGE_BUFFER);
}
1671
/**
 * Binds a buffer object to an atomic counter buffer binding point.
 *
 * Unlike set_buffer_binding(), this function also flushes vertices
 * and updates NewDriverState. It also checks if the binding
 * has actually changed before updating it.
 */
static void
bind_atomic_buffer(struct gl_context *ctx, unsigned index,
                   struct gl_buffer_object *bufObj, GLintptr offset,
                   GLsizeiptr size, GLboolean autoSize)
{
   bind_buffer(ctx, &ctx->AtomicBufferBindings[index],
               bufObj, offset, size, autoSize,
               ctx->DriverFlags.NewAtomicBuffer,
               USAGE_ATOMIC_COUNTER_BUFFER);
}
1689
1690 /**
1691 * Bind a buffer object to a uniform block binding point.
1692 * As above, but offset = 0.
1693 */
1694 static void
bind_buffer_base_uniform_buffer(struct gl_context * ctx,GLuint index,struct gl_buffer_object * bufObj)1695 bind_buffer_base_uniform_buffer(struct gl_context *ctx,
1696 GLuint index,
1697 struct gl_buffer_object *bufObj)
1698 {
1699 if (index >= ctx->Const.MaxUniformBufferBindings) {
1700 _mesa_error(ctx, GL_INVALID_VALUE, "glBindBufferBase(index=%d)", index);
1701 return;
1702 }
1703
1704 _mesa_reference_buffer_object(ctx, &ctx->UniformBuffer, bufObj);
1705
1706 if (!bufObj)
1707 bind_uniform_buffer(ctx, index, bufObj, -1, -1, GL_TRUE);
1708 else
1709 bind_uniform_buffer(ctx, index, bufObj, 0, 0, GL_TRUE);
1710 }
1711
1712 /**
1713 * Bind a buffer object to a shader storage block binding point.
1714 * As above, but offset = 0.
1715 */
1716 static void
bind_buffer_base_shader_storage_buffer(struct gl_context * ctx,GLuint index,struct gl_buffer_object * bufObj)1717 bind_buffer_base_shader_storage_buffer(struct gl_context *ctx,
1718 GLuint index,
1719 struct gl_buffer_object *bufObj)
1720 {
1721 if (index >= ctx->Const.MaxShaderStorageBufferBindings) {
1722 _mesa_error(ctx, GL_INVALID_VALUE, "glBindBufferBase(index=%d)", index);
1723 return;
1724 }
1725
1726 _mesa_reference_buffer_object(ctx, &ctx->ShaderStorageBuffer, bufObj);
1727
1728 if (!bufObj)
1729 bind_shader_storage_buffer(ctx, index, bufObj, -1, -1, GL_TRUE);
1730 else
1731 bind_shader_storage_buffer(ctx, index, bufObj, 0, 0, GL_TRUE);
1732 }
1733
1734 /**
1735 * Bind a buffer object to a shader storage block binding point.
1736 * As above, but offset = 0.
1737 */
1738 static void
bind_buffer_base_atomic_buffer(struct gl_context * ctx,GLuint index,struct gl_buffer_object * bufObj)1739 bind_buffer_base_atomic_buffer(struct gl_context *ctx,
1740 GLuint index,
1741 struct gl_buffer_object *bufObj)
1742 {
1743 if (index >= ctx->Const.MaxAtomicBufferBindings) {
1744 _mesa_error(ctx, GL_INVALID_VALUE, "glBindBufferBase(index=%d)", index);
1745 return;
1746 }
1747
1748 _mesa_reference_buffer_object(ctx, &ctx->AtomicBuffer, bufObj);
1749
1750 if (!bufObj)
1751 bind_atomic_buffer(ctx, index, bufObj, -1, -1, GL_TRUE);
1752 else
1753 bind_atomic_buffer(ctx, index, bufObj, 0, 0, GL_TRUE);
1754 }
1755
/**
 * Delete a set of buffer objects.
 *
 * Each deleted buffer is unbound from every binding point in the current
 * context, its name is removed from the shared hash table, and the
 * references held by the ID (and by the creating context, if that is us)
 * are dropped.
 *
 * \param n Number of buffer objects to delete.
 * \param ids Array of \c n buffer object IDs.
 */
static void
delete_buffers(struct gl_context *ctx, GLsizei n, const GLuint *ids)
{
   FLUSH_VERTICES(ctx, 0, 0);

   _mesa_HashLockMaybeLocked(&ctx->Shared->BufferObjects,
                             ctx->BufferObjectsLocked);
   unreference_zombie_buffers_for_ctx(ctx);

   for (GLsizei i = 0; i < n; i++) {
      struct gl_buffer_object *bufObj =
         _mesa_lookup_bufferobj_locked(ctx, ids[i]);
      if (bufObj) {
         struct gl_vertex_array_object *vao = ctx->Array.VAO;
         GLuint j;

         assert(bufObj->Name == ids[i] || bufObj == &DummyBufferObject);

         _mesa_buffer_unmap_all_mappings(ctx, bufObj);

         /* unbind any vertex pointers bound to this buffer */
         for (j = 0; j < ARRAY_SIZE(vao->BufferBinding); j++) {
            unbind(ctx, vao, j, bufObj);
         }

         if (ctx->Array.ArrayBufferObj == bufObj) {
            bind_buffer_object(ctx, &ctx->Array.ArrayBufferObj, 0, false);
         }
         if (vao->IndexBufferObj == bufObj) {
            bind_buffer_object(ctx, &vao->IndexBufferObj, 0, false);
         }

         /* unbind ARB_draw_indirect binding point */
         if (ctx->DrawIndirectBuffer == bufObj) {
            bind_buffer_object(ctx, &ctx->DrawIndirectBuffer, 0, false);
         }

         /* unbind ARB_indirect_parameters binding point */
         if (ctx->ParameterBuffer == bufObj) {
            bind_buffer_object(ctx, &ctx->ParameterBuffer, 0, false);
         }

         /* unbind ARB_compute_shader binding point */
         if (ctx->DispatchIndirectBuffer == bufObj) {
            bind_buffer_object(ctx, &ctx->DispatchIndirectBuffer, 0, false);
         }

         /* unbind ARB_copy_buffer binding points */
         if (ctx->CopyReadBuffer == bufObj) {
            bind_buffer_object(ctx, &ctx->CopyReadBuffer, 0, false);
         }
         if (ctx->CopyWriteBuffer == bufObj) {
            bind_buffer_object(ctx, &ctx->CopyWriteBuffer, 0, false);
         }

         /* unbind transform feedback binding points */
         if (ctx->TransformFeedback.CurrentBuffer == bufObj) {
            bind_buffer_object(ctx, &ctx->TransformFeedback.CurrentBuffer, 0, false);
         }
         for (j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
            if (ctx->TransformFeedback.CurrentObject->Buffers[j] == bufObj) {
               _mesa_bind_buffer_base_transform_feedback(ctx,
                                           ctx->TransformFeedback.CurrentObject,
                                           j, NULL, false);
            }
         }

         /* unbind UBO binding points */
         for (j = 0; j < ctx->Const.MaxUniformBufferBindings; j++) {
            if (ctx->UniformBufferBindings[j].BufferObject == bufObj) {
               bind_buffer_base_uniform_buffer(ctx, j, NULL);
            }
         }

         if (ctx->UniformBuffer == bufObj) {
            bind_buffer_object(ctx, &ctx->UniformBuffer, 0, false);
         }

         /* unbind SSBO binding points */
         for (j = 0; j < ctx->Const.MaxShaderStorageBufferBindings; j++) {
            if (ctx->ShaderStorageBufferBindings[j].BufferObject == bufObj) {
               bind_buffer_base_shader_storage_buffer(ctx, j, NULL);
            }
         }

         if (ctx->ShaderStorageBuffer == bufObj) {
            bind_buffer_object(ctx, &ctx->ShaderStorageBuffer, 0, false);
         }

         /* unbind atomic counter buffer binding points */
         for (j = 0; j < ctx->Const.MaxAtomicBufferBindings; j++) {
            if (ctx->AtomicBufferBindings[j].BufferObject == bufObj) {
               bind_buffer_base_atomic_buffer(ctx, j, NULL);
            }
         }

         if (ctx->AtomicBuffer == bufObj) {
            bind_buffer_object(ctx, &ctx->AtomicBuffer, 0, false);
         }

         /* unbind any pixel pack/unpack pointers bound to this buffer */
         if (ctx->Pack.BufferObj == bufObj) {
            bind_buffer_object(ctx, &ctx->Pack.BufferObj, 0, false);
         }
         if (ctx->Unpack.BufferObj == bufObj) {
            bind_buffer_object(ctx, &ctx->Unpack.BufferObj, 0, false);
         }

         if (ctx->Texture.BufferObject == bufObj) {
            bind_buffer_object(ctx, &ctx->Texture.BufferObject, 0, false);
         }

         if (ctx->ExternalVirtualMemoryBuffer == bufObj) {
            bind_buffer_object(ctx, &ctx->ExternalVirtualMemoryBuffer, 0, false);
         }

         /* unbind query buffer binding point */
         if (ctx->QueryBuffer == bufObj) {
            bind_buffer_object(ctx, &ctx->QueryBuffer, 0, false);
         }

         /* The ID is immediately freed for re-use */
         _mesa_HashRemoveLocked(&ctx->Shared->BufferObjects, ids[i]);
         /* Make sure we do not run into the classic ABA problem on bind.
          * We don't want to allow re-binding a buffer object that's been
          * "deleted" by glDeleteBuffers().
          *
          * The explicit rebinding to the default object in the current context
          * prevents the above in the current context, but another context
          * sharing the same objects might suffer from this problem.
          * The alternative would be to do the hash lookup in any case on bind
          * which would introduce more runtime overhead than this.
          */
         bufObj->DeletePending = GL_TRUE;

         /* The GLuint ID holds one reference and the context that created
          * the buffer holds the other one.
          */
         assert(p_atomic_read(&bufObj->RefCount) >= (bufObj->Ctx ? 2 : 1));

         if (bufObj->Ctx == ctx) {
            detach_ctx_from_buffer(ctx, bufObj);
         } else if (bufObj->Ctx) {
            /* Only the context holding it can release it. */
            _mesa_set_add(ctx->Shared->ZombieBufferObjects, bufObj);
         }

         /* Drop the reference that was held by the now-freed ID. */
         _mesa_reference_buffer_object(ctx, &bufObj, NULL);
      }
   }

   _mesa_HashUnlockMaybeLocked(&ctx->Shared->BufferObjects,
                               ctx->BufferObjectsLocked);
}
1916
1917
1918 void GLAPIENTRY
_mesa_DeleteBuffers_no_error(GLsizei n,const GLuint * ids)1919 _mesa_DeleteBuffers_no_error(GLsizei n, const GLuint *ids)
1920 {
1921 GET_CURRENT_CONTEXT(ctx);
1922 delete_buffers(ctx, n, ids);
1923 }
1924
1925
1926 void GLAPIENTRY
_mesa_DeleteBuffers(GLsizei n,const GLuint * ids)1927 _mesa_DeleteBuffers(GLsizei n, const GLuint *ids)
1928 {
1929 GET_CURRENT_CONTEXT(ctx);
1930
1931 if (n < 0) {
1932 _mesa_error(ctx, GL_INVALID_VALUE, "glDeleteBuffersARB(n)");
1933 return;
1934 }
1935
1936 delete_buffers(ctx, n, ids);
1937 }
1938
1939
1940 /**
1941 * This is the implementation for glGenBuffers and glCreateBuffers. It is not
1942 * exposed to the rest of Mesa to encourage the use of nameless buffers in
1943 * driver internals.
1944 */
1945 static void
create_buffers(struct gl_context * ctx,GLsizei n,GLuint * buffers,bool dsa)1946 create_buffers(struct gl_context *ctx, GLsizei n, GLuint *buffers, bool dsa)
1947 {
1948 struct gl_buffer_object *buf;
1949
1950 if (!buffers)
1951 return;
1952
1953 /*
1954 * This must be atomic (generation and allocation of buffer object IDs)
1955 */
1956 _mesa_HashLockMaybeLocked(&ctx->Shared->BufferObjects,
1957 ctx->BufferObjectsLocked);
1958 /* If one context only creates buffers and another context only deletes
1959 * buffers, buffers don't get released because it only produces zombie
1960 * buffers. Only the context that has created the buffers can release
1961 * them. Thus, when we create buffers, we prune the list of zombie
1962 * buffers.
1963 */
1964 unreference_zombie_buffers_for_ctx(ctx);
1965
1966 _mesa_HashFindFreeKeys(&ctx->Shared->BufferObjects, buffers, n);
1967
1968 /* Insert the ID and pointer into the hash table. If non-DSA, insert a
1969 * DummyBufferObject. Otherwise, create a new buffer object and insert
1970 * it.
1971 */
1972 for (int i = 0; i < n; i++) {
1973 if (dsa) {
1974 buf = new_gl_buffer_object(ctx, buffers[i]);
1975 if (!buf) {
1976 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glCreateBuffers");
1977 _mesa_HashUnlockMaybeLocked(&ctx->Shared->BufferObjects,
1978 ctx->BufferObjectsLocked);
1979 return;
1980 }
1981 }
1982 else
1983 buf = &DummyBufferObject;
1984
1985 _mesa_HashInsertLocked(&ctx->Shared->BufferObjects, buffers[i], buf);
1986 }
1987
1988 _mesa_HashUnlockMaybeLocked(&ctx->Shared->BufferObjects,
1989 ctx->BufferObjectsLocked);
1990 }
1991
1992
1993 static void
create_buffers_err(struct gl_context * ctx,GLsizei n,GLuint * buffers,bool dsa)1994 create_buffers_err(struct gl_context *ctx, GLsizei n, GLuint *buffers, bool dsa)
1995 {
1996 const char *func = dsa ? "glCreateBuffers" : "glGenBuffers";
1997
1998 if (MESA_VERBOSE & VERBOSE_API)
1999 _mesa_debug(ctx, "%s(%d)\n", func, n);
2000
2001 if (n < 0) {
2002 _mesa_error(ctx, GL_INVALID_VALUE, "%s(n %d < 0)", func, n);
2003 return;
2004 }
2005
2006 create_buffers(ctx, n, buffers, dsa);
2007 }
2008
2009 /**
2010 * Generate a set of unique buffer object IDs and store them in \c buffers.
2011 *
2012 * \param n Number of IDs to generate.
2013 * \param buffers Array of \c n locations to store the IDs.
2014 */
2015 void GLAPIENTRY
_mesa_GenBuffers_no_error(GLsizei n,GLuint * buffers)2016 _mesa_GenBuffers_no_error(GLsizei n, GLuint *buffers)
2017 {
2018 GET_CURRENT_CONTEXT(ctx);
2019 create_buffers(ctx, n, buffers, false);
2020 }
2021
2022
2023 void GLAPIENTRY
_mesa_GenBuffers(GLsizei n,GLuint * buffers)2024 _mesa_GenBuffers(GLsizei n, GLuint *buffers)
2025 {
2026 GET_CURRENT_CONTEXT(ctx);
2027 create_buffers_err(ctx, n, buffers, false);
2028 }
2029
2030 /**
2031 * Create a set of buffer objects and store their unique IDs in \c buffers.
2032 *
2033 * \param n Number of IDs to generate.
2034 * \param buffers Array of \c n locations to store the IDs.
2035 */
2036 void GLAPIENTRY
_mesa_CreateBuffers_no_error(GLsizei n,GLuint * buffers)2037 _mesa_CreateBuffers_no_error(GLsizei n, GLuint *buffers)
2038 {
2039 GET_CURRENT_CONTEXT(ctx);
2040 create_buffers(ctx, n, buffers, true);
2041 }
2042
2043
2044 void GLAPIENTRY
_mesa_CreateBuffers(GLsizei n,GLuint * buffers)2045 _mesa_CreateBuffers(GLsizei n, GLuint *buffers)
2046 {
2047 GET_CURRENT_CONTEXT(ctx);
2048 create_buffers_err(ctx, n, buffers, true);
2049 }
2050
2051
2052 /**
2053 * Determine if ID is the name of a buffer object.
2054 *
2055 * \param id ID of the potential buffer object.
2056 * \return \c GL_TRUE if \c id is the name of a buffer object,
2057 * \c GL_FALSE otherwise.
2058 */
2059 GLboolean GLAPIENTRY
_mesa_IsBuffer(GLuint id)2060 _mesa_IsBuffer(GLuint id)
2061 {
2062 struct gl_buffer_object *bufObj;
2063 GET_CURRENT_CONTEXT(ctx);
2064 ASSERT_OUTSIDE_BEGIN_END_WITH_RETVAL(ctx, GL_FALSE);
2065
2066 bufObj = _mesa_lookup_bufferobj(ctx, id);
2067
2068 return bufObj && bufObj != &DummyBufferObject;
2069 }
2070
2071
2072 static bool
validate_buffer_storage(struct gl_context * ctx,struct gl_buffer_object * bufObj,GLsizeiptr size,GLbitfield flags,const char * func)2073 validate_buffer_storage(struct gl_context *ctx,
2074 struct gl_buffer_object *bufObj, GLsizeiptr size,
2075 GLbitfield flags, const char *func)
2076 {
2077 if (size <= 0) {
2078 _mesa_error(ctx, GL_INVALID_VALUE, "%s(size <= 0)", func);
2079 return false;
2080 }
2081
2082 GLbitfield valid_flags = GL_MAP_READ_BIT |
2083 GL_MAP_WRITE_BIT |
2084 GL_MAP_PERSISTENT_BIT |
2085 GL_MAP_COHERENT_BIT |
2086 GL_DYNAMIC_STORAGE_BIT |
2087 GL_CLIENT_STORAGE_BIT;
2088
2089 if (ctx->Extensions.ARB_sparse_buffer)
2090 valid_flags |= GL_SPARSE_STORAGE_BIT_ARB;
2091
2092 if (flags & ~valid_flags) {
2093 _mesa_error(ctx, GL_INVALID_VALUE, "%s(invalid flag bits set)", func);
2094 return false;
2095 }
2096
2097 /* The Errors section of the GL_ARB_sparse_buffer spec says:
2098 *
2099 * "INVALID_VALUE is generated by BufferStorage if <flags> contains
2100 * SPARSE_STORAGE_BIT_ARB and <flags> also contains any combination of
2101 * MAP_READ_BIT or MAP_WRITE_BIT."
2102 */
2103 if (flags & GL_SPARSE_STORAGE_BIT_ARB &&
2104 flags & (GL_MAP_READ_BIT | GL_MAP_WRITE_BIT)) {
2105 _mesa_error(ctx, GL_INVALID_VALUE, "%s(SPARSE_STORAGE and READ/WRITE)", func);
2106 return false;
2107 }
2108
2109 if (flags & GL_MAP_PERSISTENT_BIT &&
2110 !(flags & (GL_MAP_READ_BIT | GL_MAP_WRITE_BIT))) {
2111 _mesa_error(ctx, GL_INVALID_VALUE,
2112 "%s(PERSISTENT and flags!=READ/WRITE)", func);
2113 return false;
2114 }
2115
2116 if (flags & GL_MAP_COHERENT_BIT && !(flags & GL_MAP_PERSISTENT_BIT)) {
2117 _mesa_error(ctx, GL_INVALID_VALUE,
2118 "%s(COHERENT and flags!=PERSISTENT)", func);
2119 return false;
2120 }
2121
2122 if (bufObj->Immutable || bufObj->HandleAllocated) {
2123 _mesa_error(ctx, GL_INVALID_OPERATION, "%s(immutable)", func);
2124 return false;
2125 }
2126
2127 return true;
2128 }
2129
2130
2131 static void
buffer_storage(struct gl_context * ctx,struct gl_buffer_object * bufObj,struct gl_memory_object * memObj,GLenum target,GLsizeiptr size,const GLvoid * data,GLbitfield flags,GLuint64 offset,const char * func)2132 buffer_storage(struct gl_context *ctx, struct gl_buffer_object *bufObj,
2133 struct gl_memory_object *memObj, GLenum target,
2134 GLsizeiptr size, const GLvoid *data, GLbitfield flags,
2135 GLuint64 offset, const char *func)
2136 {
2137 GLboolean res;
2138
2139 /* Unmap the existing buffer. We'll replace it now. Not an error. */
2140 _mesa_buffer_unmap_all_mappings(ctx, bufObj);
2141
2142 FLUSH_VERTICES(ctx, 0, 0);
2143
2144 bufObj->Immutable = GL_TRUE;
2145 bufObj->MinMaxCacheDirty = true;
2146
2147 if (memObj) {
2148 res = bufferobj_data_mem(ctx, target, size, memObj, offset,
2149 GL_DYNAMIC_DRAW, bufObj);
2150 }
2151 else {
2152 res = _mesa_bufferobj_data(ctx, target, size, data, GL_DYNAMIC_DRAW,
2153 flags, bufObj);
2154 }
2155
2156 if (!res) {
2157 if (target == GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD) {
2158 /* Even though the interaction between AMD_pinned_memory and
2159 * glBufferStorage is not described in the spec, Graham Sellers
2160 * said that it should behave the same as glBufferData.
2161 */
2162 _mesa_error(ctx, GL_INVALID_OPERATION, "%s", func);
2163 }
2164 else {
2165 _mesa_error(ctx, GL_OUT_OF_MEMORY, "%s", func);
2166 }
2167 }
2168 }
2169
2170
2171 static ALWAYS_INLINE void
inlined_buffer_storage(GLenum target,GLuint buffer,GLsizeiptr size,const GLvoid * data,GLbitfield flags,GLuint memory,GLuint64 offset,bool dsa,bool mem,bool no_error,const char * func)2172 inlined_buffer_storage(GLenum target, GLuint buffer, GLsizeiptr size,
2173 const GLvoid *data, GLbitfield flags,
2174 GLuint memory, GLuint64 offset,
2175 bool dsa, bool mem, bool no_error, const char *func)
2176 {
2177 GET_CURRENT_CONTEXT(ctx);
2178 struct gl_buffer_object *bufObj;
2179 struct gl_memory_object *memObj = NULL;
2180
2181 if (mem) {
2182 if (!no_error) {
2183 if (!_mesa_has_EXT_memory_object(ctx)) {
2184 _mesa_error(ctx, GL_INVALID_OPERATION, "%s(unsupported)", func);
2185 return;
2186 }
2187
2188 /* From the EXT_external_objects spec:
2189 *
2190 * "An INVALID_VALUE error is generated by BufferStorageMemEXT and
2191 * NamedBufferStorageMemEXT if <memory> is 0, or ..."
2192 */
2193 if (memory == 0) {
2194 _mesa_error(ctx, GL_INVALID_VALUE, "%s(memory == 0)", func);
2195 }
2196 }
2197
2198 memObj = _mesa_lookup_memory_object(ctx, memory);
2199 if (!memObj)
2200 return;
2201
2202 /* From the EXT_external_objects spec:
2203 *
2204 * "An INVALID_OPERATION error is generated if <memory> names a
2205 * valid memory object which has no associated memory."
2206 */
2207 if (!no_error && !memObj->Immutable) {
2208 _mesa_error(ctx, GL_INVALID_OPERATION, "%s(no associated memory)",
2209 func);
2210 return;
2211 }
2212 }
2213
2214 if (dsa) {
2215 if (no_error) {
2216 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2217 } else {
2218 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer, func);
2219 if (!bufObj)
2220 return;
2221 }
2222 } else {
2223 if (no_error) {
2224 struct gl_buffer_object **bufObjPtr =
2225 get_buffer_target(ctx, target, true);
2226 bufObj = *bufObjPtr;
2227 } else {
2228 bufObj = get_buffer(ctx, func, target, GL_INVALID_OPERATION);
2229 if (!bufObj)
2230 return;
2231 }
2232 }
2233
2234 if (no_error || validate_buffer_storage(ctx, bufObj, size, flags, func))
2235 buffer_storage(ctx, bufObj, memObj, target, size, data, flags, offset, func);
2236 }
2237
2238
2239 void GLAPIENTRY
_mesa_BufferStorage_no_error(GLenum target,GLsizeiptr size,const GLvoid * data,GLbitfield flags)2240 _mesa_BufferStorage_no_error(GLenum target, GLsizeiptr size,
2241 const GLvoid *data, GLbitfield flags)
2242 {
2243 inlined_buffer_storage(target, 0, size, data, flags, GL_NONE, 0,
2244 false, false, true, "glBufferStorage");
2245 }
2246
2247
2248 void GLAPIENTRY
_mesa_BufferStorage(GLenum target,GLsizeiptr size,const GLvoid * data,GLbitfield flags)2249 _mesa_BufferStorage(GLenum target, GLsizeiptr size, const GLvoid *data,
2250 GLbitfield flags)
2251 {
2252 inlined_buffer_storage(target, 0, size, data, flags, GL_NONE, 0,
2253 false, false, false, "glBufferStorage");
2254 }
2255
2256 void GLAPIENTRY
_mesa_NamedBufferStorageEXT(GLuint buffer,GLsizeiptr size,const GLvoid * data,GLbitfield flags)2257 _mesa_NamedBufferStorageEXT(GLuint buffer, GLsizeiptr size,
2258 const GLvoid *data, GLbitfield flags)
2259 {
2260 GET_CURRENT_CONTEXT(ctx);
2261
2262 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2263 if (!handle_bind_buffer_gen(ctx, buffer,
2264 &bufObj, "glNamedBufferStorageEXT", false))
2265 return;
2266
2267 inlined_buffer_storage(GL_NONE, buffer, size, data, flags, GL_NONE, 0,
2268 true, false, false, "glNamedBufferStorageEXT");
2269 }
2270
2271
2272 void GLAPIENTRY
_mesa_BufferStorageMemEXT(GLenum target,GLsizeiptr size,GLuint memory,GLuint64 offset)2273 _mesa_BufferStorageMemEXT(GLenum target, GLsizeiptr size,
2274 GLuint memory, GLuint64 offset)
2275 {
2276 inlined_buffer_storage(target, 0, size, NULL, 0, memory, offset,
2277 false, true, false, "glBufferStorageMemEXT");
2278 }
2279
2280
2281 void GLAPIENTRY
_mesa_BufferStorageMemEXT_no_error(GLenum target,GLsizeiptr size,GLuint memory,GLuint64 offset)2282 _mesa_BufferStorageMemEXT_no_error(GLenum target, GLsizeiptr size,
2283 GLuint memory, GLuint64 offset)
2284 {
2285 inlined_buffer_storage(target, 0, size, NULL, 0, memory, offset,
2286 false, true, true, "glBufferStorageMemEXT");
2287 }
2288
2289
2290 void GLAPIENTRY
_mesa_NamedBufferStorage_no_error(GLuint buffer,GLsizeiptr size,const GLvoid * data,GLbitfield flags)2291 _mesa_NamedBufferStorage_no_error(GLuint buffer, GLsizeiptr size,
2292 const GLvoid *data, GLbitfield flags)
2293 {
2294 /* In direct state access, buffer objects have an unspecified target
2295 * since they are not required to be bound.
2296 */
2297 inlined_buffer_storage(GL_NONE, buffer, size, data, flags, GL_NONE, 0,
2298 true, false, true, "glNamedBufferStorage");
2299 }
2300
2301
2302 void GLAPIENTRY
_mesa_NamedBufferStorage(GLuint buffer,GLsizeiptr size,const GLvoid * data,GLbitfield flags)2303 _mesa_NamedBufferStorage(GLuint buffer, GLsizeiptr size, const GLvoid *data,
2304 GLbitfield flags)
2305 {
2306 /* In direct state access, buffer objects have an unspecified target
2307 * since they are not required to be bound.
2308 */
2309 inlined_buffer_storage(GL_NONE, buffer, size, data, flags, GL_NONE, 0,
2310 true, false, false, "glNamedBufferStorage");
2311 }
2312
2313 void GLAPIENTRY
_mesa_NamedBufferStorageMemEXT(GLuint buffer,GLsizeiptr size,GLuint memory,GLuint64 offset)2314 _mesa_NamedBufferStorageMemEXT(GLuint buffer, GLsizeiptr size,
2315 GLuint memory, GLuint64 offset)
2316 {
2317 inlined_buffer_storage(GL_NONE, buffer, size, NULL, 0, memory, offset,
2318 true, true, false, "glNamedBufferStorageMemEXT");
2319 }
2320
2321
2322 void GLAPIENTRY
_mesa_NamedBufferStorageMemEXT_no_error(GLuint buffer,GLsizeiptr size,GLuint memory,GLuint64 offset)2323 _mesa_NamedBufferStorageMemEXT_no_error(GLuint buffer, GLsizeiptr size,
2324 GLuint memory, GLuint64 offset)
2325 {
2326 inlined_buffer_storage(GL_NONE, buffer, size, NULL, 0, memory, offset,
2327 true, true, true, "glNamedBufferStorageMemEXT");
2328 }
2329
2330
/* Shared implementation of glBufferData / glNamedBufferData: (re)allocate
 * the buffer's mutable data store.  When no_error is true all validation is
 * skipped (KHR_no_error fast path).
 */
static ALWAYS_INLINE void
buffer_data(struct gl_context *ctx, struct gl_buffer_object *bufObj,
            GLenum target, GLsizeiptr size, const GLvoid *data, GLenum usage,
            const char *func, bool no_error)
{
   bool valid_usage;

   if (MESA_VERBOSE & VERBOSE_API) {
      _mesa_debug(ctx, "%s(%s, %ld, %p, %s)\n",
                  func,
                  _mesa_enum_to_string(target),
                  (long int) size, data,
                  _mesa_enum_to_string(usage));
   }

   if (!no_error) {
      if (size < 0) {
         _mesa_error(ctx, GL_INVALID_VALUE, "%s(size < 0)", func);
         return;
      }

      /* Which usage enums are legal depends on the API: ES 1.x accepts only
       * the *_DRAW values (minus STREAM); the READ/COPY variants need
       * desktop GL or ES 3.0+.
       */
      switch (usage) {
      case GL_STREAM_DRAW_ARB:
         valid_usage = (ctx->API != API_OPENGLES);
         break;
      case GL_STATIC_DRAW_ARB:
      case GL_DYNAMIC_DRAW_ARB:
         valid_usage = true;
         break;
      case GL_STREAM_READ_ARB:
      case GL_STREAM_COPY_ARB:
      case GL_STATIC_READ_ARB:
      case GL_STATIC_COPY_ARB:
      case GL_DYNAMIC_READ_ARB:
      case GL_DYNAMIC_COPY_ARB:
         valid_usage = _mesa_is_desktop_gl(ctx) || _mesa_is_gles3(ctx);
         break;
      default:
         valid_usage = false;
         break;
      }

      if (!valid_usage) {
         _mesa_error(ctx, GL_INVALID_ENUM, "%s(invalid usage: %s)", func,
                     _mesa_enum_to_string(usage));
         return;
      }

      /* Immutable stores (glBufferStorage) cannot be respecified. */
      if (bufObj->Immutable || bufObj->HandleAllocated) {
         _mesa_error(ctx, GL_INVALID_OPERATION, "%s(immutable)", func);
         return;
      }
   }

   /* Unmap the existing buffer. We'll replace it now. Not an error. */
   _mesa_buffer_unmap_all_mappings(ctx, bufObj);

   FLUSH_VERTICES(ctx, 0, 0);

   /* New store invalidates the cached min/max index ranges. */
   bufObj->MinMaxCacheDirty = true;

#ifdef VBO_DEBUG
   printf("glBufferDataARB(%u, sz %ld, from %p, usage 0x%x)\n",
               bufObj->Name, size, data, usage);
#endif

#ifdef BOUNDS_CHECK
   size += 100;
#endif

   /* Mutable stores are always created mappable for read and write and
    * updatable with BufferSubData.
    */
   if (!_mesa_bufferobj_data(ctx, target, size, data, usage,
                             GL_MAP_READ_BIT |
                             GL_MAP_WRITE_BIT |
                             GL_DYNAMIC_STORAGE_BIT,
                             bufObj)) {
      if (target == GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD) {
         if (!no_error) {
            /* From GL_AMD_pinned_memory:
             *
             *   INVALID_OPERATION is generated by BufferData if <target> is
             *   EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD, and the store cannot be
             *   mapped to the GPU address space.
             */
            _mesa_error(ctx, GL_INVALID_OPERATION, "%s", func);
         }
      } else {
         _mesa_error(ctx, GL_OUT_OF_MEMORY, "%s", func);
      }
   }
}
2421
2422 static void
buffer_data_error(struct gl_context * ctx,struct gl_buffer_object * bufObj,GLenum target,GLsizeiptr size,const GLvoid * data,GLenum usage,const char * func)2423 buffer_data_error(struct gl_context *ctx, struct gl_buffer_object *bufObj,
2424 GLenum target, GLsizeiptr size, const GLvoid *data,
2425 GLenum usage, const char *func)
2426 {
2427 buffer_data(ctx, bufObj, target, size, data, usage, func, false);
2428 }
2429
2430 static void
buffer_data_no_error(struct gl_context * ctx,struct gl_buffer_object * bufObj,GLenum target,GLsizeiptr size,const GLvoid * data,GLenum usage,const char * func)2431 buffer_data_no_error(struct gl_context *ctx, struct gl_buffer_object *bufObj,
2432 GLenum target, GLsizeiptr size, const GLvoid *data,
2433 GLenum usage, const char *func)
2434 {
2435 buffer_data(ctx, bufObj, target, size, data, usage, func, true);
2436 }
2437
2438 void
_mesa_buffer_data(struct gl_context * ctx,struct gl_buffer_object * bufObj,GLenum target,GLsizeiptr size,const GLvoid * data,GLenum usage,const char * func)2439 _mesa_buffer_data(struct gl_context *ctx, struct gl_buffer_object *bufObj,
2440 GLenum target, GLsizeiptr size, const GLvoid *data,
2441 GLenum usage, const char *func)
2442 {
2443 buffer_data_error(ctx, bufObj, target, size, data, usage, func);
2444 }
2445
2446 void GLAPIENTRY
_mesa_BufferData_no_error(GLenum target,GLsizeiptr size,const GLvoid * data,GLenum usage)2447 _mesa_BufferData_no_error(GLenum target, GLsizeiptr size, const GLvoid *data,
2448 GLenum usage)
2449 {
2450 GET_CURRENT_CONTEXT(ctx);
2451
2452 struct gl_buffer_object **bufObj = get_buffer_target(ctx, target, true);
2453 buffer_data_no_error(ctx, *bufObj, target, size, data, usage,
2454 "glBufferData");
2455 }
2456
2457 void GLAPIENTRY
_mesa_BufferData(GLenum target,GLsizeiptr size,const GLvoid * data,GLenum usage)2458 _mesa_BufferData(GLenum target, GLsizeiptr size,
2459 const GLvoid *data, GLenum usage)
2460 {
2461 GET_CURRENT_CONTEXT(ctx);
2462 struct gl_buffer_object *bufObj;
2463
2464 bufObj = get_buffer(ctx, "glBufferData", target, GL_INVALID_OPERATION);
2465 if (!bufObj)
2466 return;
2467
2468 _mesa_buffer_data(ctx, bufObj, target, size, data, usage,
2469 "glBufferData");
2470 }
2471
2472 void GLAPIENTRY
_mesa_NamedBufferData_no_error(GLuint buffer,GLsizeiptr size,const GLvoid * data,GLenum usage)2473 _mesa_NamedBufferData_no_error(GLuint buffer, GLsizeiptr size,
2474 const GLvoid *data, GLenum usage)
2475 {
2476 GET_CURRENT_CONTEXT(ctx);
2477
2478 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2479 buffer_data_no_error(ctx, bufObj, GL_NONE, size, data, usage,
2480 "glNamedBufferData");
2481 }
2482
2483 void GLAPIENTRY
_mesa_NamedBufferData(GLuint buffer,GLsizeiptr size,const GLvoid * data,GLenum usage)2484 _mesa_NamedBufferData(GLuint buffer, GLsizeiptr size, const GLvoid *data,
2485 GLenum usage)
2486 {
2487 GET_CURRENT_CONTEXT(ctx);
2488 struct gl_buffer_object *bufObj;
2489
2490 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer, "glNamedBufferData");
2491 if (!bufObj)
2492 return;
2493
2494 /* In direct state access, buffer objects have an unspecified target since
2495 * they are not required to be bound.
2496 */
2497 _mesa_buffer_data(ctx, bufObj, GL_NONE, size, data, usage,
2498 "glNamedBufferData");
2499 }
2500
2501 void GLAPIENTRY
_mesa_NamedBufferDataEXT(GLuint buffer,GLsizeiptr size,const GLvoid * data,GLenum usage)2502 _mesa_NamedBufferDataEXT(GLuint buffer, GLsizeiptr size, const GLvoid *data,
2503 GLenum usage)
2504 {
2505 GET_CURRENT_CONTEXT(ctx);
2506 struct gl_buffer_object *bufObj;
2507
2508 if (!buffer) {
2509 _mesa_error(ctx, GL_INVALID_OPERATION,
2510 "glNamedBufferDataEXT(buffer=0)");
2511 return;
2512 }
2513
2514 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2515 if (!handle_bind_buffer_gen(ctx, buffer,
2516 &bufObj, "glNamedBufferDataEXT", false))
2517 return;
2518
2519 _mesa_buffer_data(ctx, bufObj, GL_NONE, size, data, usage,
2520 "glNamedBufferDataEXT");
2521 }
2522
2523 static bool
validate_buffer_sub_data(struct gl_context * ctx,struct gl_buffer_object * bufObj,GLintptr offset,GLsizeiptr size,const char * func)2524 validate_buffer_sub_data(struct gl_context *ctx,
2525 struct gl_buffer_object *bufObj,
2526 GLintptr offset, GLsizeiptr size,
2527 const char *func)
2528 {
2529 if (!buffer_object_subdata_range_good(ctx, bufObj, offset, size,
2530 true, func)) {
2531 /* error already recorded */
2532 return false;
2533 }
2534
2535 if (bufObj->Immutable &&
2536 !(bufObj->StorageFlags & GL_DYNAMIC_STORAGE_BIT)) {
2537 _mesa_error(ctx, GL_INVALID_OPERATION, "%s", func);
2538 return false;
2539 }
2540
2541 if ((bufObj->Usage == GL_STATIC_DRAW ||
2542 bufObj->Usage == GL_STATIC_COPY) &&
2543 bufObj->NumSubDataCalls >= BUFFER_WARNING_CALL_COUNT - 1) {
2544 /* If the application declared the buffer as static draw/copy or stream
2545 * draw, it should not be frequently modified with glBufferSubData.
2546 */
2547 BUFFER_USAGE_WARNING(ctx,
2548 "using %s(buffer %u, offset %u, size %u) to "
2549 "update a %s buffer",
2550 func, bufObj->Name, offset, size,
2551 _mesa_enum_to_string(bufObj->Usage));
2552 }
2553
2554 return true;
2555 }
2556
2557
2558 /**
2559 * Implementation for glBufferSubData and glNamedBufferSubData.
2560 *
2561 * \param ctx GL context.
2562 * \param bufObj The buffer object.
2563 * \param offset Offset of the first byte of the subdata range.
2564 * \param size Size, in bytes, of the subdata range.
2565 * \param data The data store.
2566 * \param func Name of calling function for recording errors.
2567 *
2568 */
2569 void
_mesa_buffer_sub_data(struct gl_context * ctx,struct gl_buffer_object * bufObj,GLintptr offset,GLsizeiptr size,const GLvoid * data)2570 _mesa_buffer_sub_data(struct gl_context *ctx, struct gl_buffer_object *bufObj,
2571 GLintptr offset, GLsizeiptr size, const GLvoid *data)
2572 {
2573 if (size == 0)
2574 return;
2575
2576 bufObj->NumSubDataCalls++;
2577 bufObj->MinMaxCacheDirty = true;
2578
2579 _mesa_bufferobj_subdata(ctx, offset, size, data, bufObj);
2580 }
2581
2582
2583 static ALWAYS_INLINE void
buffer_sub_data(GLenum target,GLuint buffer,GLintptr offset,GLsizeiptr size,const GLvoid * data,bool dsa,bool no_error,const char * func)2584 buffer_sub_data(GLenum target, GLuint buffer, GLintptr offset,
2585 GLsizeiptr size, const GLvoid *data,
2586 bool dsa, bool no_error, const char *func)
2587 {
2588 GET_CURRENT_CONTEXT(ctx);
2589 struct gl_buffer_object *bufObj;
2590
2591 if (dsa) {
2592 if (no_error) {
2593 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2594 } else {
2595 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer, func);
2596 if (!bufObj)
2597 return;
2598 }
2599 } else {
2600 if (no_error) {
2601 struct gl_buffer_object **bufObjPtr = get_buffer_target(ctx, target, true);
2602 bufObj = *bufObjPtr;
2603 } else {
2604 bufObj = get_buffer(ctx, func, target, GL_INVALID_OPERATION);
2605 if (!bufObj)
2606 return;
2607 }
2608 }
2609
2610 if (no_error || validate_buffer_sub_data(ctx, bufObj, offset, size, func))
2611 _mesa_buffer_sub_data(ctx, bufObj, offset, size, data);
2612 }
2613
2614
2615 void GLAPIENTRY
_mesa_BufferSubData_no_error(GLenum target,GLintptr offset,GLsizeiptr size,const GLvoid * data)2616 _mesa_BufferSubData_no_error(GLenum target, GLintptr offset,
2617 GLsizeiptr size, const GLvoid *data)
2618 {
2619 buffer_sub_data(target, 0, offset, size, data, false, true,
2620 "glBufferSubData");
2621 }
2622
2623
2624 void GLAPIENTRY
_mesa_BufferSubData(GLenum target,GLintptr offset,GLsizeiptr size,const GLvoid * data)2625 _mesa_BufferSubData(GLenum target, GLintptr offset,
2626 GLsizeiptr size, const GLvoid *data)
2627 {
2628 buffer_sub_data(target, 0, offset, size, data, false, false,
2629 "glBufferSubData");
2630 }
2631
2632 void GLAPIENTRY
_mesa_NamedBufferSubData_no_error(GLuint buffer,GLintptr offset,GLsizeiptr size,const GLvoid * data)2633 _mesa_NamedBufferSubData_no_error(GLuint buffer, GLintptr offset,
2634 GLsizeiptr size, const GLvoid *data)
2635 {
2636 buffer_sub_data(0, buffer, offset, size, data, true, true,
2637 "glNamedBufferSubData");
2638 }
2639
2640 void GLAPIENTRY
_mesa_NamedBufferSubData(GLuint buffer,GLintptr offset,GLsizeiptr size,const GLvoid * data)2641 _mesa_NamedBufferSubData(GLuint buffer, GLintptr offset,
2642 GLsizeiptr size, const GLvoid *data)
2643 {
2644 buffer_sub_data(0, buffer, offset, size, data, true, false,
2645 "glNamedBufferSubData");
2646 }
2647
2648 void GLAPIENTRY
_mesa_NamedBufferSubDataEXT(GLuint buffer,GLintptr offset,GLsizeiptr size,const GLvoid * data)2649 _mesa_NamedBufferSubDataEXT(GLuint buffer, GLintptr offset,
2650 GLsizeiptr size, const GLvoid *data)
2651 {
2652 GET_CURRENT_CONTEXT(ctx);
2653 struct gl_buffer_object *bufObj;
2654
2655 if (!buffer) {
2656 _mesa_error(ctx, GL_INVALID_OPERATION,
2657 "glNamedBufferSubDataEXT(buffer=0)");
2658 return;
2659 }
2660
2661 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2662 if (!handle_bind_buffer_gen(ctx, buffer,
2663 &bufObj, "glNamedBufferSubDataEXT", false))
2664 return;
2665
2666 if (validate_buffer_sub_data(ctx, bufObj, offset, size,
2667 "glNamedBufferSubDataEXT")) {
2668 _mesa_buffer_sub_data(ctx, bufObj, offset, size, data);
2669 }
2670 }
2671
2672
2673 void GLAPIENTRY
_mesa_GetBufferSubData(GLenum target,GLintptr offset,GLsizeiptr size,GLvoid * data)2674 _mesa_GetBufferSubData(GLenum target, GLintptr offset,
2675 GLsizeiptr size, GLvoid *data)
2676 {
2677 GET_CURRENT_CONTEXT(ctx);
2678 struct gl_buffer_object *bufObj;
2679
2680 bufObj = get_buffer(ctx, "glGetBufferSubData", target,
2681 GL_INVALID_OPERATION);
2682 if (!bufObj)
2683 return;
2684
2685 if (!buffer_object_subdata_range_good(ctx, bufObj, offset, size, false,
2686 "glGetBufferSubData")) {
2687 return;
2688 }
2689
2690 bufferobj_get_subdata(ctx, offset, size, data, bufObj);
2691 }
2692
2693 void GLAPIENTRY
_mesa_GetNamedBufferSubData(GLuint buffer,GLintptr offset,GLsizeiptr size,GLvoid * data)2694 _mesa_GetNamedBufferSubData(GLuint buffer, GLintptr offset,
2695 GLsizeiptr size, GLvoid *data)
2696 {
2697 GET_CURRENT_CONTEXT(ctx);
2698 struct gl_buffer_object *bufObj;
2699
2700 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer,
2701 "glGetNamedBufferSubData");
2702 if (!bufObj)
2703 return;
2704
2705 if (!buffer_object_subdata_range_good(ctx, bufObj, offset, size, false,
2706 "glGetNamedBufferSubData")) {
2707 return;
2708 }
2709
2710 bufferobj_get_subdata(ctx, offset, size, data, bufObj);
2711 }
2712
2713
2714 void GLAPIENTRY
_mesa_GetNamedBufferSubDataEXT(GLuint buffer,GLintptr offset,GLsizeiptr size,GLvoid * data)2715 _mesa_GetNamedBufferSubDataEXT(GLuint buffer, GLintptr offset,
2716 GLsizeiptr size, GLvoid *data)
2717 {
2718 GET_CURRENT_CONTEXT(ctx);
2719 struct gl_buffer_object *bufObj;
2720
2721 if (!buffer) {
2722 _mesa_error(ctx, GL_INVALID_OPERATION,
2723 "glGetNamedBufferSubDataEXT(buffer=0)");
2724 return;
2725 }
2726
2727 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2728 if (!handle_bind_buffer_gen(ctx, buffer,
2729 &bufObj, "glGetNamedBufferSubDataEXT", false))
2730 return;
2731
2732 if (!buffer_object_subdata_range_good(ctx, bufObj, offset, size, false,
2733 "glGetNamedBufferSubDataEXT")) {
2734 return;
2735 }
2736
2737 bufferobj_get_subdata(ctx, offset, size, data, bufObj);
2738 }
2739
2740 /**
2741 * \param subdata true if caller is *SubData, false if *Data
2742 */
2743 static ALWAYS_INLINE void
clear_buffer_sub_data(struct gl_context * ctx,struct gl_buffer_object * bufObj,GLenum internalformat,GLintptr offset,GLsizeiptr size,GLenum format,GLenum type,const GLvoid * data,const char * func,bool subdata,bool no_error)2744 clear_buffer_sub_data(struct gl_context *ctx, struct gl_buffer_object *bufObj,
2745 GLenum internalformat, GLintptr offset, GLsizeiptr size,
2746 GLenum format, GLenum type, const GLvoid *data,
2747 const char *func, bool subdata, bool no_error)
2748 {
2749 mesa_format mesaFormat;
2750 GLubyte clearValue[MAX_PIXEL_BYTES];
2751 GLsizeiptr clearValueSize;
2752
2753 /* This checks for disallowed mappings. */
2754 if (!no_error && !buffer_object_subdata_range_good(ctx, bufObj, offset, size,
2755 subdata, func)) {
2756 return;
2757 }
2758
2759 if (no_error) {
2760 mesaFormat = _mesa_get_texbuffer_format(ctx, internalformat);
2761 } else {
2762 mesaFormat = validate_clear_buffer_format(ctx, internalformat,
2763 format, type, func);
2764 }
2765
2766 if (mesaFormat == MESA_FORMAT_NONE)
2767 return;
2768
2769 clearValueSize = _mesa_get_format_bytes(mesaFormat);
2770 if (!no_error &&
2771 (offset % clearValueSize != 0 || size % clearValueSize != 0)) {
2772 _mesa_error(ctx, GL_INVALID_VALUE,
2773 "%s(offset or size is not a multiple of "
2774 "internalformat size)", func);
2775 return;
2776 }
2777
2778 /* Bail early. Negative size has already been checked. */
2779 if (size == 0)
2780 return;
2781
2782 bufObj->MinMaxCacheDirty = true;
2783
2784 if (!ctx->pipe->clear_buffer) {
2785 clear_buffer_subdata_sw(ctx, offset, size,
2786 data, clearValueSize, bufObj);
2787 return;
2788 }
2789
2790 if (!data)
2791 memset(clearValue, 0, MAX_PIXEL_BYTES);
2792 else if (!convert_clear_buffer_data(ctx, mesaFormat, clearValue,
2793 format, type, data, func)) {
2794 return;
2795 }
2796
2797 ctx->pipe->clear_buffer(ctx->pipe, bufObj->buffer, offset, size,
2798 clearValue, clearValueSize);
2799 }
2800
2801 static void
clear_buffer_sub_data_error(struct gl_context * ctx,struct gl_buffer_object * bufObj,GLenum internalformat,GLintptr offset,GLsizeiptr size,GLenum format,GLenum type,const GLvoid * data,const char * func,bool subdata)2802 clear_buffer_sub_data_error(struct gl_context *ctx,
2803 struct gl_buffer_object *bufObj,
2804 GLenum internalformat, GLintptr offset,
2805 GLsizeiptr size, GLenum format, GLenum type,
2806 const GLvoid *data, const char *func, bool subdata)
2807 {
2808 clear_buffer_sub_data(ctx, bufObj, internalformat, offset, size, format,
2809 type, data, func, subdata, false);
2810 }
2811
2812
2813 static void
clear_buffer_sub_data_no_error(struct gl_context * ctx,struct gl_buffer_object * bufObj,GLenum internalformat,GLintptr offset,GLsizeiptr size,GLenum format,GLenum type,const GLvoid * data,const char * func,bool subdata)2814 clear_buffer_sub_data_no_error(struct gl_context *ctx,
2815 struct gl_buffer_object *bufObj,
2816 GLenum internalformat, GLintptr offset,
2817 GLsizeiptr size, GLenum format, GLenum type,
2818 const GLvoid *data, const char *func,
2819 bool subdata)
2820 {
2821 clear_buffer_sub_data(ctx, bufObj, internalformat, offset, size, format,
2822 type, data, func, subdata, true);
2823 }
2824
2825
2826 void GLAPIENTRY
_mesa_ClearBufferData_no_error(GLenum target,GLenum internalformat,GLenum format,GLenum type,const GLvoid * data)2827 _mesa_ClearBufferData_no_error(GLenum target, GLenum internalformat,
2828 GLenum format, GLenum type, const GLvoid *data)
2829 {
2830 GET_CURRENT_CONTEXT(ctx);
2831
2832 struct gl_buffer_object **bufObj = get_buffer_target(ctx, target, true);
2833 clear_buffer_sub_data_no_error(ctx, *bufObj, internalformat, 0,
2834 (*bufObj)->Size, format, type, data,
2835 "glClearBufferData", false);
2836 }
2837
2838
2839 void GLAPIENTRY
_mesa_ClearBufferData(GLenum target,GLenum internalformat,GLenum format,GLenum type,const GLvoid * data)2840 _mesa_ClearBufferData(GLenum target, GLenum internalformat, GLenum format,
2841 GLenum type, const GLvoid *data)
2842 {
2843 GET_CURRENT_CONTEXT(ctx);
2844 struct gl_buffer_object *bufObj;
2845
2846 bufObj = get_buffer(ctx, "glClearBufferData", target, GL_INVALID_VALUE);
2847 if (!bufObj)
2848 return;
2849
2850 clear_buffer_sub_data_error(ctx, bufObj, internalformat, 0, bufObj->Size,
2851 format, type, data, "glClearBufferData", false);
2852 }
2853
2854
2855 void GLAPIENTRY
_mesa_ClearNamedBufferData_no_error(GLuint buffer,GLenum internalformat,GLenum format,GLenum type,const GLvoid * data)2856 _mesa_ClearNamedBufferData_no_error(GLuint buffer, GLenum internalformat,
2857 GLenum format, GLenum type,
2858 const GLvoid *data)
2859 {
2860 GET_CURRENT_CONTEXT(ctx);
2861
2862 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2863 clear_buffer_sub_data_no_error(ctx, bufObj, internalformat, 0, bufObj->Size,
2864 format, type, data, "glClearNamedBufferData",
2865 false);
2866 }
2867
2868
2869 void GLAPIENTRY
_mesa_ClearNamedBufferData(GLuint buffer,GLenum internalformat,GLenum format,GLenum type,const GLvoid * data)2870 _mesa_ClearNamedBufferData(GLuint buffer, GLenum internalformat,
2871 GLenum format, GLenum type, const GLvoid *data)
2872 {
2873 GET_CURRENT_CONTEXT(ctx);
2874 struct gl_buffer_object *bufObj;
2875
2876 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer, "glClearNamedBufferData");
2877 if (!bufObj)
2878 return;
2879
2880 clear_buffer_sub_data_error(ctx, bufObj, internalformat, 0, bufObj->Size,
2881 format, type, data, "glClearNamedBufferData",
2882 false);
2883 }
2884
2885
2886 void GLAPIENTRY
_mesa_ClearNamedBufferDataEXT(GLuint buffer,GLenum internalformat,GLenum format,GLenum type,const GLvoid * data)2887 _mesa_ClearNamedBufferDataEXT(GLuint buffer, GLenum internalformat,
2888 GLenum format, GLenum type, const GLvoid *data)
2889 {
2890 GET_CURRENT_CONTEXT(ctx);
2891 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2892 if (!handle_bind_buffer_gen(ctx, buffer,
2893 &bufObj, "glClearNamedBufferDataEXT", false))
2894 return;
2895
2896 clear_buffer_sub_data_error(ctx, bufObj, internalformat, 0, bufObj->Size,
2897 format, type, data, "glClearNamedBufferDataEXT",
2898 false);
2899 }
2900
2901
2902 void GLAPIENTRY
_mesa_ClearBufferSubData_no_error(GLenum target,GLenum internalformat,GLintptr offset,GLsizeiptr size,GLenum format,GLenum type,const GLvoid * data)2903 _mesa_ClearBufferSubData_no_error(GLenum target, GLenum internalformat,
2904 GLintptr offset, GLsizeiptr size,
2905 GLenum format, GLenum type,
2906 const GLvoid *data)
2907 {
2908 GET_CURRENT_CONTEXT(ctx);
2909
2910 struct gl_buffer_object **bufObj = get_buffer_target(ctx, target, true);
2911 clear_buffer_sub_data_no_error(ctx, *bufObj, internalformat, offset, size,
2912 format, type, data, "glClearBufferSubData",
2913 true);
2914 }
2915
2916
2917 void GLAPIENTRY
_mesa_ClearBufferSubData(GLenum target,GLenum internalformat,GLintptr offset,GLsizeiptr size,GLenum format,GLenum type,const GLvoid * data)2918 _mesa_ClearBufferSubData(GLenum target, GLenum internalformat,
2919 GLintptr offset, GLsizeiptr size,
2920 GLenum format, GLenum type,
2921 const GLvoid *data)
2922 {
2923 GET_CURRENT_CONTEXT(ctx);
2924 struct gl_buffer_object *bufObj;
2925
2926 bufObj = get_buffer(ctx, "glClearBufferSubData", target, GL_INVALID_VALUE);
2927 if (!bufObj)
2928 return;
2929
2930 clear_buffer_sub_data_error(ctx, bufObj, internalformat, offset, size,
2931 format, type, data, "glClearBufferSubData",
2932 true);
2933 }
2934
2935
2936 void GLAPIENTRY
_mesa_ClearNamedBufferSubData_no_error(GLuint buffer,GLenum internalformat,GLintptr offset,GLsizeiptr size,GLenum format,GLenum type,const GLvoid * data)2937 _mesa_ClearNamedBufferSubData_no_error(GLuint buffer, GLenum internalformat,
2938 GLintptr offset, GLsizeiptr size,
2939 GLenum format, GLenum type,
2940 const GLvoid *data)
2941 {
2942 GET_CURRENT_CONTEXT(ctx);
2943
2944 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2945 clear_buffer_sub_data_no_error(ctx, bufObj, internalformat, offset, size,
2946 format, type, data,
2947 "glClearNamedBufferSubData", true);
2948 }
2949
2950
2951 void GLAPIENTRY
_mesa_ClearNamedBufferSubData(GLuint buffer,GLenum internalformat,GLintptr offset,GLsizeiptr size,GLenum format,GLenum type,const GLvoid * data)2952 _mesa_ClearNamedBufferSubData(GLuint buffer, GLenum internalformat,
2953 GLintptr offset, GLsizeiptr size,
2954 GLenum format, GLenum type,
2955 const GLvoid *data)
2956 {
2957 GET_CURRENT_CONTEXT(ctx);
2958 struct gl_buffer_object *bufObj;
2959
2960 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer,
2961 "glClearNamedBufferSubData");
2962 if (!bufObj)
2963 return;
2964
2965 clear_buffer_sub_data_error(ctx, bufObj, internalformat, offset, size,
2966 format, type, data, "glClearNamedBufferSubData",
2967 true);
2968 }
2969
2970 void GLAPIENTRY
_mesa_ClearNamedBufferSubDataEXT(GLuint buffer,GLenum internalformat,GLintptr offset,GLsizeiptr size,GLenum format,GLenum type,const GLvoid * data)2971 _mesa_ClearNamedBufferSubDataEXT(GLuint buffer, GLenum internalformat,
2972 GLintptr offset, GLsizeiptr size,
2973 GLenum format, GLenum type,
2974 const GLvoid *data)
2975 {
2976 GET_CURRENT_CONTEXT(ctx);
2977 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2978 if (!handle_bind_buffer_gen(ctx, buffer,
2979 &bufObj, "glClearNamedBufferSubDataEXT", false))
2980 return;
2981
2982 clear_buffer_sub_data_error(ctx, bufObj, internalformat, offset, size,
2983 format, type, data, "glClearNamedBufferSubDataEXT",
2984 true);
2985 }
2986
2987 static GLboolean
unmap_buffer(struct gl_context * ctx,struct gl_buffer_object * bufObj)2988 unmap_buffer(struct gl_context *ctx, struct gl_buffer_object *bufObj)
2989 {
2990 GLboolean status = _mesa_bufferobj_unmap(ctx, bufObj, MAP_USER);
2991 bufObj->Mappings[MAP_USER].AccessFlags = 0;
2992 assert(bufObj->Mappings[MAP_USER].Pointer == NULL);
2993 assert(bufObj->Mappings[MAP_USER].Offset == 0);
2994 assert(bufObj->Mappings[MAP_USER].Length == 0);
2995
2996 return status;
2997 }
2998
/**
 * Error-checking front end for glUnmapBuffer / glUnmapNamedBuffer.
 *
 * Generates GL_INVALID_OPERATION if the buffer has no active user mapping,
 * then (in debug builds only) scans the mapping for out-of-bounds writes or
 * unchanged bytes before delegating the actual unmap to unmap_buffer().
 *
 * \param func  API entry-point name used in error messages.
 * \return the unmap status, or GL_FALSE on validation failure.
 */
static GLboolean
validate_and_unmap_buffer(struct gl_context *ctx,
                          struct gl_buffer_object *bufObj,
                          const char *func)
{
   ASSERT_OUTSIDE_BEGIN_END_WITH_RETVAL(ctx, GL_FALSE);

   if (!_mesa_bufferobj_mapped(bufObj, MAP_USER)) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "%s(buffer is not mapped)", func);
      return GL_FALSE;
   }

#ifdef BOUNDS_CHECK
   /* Debug aid: map_buffer_range() over-allocates by 100 bytes and fills
    * the tail with a magic value; detect writes past the requested size.
    */
   if (bufObj->Mappings[MAP_USER].AccessFlags != GL_READ_ONLY_ARB) {
      GLubyte *buf = (GLubyte *) bufObj->Mappings[MAP_USER].Pointer;
      GLuint i;
      /* check that last 100 bytes are still = magic value */
      for (i = 0; i < 100; i++) {
         GLuint pos = bufObj->Size - i - 1;
         if (buf[pos] != 123) {
            _mesa_warning(ctx, "Out of bounds buffer object write detected"
                          " at position %d (value = %u)\n",
                          pos, buf[pos]);
         }
      }
   }
#endif

#ifdef VBO_DEBUG
   /* Debug aid: the map path pre-filled the buffer with a known byte
    * pattern; report how much of a writable mapping was never overwritten.
    */
   if (bufObj->Mappings[MAP_USER].AccessFlags & GL_MAP_WRITE_BIT) {
      GLuint i, unchanged = 0;
      GLubyte *b = (GLubyte *) bufObj->Mappings[MAP_USER].Pointer;
      GLint pos = -1;
      /* check which bytes changed */
      for (i = 0; i < bufObj->Size - 1; i++) {
         if (b[i] == (i & 0xff) && b[i+1] == ((i+1) & 0xff)) {
            unchanged++;
            if (pos == -1)
               pos = i;
         }
      }
      if (unchanged) {
         printf("glUnmapBufferARB(%u): %u of %ld unchanged, starting at %d\n",
                bufObj->Name, unchanged, bufObj->Size, pos);
      }
   }
#endif

   return unmap_buffer(ctx, bufObj);
}
3050
3051 GLboolean GLAPIENTRY
_mesa_UnmapBuffer_no_error(GLenum target)3052 _mesa_UnmapBuffer_no_error(GLenum target)
3053 {
3054 GET_CURRENT_CONTEXT(ctx);
3055 struct gl_buffer_object **bufObjPtr = get_buffer_target(ctx, target, true);
3056 struct gl_buffer_object *bufObj = *bufObjPtr;
3057
3058 return unmap_buffer(ctx, bufObj);
3059 }
3060
3061 GLboolean GLAPIENTRY
_mesa_UnmapBuffer(GLenum target)3062 _mesa_UnmapBuffer(GLenum target)
3063 {
3064 GET_CURRENT_CONTEXT(ctx);
3065 struct gl_buffer_object *bufObj;
3066
3067 bufObj = get_buffer(ctx, "glUnmapBuffer", target, GL_INVALID_OPERATION);
3068 if (!bufObj)
3069 return GL_FALSE;
3070
3071 return validate_and_unmap_buffer(ctx, bufObj, "glUnmapBuffer");
3072 }
3073
3074 GLboolean GLAPIENTRY
_mesa_UnmapNamedBufferEXT_no_error(GLuint buffer)3075 _mesa_UnmapNamedBufferEXT_no_error(GLuint buffer)
3076 {
3077 GET_CURRENT_CONTEXT(ctx);
3078 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
3079
3080 return unmap_buffer(ctx, bufObj);
3081 }
3082
3083 GLboolean GLAPIENTRY
_mesa_UnmapNamedBufferEXT(GLuint buffer)3084 _mesa_UnmapNamedBufferEXT(GLuint buffer)
3085 {
3086 GET_CURRENT_CONTEXT(ctx);
3087 struct gl_buffer_object *bufObj;
3088
3089 if (!buffer) {
3090 _mesa_error(ctx, GL_INVALID_OPERATION,
3091 "glUnmapNamedBufferEXT(buffer=0)");
3092 return GL_FALSE;
3093 }
3094
3095 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer, "glUnmapNamedBuffer");
3096 if (!bufObj)
3097 return GL_FALSE;
3098
3099 return validate_and_unmap_buffer(ctx, bufObj, "glUnmapNamedBuffer");
3100 }
3101
3102
3103 static bool
get_buffer_parameter(struct gl_context * ctx,struct gl_buffer_object * bufObj,GLenum pname,GLint64 * params,const char * func)3104 get_buffer_parameter(struct gl_context *ctx,
3105 struct gl_buffer_object *bufObj, GLenum pname,
3106 GLint64 *params, const char *func)
3107 {
3108 switch (pname) {
3109 case GL_BUFFER_SIZE_ARB:
3110 *params = bufObj->Size;
3111 break;
3112 case GL_BUFFER_USAGE_ARB:
3113 *params = bufObj->Usage;
3114 break;
3115 case GL_BUFFER_ACCESS_ARB:
3116 *params = simplified_access_mode(ctx,
3117 bufObj->Mappings[MAP_USER].AccessFlags);
3118 break;
3119 case GL_BUFFER_MAPPED_ARB:
3120 *params = _mesa_bufferobj_mapped(bufObj, MAP_USER);
3121 break;
3122 case GL_BUFFER_ACCESS_FLAGS:
3123 if (!ctx->Extensions.ARB_map_buffer_range)
3124 goto invalid_pname;
3125 *params = bufObj->Mappings[MAP_USER].AccessFlags;
3126 break;
3127 case GL_BUFFER_MAP_OFFSET:
3128 if (!ctx->Extensions.ARB_map_buffer_range)
3129 goto invalid_pname;
3130 *params = bufObj->Mappings[MAP_USER].Offset;
3131 break;
3132 case GL_BUFFER_MAP_LENGTH:
3133 if (!ctx->Extensions.ARB_map_buffer_range)
3134 goto invalid_pname;
3135 *params = bufObj->Mappings[MAP_USER].Length;
3136 break;
3137 case GL_BUFFER_IMMUTABLE_STORAGE:
3138 if (!ctx->Extensions.ARB_buffer_storage)
3139 goto invalid_pname;
3140 *params = bufObj->Immutable;
3141 break;
3142 case GL_BUFFER_STORAGE_FLAGS:
3143 if (!ctx->Extensions.ARB_buffer_storage)
3144 goto invalid_pname;
3145 *params = bufObj->StorageFlags;
3146 break;
3147 default:
3148 goto invalid_pname;
3149 }
3150
3151 return true;
3152
3153 invalid_pname:
3154 _mesa_error(ctx, GL_INVALID_ENUM, "%s(invalid pname: %s)", func,
3155 _mesa_enum_to_string(pname));
3156 return false;
3157 }
3158
3159 void GLAPIENTRY
_mesa_GetBufferParameteriv(GLenum target,GLenum pname,GLint * params)3160 _mesa_GetBufferParameteriv(GLenum target, GLenum pname, GLint *params)
3161 {
3162 GET_CURRENT_CONTEXT(ctx);
3163 struct gl_buffer_object *bufObj;
3164 GLint64 parameter;
3165
3166 bufObj = get_buffer(ctx, "glGetBufferParameteriv", target,
3167 GL_INVALID_OPERATION);
3168 if (!bufObj)
3169 return;
3170
3171 if (!get_buffer_parameter(ctx, bufObj, pname, ¶meter,
3172 "glGetBufferParameteriv"))
3173 return; /* Error already recorded. */
3174
3175 *params = (GLint) parameter;
3176 }
3177
3178 void GLAPIENTRY
_mesa_GetBufferParameteri64v(GLenum target,GLenum pname,GLint64 * params)3179 _mesa_GetBufferParameteri64v(GLenum target, GLenum pname, GLint64 *params)
3180 {
3181 GET_CURRENT_CONTEXT(ctx);
3182 struct gl_buffer_object *bufObj;
3183 GLint64 parameter;
3184
3185 bufObj = get_buffer(ctx, "glGetBufferParameteri64v", target,
3186 GL_INVALID_OPERATION);
3187 if (!bufObj)
3188 return;
3189
3190 if (!get_buffer_parameter(ctx, bufObj, pname, ¶meter,
3191 "glGetBufferParameteri64v"))
3192 return; /* Error already recorded. */
3193
3194 *params = parameter;
3195 }
3196
3197 void GLAPIENTRY
_mesa_GetNamedBufferParameteriv(GLuint buffer,GLenum pname,GLint * params)3198 _mesa_GetNamedBufferParameteriv(GLuint buffer, GLenum pname, GLint *params)
3199 {
3200 GET_CURRENT_CONTEXT(ctx);
3201 struct gl_buffer_object *bufObj;
3202 GLint64 parameter;
3203
3204 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer,
3205 "glGetNamedBufferParameteriv");
3206 if (!bufObj)
3207 return;
3208
3209 if (!get_buffer_parameter(ctx, bufObj, pname, ¶meter,
3210 "glGetNamedBufferParameteriv"))
3211 return; /* Error already recorded. */
3212
3213 *params = (GLint) parameter;
3214 }
3215
3216 void GLAPIENTRY
_mesa_GetNamedBufferParameterivEXT(GLuint buffer,GLenum pname,GLint * params)3217 _mesa_GetNamedBufferParameterivEXT(GLuint buffer, GLenum pname, GLint *params)
3218 {
3219 GET_CURRENT_CONTEXT(ctx);
3220 struct gl_buffer_object *bufObj;
3221 GLint64 parameter;
3222
3223 if (!buffer) {
3224 _mesa_error(ctx, GL_INVALID_OPERATION,
3225 "glGetNamedBufferParameterivEXT: buffer=0");
3226 return;
3227 }
3228
3229 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
3230 if (!handle_bind_buffer_gen(ctx, buffer,
3231 &bufObj, "glGetNamedBufferParameterivEXT", false))
3232 return;
3233
3234 if (!get_buffer_parameter(ctx, bufObj, pname, ¶meter,
3235 "glGetNamedBufferParameterivEXT"))
3236 return; /* Error already recorded. */
3237
3238 *params = (GLint) parameter;
3239 }
3240
3241 void GLAPIENTRY
_mesa_GetNamedBufferParameteri64v(GLuint buffer,GLenum pname,GLint64 * params)3242 _mesa_GetNamedBufferParameteri64v(GLuint buffer, GLenum pname,
3243 GLint64 *params)
3244 {
3245 GET_CURRENT_CONTEXT(ctx);
3246 struct gl_buffer_object *bufObj;
3247 GLint64 parameter;
3248
3249 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer,
3250 "glGetNamedBufferParameteri64v");
3251 if (!bufObj)
3252 return;
3253
3254 if (!get_buffer_parameter(ctx, bufObj, pname, ¶meter,
3255 "glGetNamedBufferParameteri64v"))
3256 return; /* Error already recorded. */
3257
3258 *params = parameter;
3259 }
3260
3261
3262 void GLAPIENTRY
_mesa_GetBufferPointerv(GLenum target,GLenum pname,GLvoid ** params)3263 _mesa_GetBufferPointerv(GLenum target, GLenum pname, GLvoid **params)
3264 {
3265 GET_CURRENT_CONTEXT(ctx);
3266 struct gl_buffer_object *bufObj;
3267
3268 if (pname != GL_BUFFER_MAP_POINTER) {
3269 _mesa_error(ctx, GL_INVALID_ENUM, "glGetBufferPointerv(pname != "
3270 "GL_BUFFER_MAP_POINTER)");
3271 return;
3272 }
3273
3274 bufObj = get_buffer(ctx, "glGetBufferPointerv", target,
3275 GL_INVALID_OPERATION);
3276 if (!bufObj)
3277 return;
3278
3279 *params = bufObj->Mappings[MAP_USER].Pointer;
3280 }
3281
3282 void GLAPIENTRY
_mesa_GetNamedBufferPointerv(GLuint buffer,GLenum pname,GLvoid ** params)3283 _mesa_GetNamedBufferPointerv(GLuint buffer, GLenum pname, GLvoid **params)
3284 {
3285 GET_CURRENT_CONTEXT(ctx);
3286 struct gl_buffer_object *bufObj;
3287
3288 if (pname != GL_BUFFER_MAP_POINTER) {
3289 _mesa_error(ctx, GL_INVALID_ENUM, "glGetNamedBufferPointerv(pname != "
3290 "GL_BUFFER_MAP_POINTER)");
3291 return;
3292 }
3293
3294 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer,
3295 "glGetNamedBufferPointerv");
3296 if (!bufObj)
3297 return;
3298
3299 *params = bufObj->Mappings[MAP_USER].Pointer;
3300 }
3301
3302 void GLAPIENTRY
_mesa_GetNamedBufferPointervEXT(GLuint buffer,GLenum pname,GLvoid ** params)3303 _mesa_GetNamedBufferPointervEXT(GLuint buffer, GLenum pname, GLvoid **params)
3304 {
3305 GET_CURRENT_CONTEXT(ctx);
3306 struct gl_buffer_object *bufObj;
3307
3308 if (!buffer) {
3309 _mesa_error(ctx, GL_INVALID_OPERATION,
3310 "glGetNamedBufferPointervEXT(buffer=0)");
3311 return;
3312 }
3313 if (pname != GL_BUFFER_MAP_POINTER) {
3314 _mesa_error(ctx, GL_INVALID_ENUM, "glGetNamedBufferPointervEXT(pname != "
3315 "GL_BUFFER_MAP_POINTER)");
3316 return;
3317 }
3318
3319 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
3320 if (!handle_bind_buffer_gen(ctx, buffer,
3321 &bufObj, "glGetNamedBufferPointervEXT", false))
3322 return;
3323
3324 *params = bufObj->Mappings[MAP_USER].Pointer;
3325 }
3326
/**
 * Validate and execute a buffer-to-buffer copy for glCopyBufferSubData
 * and the named/DSA variants.
 *
 * Errors generated (per the ARB_copy_buffer rules):
 *  - GL_INVALID_OPERATION if either buffer has a disallowed mapping;
 *  - GL_INVALID_VALUE for negative offsets/size, ranges exceeding either
 *    buffer's size, or an overlapping range when src == dst.
 *
 * \param func  API entry-point name used in error messages.
 */
static void
copy_buffer_sub_data(struct gl_context *ctx, struct gl_buffer_object *src,
                     struct gl_buffer_object *dst, GLintptr readOffset,
                     GLintptr writeOffset, GLsizeiptr size, const char *func)
{
   if (_mesa_check_disallowed_mapping(src)) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "%s(readBuffer is mapped)", func);
      return;
   }

   if (_mesa_check_disallowed_mapping(dst)) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "%s(writeBuffer is mapped)", func);
      return;
   }

   if (readOffset < 0) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "%s(readOffset %d < 0)", func, (int) readOffset);
      return;
   }

   if (writeOffset < 0) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "%s(writeOffset %d < 0)", func, (int) writeOffset);
      return;
   }

   if (size < 0) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "%s(size %d < 0)", func, (int) size);
      return;
   }

   /* Range checks are phrased as "size > Size || offset > Size - size" to
    * avoid overflowing offset + size.
    */
   if (size > src->Size || readOffset > src->Size - size) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "%s(readOffset %d + size %d > src_buffer_size %d)", func,
                  (int) readOffset, (int) size, (int) src->Size);
      return;
   }

   if (size > dst->Size || writeOffset > dst->Size - size) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "%s(writeOffset %d + size %d > dst_buffer_size %d)", func,
                  (int) writeOffset, (int) size, (int) dst->Size);
      return;
   }

   /* Same-buffer copies are only legal when the ranges are disjoint. */
   if (src == dst) {
      if (readOffset + size <= writeOffset) {
         /* OK */
      }
      else if (writeOffset + size <= readOffset) {
         /* OK */
      }
      else {
         /* overlapping src/dst is illegal */
         _mesa_error(ctx, GL_INVALID_VALUE,
                     "%s(overlapping src/dst)", func);
         return;
      }
   }

   bufferobj_copy_subdata(ctx, src, dst, readOffset, writeOffset, size);
}
3393
3394 void GLAPIENTRY
_mesa_CopyBufferSubData_no_error(GLenum readTarget,GLenum writeTarget,GLintptr readOffset,GLintptr writeOffset,GLsizeiptr size)3395 _mesa_CopyBufferSubData_no_error(GLenum readTarget, GLenum writeTarget,
3396 GLintptr readOffset, GLintptr writeOffset,
3397 GLsizeiptr size)
3398 {
3399 GET_CURRENT_CONTEXT(ctx);
3400
3401 struct gl_buffer_object **src_ptr = get_buffer_target(ctx, readTarget, true);
3402 struct gl_buffer_object *src = *src_ptr;
3403
3404 struct gl_buffer_object **dst_ptr = get_buffer_target(ctx, writeTarget, true);
3405 struct gl_buffer_object *dst = *dst_ptr;
3406
3407 bufferobj_copy_subdata(ctx, src, dst, readOffset, writeOffset,
3408 size);
3409 }
3410
3411 void GLAPIENTRY
_mesa_CopyBufferSubData(GLenum readTarget,GLenum writeTarget,GLintptr readOffset,GLintptr writeOffset,GLsizeiptr size)3412 _mesa_CopyBufferSubData(GLenum readTarget, GLenum writeTarget,
3413 GLintptr readOffset, GLintptr writeOffset,
3414 GLsizeiptr size)
3415 {
3416 GET_CURRENT_CONTEXT(ctx);
3417 struct gl_buffer_object *src, *dst;
3418
3419 src = get_buffer(ctx, "glCopyBufferSubData", readTarget,
3420 GL_INVALID_OPERATION);
3421 if (!src)
3422 return;
3423
3424 dst = get_buffer(ctx, "glCopyBufferSubData", writeTarget,
3425 GL_INVALID_OPERATION);
3426 if (!dst)
3427 return;
3428
3429 copy_buffer_sub_data(ctx, src, dst, readOffset, writeOffset, size,
3430 "glCopyBufferSubData");
3431 }
3432
3433 void GLAPIENTRY
_mesa_NamedCopyBufferSubDataEXT(GLuint readBuffer,GLuint writeBuffer,GLintptr readOffset,GLintptr writeOffset,GLsizeiptr size)3434 _mesa_NamedCopyBufferSubDataEXT(GLuint readBuffer, GLuint writeBuffer,
3435 GLintptr readOffset, GLintptr writeOffset,
3436 GLsizeiptr size)
3437 {
3438 GET_CURRENT_CONTEXT(ctx);
3439 struct gl_buffer_object *src, *dst;
3440
3441 src = _mesa_lookup_bufferobj(ctx, readBuffer);
3442 if (!handle_bind_buffer_gen(ctx, readBuffer,
3443 &src,
3444 "glNamedCopyBufferSubDataEXT", false))
3445 return;
3446
3447 dst = _mesa_lookup_bufferobj(ctx, writeBuffer);
3448 if (!handle_bind_buffer_gen(ctx, writeBuffer,
3449 &dst, "glNamedCopyBufferSubDataEXT", false))
3450 return;
3451
3452 copy_buffer_sub_data(ctx, src, dst, readOffset, writeOffset, size,
3453 "glNamedCopyBufferSubDataEXT");
3454 }
3455
3456 void GLAPIENTRY
_mesa_CopyNamedBufferSubData_no_error(GLuint readBuffer,GLuint writeBuffer,GLintptr readOffset,GLintptr writeOffset,GLsizeiptr size)3457 _mesa_CopyNamedBufferSubData_no_error(GLuint readBuffer, GLuint writeBuffer,
3458 GLintptr readOffset,
3459 GLintptr writeOffset, GLsizeiptr size)
3460 {
3461 GET_CURRENT_CONTEXT(ctx);
3462
3463 struct gl_buffer_object *src = _mesa_lookup_bufferobj(ctx, readBuffer);
3464 struct gl_buffer_object *dst = _mesa_lookup_bufferobj(ctx, writeBuffer);
3465
3466 bufferobj_copy_subdata(ctx, src, dst, readOffset, writeOffset,
3467 size);
3468 }
3469
3470 void GLAPIENTRY
_mesa_CopyNamedBufferSubData(GLuint readBuffer,GLuint writeBuffer,GLintptr readOffset,GLintptr writeOffset,GLsizeiptr size)3471 _mesa_CopyNamedBufferSubData(GLuint readBuffer, GLuint writeBuffer,
3472 GLintptr readOffset, GLintptr writeOffset,
3473 GLsizeiptr size)
3474 {
3475 GET_CURRENT_CONTEXT(ctx);
3476 struct gl_buffer_object *src, *dst;
3477
3478 src = _mesa_lookup_bufferobj_err(ctx, readBuffer,
3479 "glCopyNamedBufferSubData");
3480 if (!src)
3481 return;
3482
3483 dst = _mesa_lookup_bufferobj_err(ctx, writeBuffer,
3484 "glCopyNamedBufferSubData");
3485 if (!dst)
3486 return;
3487
3488 copy_buffer_sub_data(ctx, src, dst, readOffset, writeOffset, size,
3489 "glCopyNamedBufferSubData");
3490 }
3491
/**
 * Internal (glthread marshalling) entry point implementing BufferSubData,
 * NamedBufferSubData and NamedBufferSubDataEXT as a GPU copy from a
 * temporary source buffer.
 *
 * \param srcBuffer        a struct gl_buffer_object pointer smuggled through
 *                         a GLintptr; the caller transfers its reference to
 *                         this function, which unreferences it on every path.
 * \param dstTargetOrName  a GLenum target when !named, a buffer name when
 *                         named.
 * \param named, ext_dsa   select which of the three public functions'
 *                         lookup/error semantics to emulate.
 */
void GLAPIENTRY
_mesa_InternalBufferSubDataCopyMESA(GLintptr srcBuffer, GLuint srcOffset,
                                    GLuint dstTargetOrName, GLintptr dstOffset,
                                    GLsizeiptr size, GLboolean named,
                                    GLboolean ext_dsa)
{
   GET_CURRENT_CONTEXT(ctx);
   struct gl_buffer_object *src = (struct gl_buffer_object *)srcBuffer;
   struct gl_buffer_object *dst;
   const char *func;

   /* Handle behavior for all 3 variants. */
   if (named && ext_dsa) {
      /* EXT_dsa: auto-create the destination object on first use. */
      func = "glNamedBufferSubDataEXT";
      dst = _mesa_lookup_bufferobj(ctx, dstTargetOrName);
      if (!handle_bind_buffer_gen(ctx, dstTargetOrName, &dst, func, false))
         goto done;
   } else if (named) {
      func = "glNamedBufferSubData";
      dst = _mesa_lookup_bufferobj_err(ctx, dstTargetOrName, func);
      if (!dst)
         goto done;
   } else {
      assert(!ext_dsa);
      func = "glBufferSubData";
      dst = get_buffer(ctx, func, dstTargetOrName, GL_INVALID_OPERATION);
      if (!dst)
         goto done;
   }

   if (!validate_buffer_sub_data(ctx, dst, dstOffset, size, func))
      goto done; /* the error is already set */

   bufferobj_copy_subdata(ctx, src, dst, srcOffset, dstOffset, size);

done:
   /* The caller passes the reference to this function, so unreference it. */
   _mesa_reference_buffer_object(ctx, &src, NULL);
}
3531
/**
 * Perform all the error checking required by glMapBufferRange and its
 * named variants (offset/length sanity, allowed access bits, consistency
 * of the access flags with the buffer's storage flags, range within the
 * buffer, not-already-mapped).
 *
 * Side effect: on a write-access map, bumps NumMapBufferWriteCalls and may
 * emit a usage warning for frequently-written GL_STATIC_* buffers.
 *
 * \param func  API entry-point name used in error messages.
 * \return true if the map may proceed; false with a GL error recorded.
 */
static bool
validate_map_buffer_range(struct gl_context *ctx,
                          struct gl_buffer_object *bufObj, GLintptr offset,
                          GLsizeiptr length, GLbitfield access,
                          const char *func)
{
   GLbitfield allowed_access;

   ASSERT_OUTSIDE_BEGIN_END_WITH_RETVAL(ctx, false);

   if (offset < 0) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "%s(offset %ld < 0)", func, (long) offset);
      return false;
   }

   if (length < 0) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "%s(length %ld < 0)", func, (long) length);
      return false;
   }

   /* Page 38 of the PDF of the OpenGL ES 3.0 spec says:
    *
    *     "An INVALID_OPERATION error is generated for any of the following
    *     conditions:
    *
    *     * <length> is zero."
    *
    * Additionally, page 94 of the PDF of the OpenGL 4.5 core spec
    * (30.10.2014) also says this, so it's no longer allowed for desktop GL,
    * either.
    */
   if (length == 0) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "%s(length = 0)", func);
      return false;
   }

   allowed_access = GL_MAP_READ_BIT |
                    GL_MAP_WRITE_BIT |
                    GL_MAP_INVALIDATE_RANGE_BIT |
                    GL_MAP_INVALIDATE_BUFFER_BIT |
                    GL_MAP_FLUSH_EXPLICIT_BIT |
                    GL_MAP_UNSYNCHRONIZED_BIT;

   /* Persistent/coherent mapping bits only exist with ARB_buffer_storage. */
   if (ctx->Extensions.ARB_buffer_storage) {
      allowed_access |= GL_MAP_PERSISTENT_BIT |
                        GL_MAP_COHERENT_BIT;
   }

   if (access & ~allowed_access) {
      /* generate an error if any bits other than those allowed are set */
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "%s(access has undefined bits set)", func);
      return false;
   }

   if ((access & (GL_MAP_READ_BIT | GL_MAP_WRITE_BIT)) == 0) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "%s(access indicates neither read or write)", func);
      return false;
   }

   /* Reads may not be combined with invalidation or unsynchronized maps. */
   if ((access & GL_MAP_READ_BIT) &&
       (access & (GL_MAP_INVALIDATE_RANGE_BIT |
                  GL_MAP_INVALIDATE_BUFFER_BIT |
                  GL_MAP_UNSYNCHRONIZED_BIT))) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "%s(read access with disallowed bits)", func);
      return false;
   }

   if ((access & GL_MAP_FLUSH_EXPLICIT_BIT) &&
       ((access & GL_MAP_WRITE_BIT) == 0)) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "%s(access has flush explicit without write)", func);
      return false;
   }

   /* Each requested access kind must also be allowed by the flags the
    * buffer's storage was created with.
    */
   if (access & GL_MAP_READ_BIT &&
       !(bufObj->StorageFlags & GL_MAP_READ_BIT)) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "%s(buffer does not allow read access)", func);
      return false;
   }

   if (access & GL_MAP_WRITE_BIT &&
       !(bufObj->StorageFlags & GL_MAP_WRITE_BIT)) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "%s(buffer does not allow write access)", func);
      return false;
   }

   if (access & GL_MAP_COHERENT_BIT &&
       !(bufObj->StorageFlags & GL_MAP_COHERENT_BIT)) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "%s(buffer does not allow coherent access)", func);
      return false;
   }

   if (access & GL_MAP_PERSISTENT_BIT &&
       !(bufObj->StorageFlags & GL_MAP_PERSISTENT_BIT)) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "%s(buffer does not allow persistent access)", func);
      return false;
   }

   if (offset + length > bufObj->Size) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "%s(offset %lu + length %lu > buffer_size %lu)", func,
                  (unsigned long) offset, (unsigned long) length,
                  (unsigned long) bufObj->Size);
      return false;
   }

   if (_mesa_bufferobj_mapped(bufObj, MAP_USER)) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "%s(buffer already mapped)", func);
      return false;
   }

   /* Performance diagnostics: warn when a STATIC buffer is mapped for
    * write repeatedly, which suggests the wrong usage hint was chosen.
    */
   if (access & GL_MAP_WRITE_BIT) {
      bufObj->NumMapBufferWriteCalls++;
      if ((bufObj->Usage == GL_STATIC_DRAW ||
           bufObj->Usage == GL_STATIC_COPY) &&
          bufObj->NumMapBufferWriteCalls >= BUFFER_WARNING_CALL_COUNT) {
         BUFFER_USAGE_WARNING(ctx,
                              "using %s(buffer %u, offset %u, length %u) to "
                              "update a %s buffer",
                              func, bufObj->Name, offset, length,
                              _mesa_enum_to_string(bufObj->Usage));
      }
   }

   return true;
}
3668
/**
 * Common worker for glMapBuffer(Range) and the named variants: performs
 * the actual user-space mapping via the driver and the debug-only
 * instrumentation of the mapped memory.
 *
 * \param func  API entry-point name used in error messages; also used by
 *              the debug code to distinguish MapBuffer from MapBufferRange.
 * \return the mapped pointer, or NULL with GL_OUT_OF_MEMORY recorded.
 */
static void *
map_buffer_range(struct gl_context *ctx, struct gl_buffer_object *bufObj,
                 GLintptr offset, GLsizeiptr length, GLbitfield access,
                 const char *func)
{
   /* A zero-size data store cannot be mapped. */
   if (!bufObj->Size) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "%s(buffer size = 0)", func);
      return NULL;
   }

   void *map = _mesa_bufferobj_map_range(ctx, offset, length, access, bufObj,
                                         MAP_USER);
   if (!map) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "%s(map failed)", func);
   }
   else {
      /* The driver callback should have set all these fields.
       * This is important because other modules (like VBO) might call
       * the driver function directly.
       */
      assert(bufObj->Mappings[MAP_USER].Pointer == map);
      assert(bufObj->Mappings[MAP_USER].Length == length);
      assert(bufObj->Mappings[MAP_USER].Offset == offset);
      assert(bufObj->Mappings[MAP_USER].AccessFlags == access);
   }

   /* A writable mapping invalidates the cached min/max index data. */
   if (access & GL_MAP_WRITE_BIT) {
      bufObj->MinMaxCacheDirty = true;
   }

#ifdef VBO_DEBUG
   /* Debug aid: pre-fill a write-only whole-buffer map with a known byte
    * pattern so the unmap path can report untouched regions.
    */
   if (strstr(func, "Range") == NULL) { /* If not MapRange */
      printf("glMapBuffer(%u, sz %ld, access 0x%x)\n",
             bufObj->Name, bufObj->Size, access);
      /* Access must be write only */
      if ((access & GL_MAP_WRITE_BIT) && (!(access & ~GL_MAP_WRITE_BIT))) {
         GLuint i;
         GLubyte *b = (GLubyte *) bufObj->Mappings[MAP_USER].Pointer;
         for (i = 0; i < bufObj->Size; i++)
            b[i] = i & 0xff;
      }
   }
#endif

#ifdef BOUNDS_CHECK
   /* Debug aid: tag the over-allocated tail so the unmap path can detect
    * out-of-bounds writes.
    */
   if (strstr(func, "Range") == NULL) { /* If not MapRange */
      GLubyte *buf = (GLubyte *) bufObj->Mappings[MAP_USER].Pointer;
      GLuint i;
      /* buffer is 100 bytes larger than requested, fill with magic value */
      for (i = 0; i < 100; i++) {
         buf[bufObj->Size - i - 1] = 123;
      }
   }
#endif

   return map;
}
3726
3727 void * GLAPIENTRY
_mesa_MapBufferRange_no_error(GLenum target,GLintptr offset,GLsizeiptr length,GLbitfield access)3728 _mesa_MapBufferRange_no_error(GLenum target, GLintptr offset,
3729 GLsizeiptr length, GLbitfield access)
3730 {
3731 GET_CURRENT_CONTEXT(ctx);
3732
3733 struct gl_buffer_object **bufObjPtr = get_buffer_target(ctx, target, true);
3734 struct gl_buffer_object *bufObj = *bufObjPtr;
3735
3736 return map_buffer_range(ctx, bufObj, offset, length, access,
3737 "glMapBufferRange");
3738 }
3739
3740 void * GLAPIENTRY
_mesa_MapBufferRange(GLenum target,GLintptr offset,GLsizeiptr length,GLbitfield access)3741 _mesa_MapBufferRange(GLenum target, GLintptr offset, GLsizeiptr length,
3742 GLbitfield access)
3743 {
3744 GET_CURRENT_CONTEXT(ctx);
3745 struct gl_buffer_object *bufObj;
3746
3747 if (!ctx->Extensions.ARB_map_buffer_range) {
3748 _mesa_error(ctx, GL_INVALID_OPERATION,
3749 "glMapBufferRange(ARB_map_buffer_range not supported)");
3750 return NULL;
3751 }
3752
3753 bufObj = get_buffer(ctx, "glMapBufferRange", target, GL_INVALID_OPERATION);
3754 if (!bufObj)
3755 return NULL;
3756
3757 if (!validate_map_buffer_range(ctx, bufObj, offset, length, access,
3758 "glMapBufferRange"))
3759 return NULL;
3760
3761 return map_buffer_range(ctx, bufObj, offset, length, access,
3762 "glMapBufferRange");
3763 }
3764
3765 void * GLAPIENTRY
_mesa_MapNamedBufferRange_no_error(GLuint buffer,GLintptr offset,GLsizeiptr length,GLbitfield access)3766 _mesa_MapNamedBufferRange_no_error(GLuint buffer, GLintptr offset,
3767 GLsizeiptr length, GLbitfield access)
3768 {
3769 GET_CURRENT_CONTEXT(ctx);
3770 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
3771
3772 return map_buffer_range(ctx, bufObj, offset, length, access,
3773 "glMapNamedBufferRange");
3774 }
3775
3776 static void *
map_named_buffer_range(GLuint buffer,GLintptr offset,GLsizeiptr length,GLbitfield access,bool dsa_ext,const char * func)3777 map_named_buffer_range(GLuint buffer, GLintptr offset, GLsizeiptr length,
3778 GLbitfield access, bool dsa_ext, const char *func)
3779 {
3780 GET_CURRENT_CONTEXT(ctx);
3781 struct gl_buffer_object *bufObj = NULL;
3782
3783 if (!ctx->Extensions.ARB_map_buffer_range) {
3784 _mesa_error(ctx, GL_INVALID_OPERATION,
3785 "%s(ARB_map_buffer_range not supported)", func);
3786 return NULL;
3787 }
3788
3789 if (dsa_ext) {
3790 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
3791 if (!handle_bind_buffer_gen(ctx, buffer, &bufObj, func, false))
3792 return NULL;
3793 } else {
3794 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer, func);
3795 if (!bufObj)
3796 return NULL;
3797 }
3798
3799 if (!validate_map_buffer_range(ctx, bufObj, offset, length, access, func))
3800 return NULL;
3801
3802 return map_buffer_range(ctx, bufObj, offset, length, access, func);
3803 }
3804
3805 void * GLAPIENTRY
_mesa_MapNamedBufferRangeEXT(GLuint buffer,GLintptr offset,GLsizeiptr length,GLbitfield access)3806 _mesa_MapNamedBufferRangeEXT(GLuint buffer, GLintptr offset, GLsizeiptr length,
3807 GLbitfield access)
3808 {
3809 GET_CURRENT_CONTEXT(ctx);
3810 if (!buffer) {
3811 _mesa_error(ctx, GL_INVALID_OPERATION,
3812 "glMapNamedBufferRangeEXT(buffer=0)");
3813 return NULL;
3814 }
3815 return map_named_buffer_range(buffer, offset, length, access, true,
3816 "glMapNamedBufferRangeEXT");
3817 }
3818
3819 void * GLAPIENTRY
_mesa_MapNamedBufferRange(GLuint buffer,GLintptr offset,GLsizeiptr length,GLbitfield access)3820 _mesa_MapNamedBufferRange(GLuint buffer, GLintptr offset, GLsizeiptr length,
3821 GLbitfield access)
3822 {
3823 return map_named_buffer_range(buffer, offset, length, access, false,
3824 "glMapNamedBufferRange");
3825 }
3826
3827 /**
3828 * Converts GLenum access from MapBuffer and MapNamedBuffer into
3829 * flags for input to map_buffer_range.
3830 *
3831 * \return true if the type of requested access is permissible.
3832 */
3833 static bool
get_map_buffer_access_flags(struct gl_context * ctx,GLenum access,GLbitfield * flags)3834 get_map_buffer_access_flags(struct gl_context *ctx, GLenum access,
3835 GLbitfield *flags)
3836 {
3837 switch (access) {
3838 case GL_READ_ONLY_ARB:
3839 *flags = GL_MAP_READ_BIT;
3840 return _mesa_is_desktop_gl(ctx);
3841 case GL_WRITE_ONLY_ARB:
3842 *flags = GL_MAP_WRITE_BIT;
3843 return true;
3844 case GL_READ_WRITE_ARB:
3845 *flags = GL_MAP_READ_BIT | GL_MAP_WRITE_BIT;
3846 return _mesa_is_desktop_gl(ctx);
3847 default:
3848 *flags = 0;
3849 return false;
3850 }
3851 }
3852
3853 void * GLAPIENTRY
_mesa_MapBuffer_no_error(GLenum target,GLenum access)3854 _mesa_MapBuffer_no_error(GLenum target, GLenum access)
3855 {
3856 GET_CURRENT_CONTEXT(ctx);
3857
3858 GLbitfield accessFlags;
3859 get_map_buffer_access_flags(ctx, access, &accessFlags);
3860
3861 struct gl_buffer_object **bufObjPtr = get_buffer_target(ctx, target, true);
3862 struct gl_buffer_object *bufObj = *bufObjPtr;
3863
3864 return map_buffer_range(ctx, bufObj, 0, bufObj->Size, accessFlags,
3865 "glMapBuffer");
3866 }
3867
3868 void * GLAPIENTRY
_mesa_MapBuffer(GLenum target,GLenum access)3869 _mesa_MapBuffer(GLenum target, GLenum access)
3870 {
3871 GET_CURRENT_CONTEXT(ctx);
3872 struct gl_buffer_object *bufObj;
3873 GLbitfield accessFlags;
3874
3875 if (!get_map_buffer_access_flags(ctx, access, &accessFlags)) {
3876 _mesa_error(ctx, GL_INVALID_ENUM, "glMapBuffer(invalid access)");
3877 return NULL;
3878 }
3879
3880 bufObj = get_buffer(ctx, "glMapBuffer", target, GL_INVALID_OPERATION);
3881 if (!bufObj)
3882 return NULL;
3883
3884 if (!validate_map_buffer_range(ctx, bufObj, 0, bufObj->Size, accessFlags,
3885 "glMapBuffer"))
3886 return NULL;
3887
3888 return map_buffer_range(ctx, bufObj, 0, bufObj->Size, accessFlags,
3889 "glMapBuffer");
3890 }
3891
3892 void * GLAPIENTRY
_mesa_MapNamedBuffer_no_error(GLuint buffer,GLenum access)3893 _mesa_MapNamedBuffer_no_error(GLuint buffer, GLenum access)
3894 {
3895 GET_CURRENT_CONTEXT(ctx);
3896
3897 GLbitfield accessFlags;
3898 get_map_buffer_access_flags(ctx, access, &accessFlags);
3899
3900 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
3901
3902 return map_buffer_range(ctx, bufObj, 0, bufObj->Size, accessFlags,
3903 "glMapNamedBuffer");
3904 }
3905
3906 void * GLAPIENTRY
_mesa_MapNamedBuffer(GLuint buffer,GLenum access)3907 _mesa_MapNamedBuffer(GLuint buffer, GLenum access)
3908 {
3909 GET_CURRENT_CONTEXT(ctx);
3910 struct gl_buffer_object *bufObj;
3911 GLbitfield accessFlags;
3912
3913 if (!get_map_buffer_access_flags(ctx, access, &accessFlags)) {
3914 _mesa_error(ctx, GL_INVALID_ENUM, "glMapNamedBuffer(invalid access)");
3915 return NULL;
3916 }
3917
3918 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer, "glMapNamedBuffer");
3919 if (!bufObj)
3920 return NULL;
3921
3922 if (!validate_map_buffer_range(ctx, bufObj, 0, bufObj->Size, accessFlags,
3923 "glMapNamedBuffer"))
3924 return NULL;
3925
3926 return map_buffer_range(ctx, bufObj, 0, bufObj->Size, accessFlags,
3927 "glMapNamedBuffer");
3928 }
3929
3930 void * GLAPIENTRY
_mesa_MapNamedBufferEXT(GLuint buffer,GLenum access)3931 _mesa_MapNamedBufferEXT(GLuint buffer, GLenum access)
3932 {
3933 GET_CURRENT_CONTEXT(ctx);
3934
3935 GLbitfield accessFlags;
3936 if (!buffer) {
3937 _mesa_error(ctx, GL_INVALID_OPERATION,
3938 "glMapNamedBufferEXT(buffer=0)");
3939 return NULL;
3940 }
3941 if (!get_map_buffer_access_flags(ctx, access, &accessFlags)) {
3942 _mesa_error(ctx, GL_INVALID_ENUM, "glMapNamedBufferEXT(invalid access)");
3943 return NULL;
3944 }
3945
3946 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
3947 if (!handle_bind_buffer_gen(ctx, buffer,
3948 &bufObj, "glMapNamedBufferEXT", false))
3949 return NULL;
3950
3951 if (!validate_map_buffer_range(ctx, bufObj, 0, bufObj->Size, accessFlags,
3952 "glMapNamedBufferEXT"))
3953 return NULL;
3954
3955 return map_buffer_range(ctx, bufObj, 0, bufObj->Size, accessFlags,
3956 "glMapNamedBufferEXT");
3957 }
3958
3959 static void
flush_mapped_buffer_range(struct gl_context * ctx,struct gl_buffer_object * bufObj,GLintptr offset,GLsizeiptr length,const char * func)3960 flush_mapped_buffer_range(struct gl_context *ctx,
3961 struct gl_buffer_object *bufObj,
3962 GLintptr offset, GLsizeiptr length,
3963 const char *func)
3964 {
3965 if (!ctx->Extensions.ARB_map_buffer_range) {
3966 _mesa_error(ctx, GL_INVALID_OPERATION,
3967 "%s(ARB_map_buffer_range not supported)", func);
3968 return;
3969 }
3970
3971 if (offset < 0) {
3972 _mesa_error(ctx, GL_INVALID_VALUE,
3973 "%s(offset %ld < 0)", func, (long) offset);
3974 return;
3975 }
3976
3977 if (length < 0) {
3978 _mesa_error(ctx, GL_INVALID_VALUE,
3979 "%s(length %ld < 0)", func, (long) length);
3980 return;
3981 }
3982
3983 if (!_mesa_bufferobj_mapped(bufObj, MAP_USER)) {
3984 /* buffer is not mapped */
3985 _mesa_error(ctx, GL_INVALID_OPERATION,
3986 "%s(buffer is not mapped)", func);
3987 return;
3988 }
3989
3990 if ((bufObj->Mappings[MAP_USER].AccessFlags &
3991 GL_MAP_FLUSH_EXPLICIT_BIT) == 0) {
3992 _mesa_error(ctx, GL_INVALID_OPERATION,
3993 "%s(GL_MAP_FLUSH_EXPLICIT_BIT not set)", func);
3994 return;
3995 }
3996
3997 if (offset + length > bufObj->Mappings[MAP_USER].Length) {
3998 _mesa_error(ctx, GL_INVALID_VALUE,
3999 "%s(offset %ld + length %ld > mapped length %ld)", func,
4000 (long) offset, (long) length,
4001 (long) bufObj->Mappings[MAP_USER].Length);
4002 return;
4003 }
4004
4005 assert(bufObj->Mappings[MAP_USER].AccessFlags & GL_MAP_WRITE_BIT);
4006
4007 _mesa_bufferobj_flush_mapped_range(ctx, offset, length, bufObj,
4008 MAP_USER);
4009 }
4010
4011 void GLAPIENTRY
_mesa_FlushMappedBufferRange_no_error(GLenum target,GLintptr offset,GLsizeiptr length)4012 _mesa_FlushMappedBufferRange_no_error(GLenum target, GLintptr offset,
4013 GLsizeiptr length)
4014 {
4015 GET_CURRENT_CONTEXT(ctx);
4016 struct gl_buffer_object **bufObjPtr = get_buffer_target(ctx, target, true);
4017 struct gl_buffer_object *bufObj = *bufObjPtr;
4018
4019 _mesa_bufferobj_flush_mapped_range(ctx, offset, length, bufObj,
4020 MAP_USER);
4021 }
4022
4023 void GLAPIENTRY
_mesa_FlushMappedBufferRange(GLenum target,GLintptr offset,GLsizeiptr length)4024 _mesa_FlushMappedBufferRange(GLenum target, GLintptr offset,
4025 GLsizeiptr length)
4026 {
4027 GET_CURRENT_CONTEXT(ctx);
4028 struct gl_buffer_object *bufObj;
4029
4030 bufObj = get_buffer(ctx, "glFlushMappedBufferRange", target,
4031 GL_INVALID_OPERATION);
4032 if (!bufObj)
4033 return;
4034
4035 flush_mapped_buffer_range(ctx, bufObj, offset, length,
4036 "glFlushMappedBufferRange");
4037 }
4038
4039 void GLAPIENTRY
_mesa_FlushMappedNamedBufferRange_no_error(GLuint buffer,GLintptr offset,GLsizeiptr length)4040 _mesa_FlushMappedNamedBufferRange_no_error(GLuint buffer, GLintptr offset,
4041 GLsizeiptr length)
4042 {
4043 GET_CURRENT_CONTEXT(ctx);
4044 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
4045
4046 _mesa_bufferobj_flush_mapped_range(ctx, offset, length, bufObj,
4047 MAP_USER);
4048 }
4049
4050 void GLAPIENTRY
_mesa_FlushMappedNamedBufferRange(GLuint buffer,GLintptr offset,GLsizeiptr length)4051 _mesa_FlushMappedNamedBufferRange(GLuint buffer, GLintptr offset,
4052 GLsizeiptr length)
4053 {
4054 GET_CURRENT_CONTEXT(ctx);
4055 struct gl_buffer_object *bufObj;
4056
4057 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer,
4058 "glFlushMappedNamedBufferRange");
4059 if (!bufObj)
4060 return;
4061
4062 flush_mapped_buffer_range(ctx, bufObj, offset, length,
4063 "glFlushMappedNamedBufferRange");
4064 }
4065
4066 void GLAPIENTRY
_mesa_FlushMappedNamedBufferRangeEXT(GLuint buffer,GLintptr offset,GLsizeiptr length)4067 _mesa_FlushMappedNamedBufferRangeEXT(GLuint buffer, GLintptr offset,
4068 GLsizeiptr length)
4069 {
4070 GET_CURRENT_CONTEXT(ctx);
4071 struct gl_buffer_object *bufObj;
4072
4073 if (!buffer) {
4074 _mesa_error(ctx, GL_INVALID_OPERATION,
4075 "glFlushMappedNamedBufferRangeEXT(buffer=0)");
4076 return;
4077 }
4078
4079 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
4080 if (!handle_bind_buffer_gen(ctx, buffer,
4081 &bufObj, "glFlushMappedNamedBufferRangeEXT", false))
4082 return;
4083
4084 flush_mapped_buffer_range(ctx, bufObj, offset, length,
4085 "glFlushMappedNamedBufferRangeEXT");
4086 }
4087
4088 static void
bind_buffer_range_uniform_buffer(struct gl_context * ctx,GLuint index,struct gl_buffer_object * bufObj,GLintptr offset,GLsizeiptr size)4089 bind_buffer_range_uniform_buffer(struct gl_context *ctx, GLuint index,
4090 struct gl_buffer_object *bufObj,
4091 GLintptr offset, GLsizeiptr size)
4092 {
4093 if (!bufObj) {
4094 offset = -1;
4095 size = -1;
4096 }
4097
4098 _mesa_reference_buffer_object(ctx, &ctx->UniformBuffer, bufObj);
4099 bind_uniform_buffer(ctx, index, bufObj, offset, size, GL_FALSE);
4100 }
4101
4102 /**
4103 * Bind a region of a buffer object to a uniform block binding point.
4104 * \param index the uniform buffer binding point index
4105 * \param bufObj the buffer object
4106 * \param offset offset to the start of buffer object region
4107 * \param size size of the buffer object region
4108 */
4109 static void
bind_buffer_range_uniform_buffer_err(struct gl_context * ctx,GLuint index,struct gl_buffer_object * bufObj,GLintptr offset,GLsizeiptr size)4110 bind_buffer_range_uniform_buffer_err(struct gl_context *ctx, GLuint index,
4111 struct gl_buffer_object *bufObj,
4112 GLintptr offset, GLsizeiptr size)
4113 {
4114 if (index >= ctx->Const.MaxUniformBufferBindings) {
4115 _mesa_error(ctx, GL_INVALID_VALUE, "glBindBufferRange(index=%d)", index);
4116 return;
4117 }
4118
4119 if (offset & (ctx->Const.UniformBufferOffsetAlignment - 1)) {
4120 _mesa_error(ctx, GL_INVALID_VALUE,
4121 "glBindBufferRange(offset misaligned %d/%d)", (int) offset,
4122 ctx->Const.UniformBufferOffsetAlignment);
4123 return;
4124 }
4125
4126 bind_buffer_range_uniform_buffer(ctx, index, bufObj, offset, size);
4127 }
4128
4129 static void
bind_buffer_range_shader_storage_buffer(struct gl_context * ctx,GLuint index,struct gl_buffer_object * bufObj,GLintptr offset,GLsizeiptr size)4130 bind_buffer_range_shader_storage_buffer(struct gl_context *ctx,
4131 GLuint index,
4132 struct gl_buffer_object *bufObj,
4133 GLintptr offset,
4134 GLsizeiptr size)
4135 {
4136 if (!bufObj) {
4137 offset = -1;
4138 size = -1;
4139 }
4140
4141 _mesa_reference_buffer_object(ctx, &ctx->ShaderStorageBuffer, bufObj);
4142 bind_shader_storage_buffer(ctx, index, bufObj, offset, size, GL_FALSE);
4143 }
4144
4145 /**
4146 * Bind a region of a buffer object to a shader storage block binding point.
4147 * \param index the shader storage buffer binding point index
4148 * \param bufObj the buffer object
4149 * \param offset offset to the start of buffer object region
4150 * \param size size of the buffer object region
4151 */
4152 static void
bind_buffer_range_shader_storage_buffer_err(struct gl_context * ctx,GLuint index,struct gl_buffer_object * bufObj,GLintptr offset,GLsizeiptr size)4153 bind_buffer_range_shader_storage_buffer_err(struct gl_context *ctx,
4154 GLuint index,
4155 struct gl_buffer_object *bufObj,
4156 GLintptr offset, GLsizeiptr size)
4157 {
4158 if (index >= ctx->Const.MaxShaderStorageBufferBindings) {
4159 _mesa_error(ctx, GL_INVALID_VALUE, "glBindBufferRange(index=%d)", index);
4160 return;
4161 }
4162
4163 if (offset & (ctx->Const.ShaderStorageBufferOffsetAlignment - 1)) {
4164 _mesa_error(ctx, GL_INVALID_VALUE,
4165 "glBindBufferRange(offset misaligned %d/%d)", (int) offset,
4166 ctx->Const.ShaderStorageBufferOffsetAlignment);
4167 return;
4168 }
4169
4170 bind_buffer_range_shader_storage_buffer(ctx, index, bufObj, offset, size);
4171 }
4172
4173 static void
bind_buffer_range_atomic_buffer(struct gl_context * ctx,GLuint index,struct gl_buffer_object * bufObj,GLintptr offset,GLsizeiptr size)4174 bind_buffer_range_atomic_buffer(struct gl_context *ctx, GLuint index,
4175 struct gl_buffer_object *bufObj,
4176 GLintptr offset, GLsizeiptr size)
4177 {
4178 if (!bufObj) {
4179 offset = -1;
4180 size = -1;
4181 }
4182
4183 _mesa_reference_buffer_object(ctx, &ctx->AtomicBuffer, bufObj);
4184 bind_atomic_buffer(ctx, index, bufObj, offset, size, GL_FALSE);
4185 }
4186
4187 /**
4188 * Bind a region of a buffer object to an atomic storage block binding point.
4189 * \param index the shader storage buffer binding point index
4190 * \param bufObj the buffer object
4191 * \param offset offset to the start of buffer object region
4192 * \param size size of the buffer object region
4193 */
4194 static void
bind_buffer_range_atomic_buffer_err(struct gl_context * ctx,GLuint index,struct gl_buffer_object * bufObj,GLintptr offset,GLsizeiptr size)4195 bind_buffer_range_atomic_buffer_err(struct gl_context *ctx,
4196 GLuint index,
4197 struct gl_buffer_object *bufObj,
4198 GLintptr offset, GLsizeiptr size)
4199 {
4200 if (index >= ctx->Const.MaxAtomicBufferBindings) {
4201 _mesa_error(ctx, GL_INVALID_VALUE, "glBindBufferRange(index=%d)", index);
4202 return;
4203 }
4204
4205 if (offset & (ATOMIC_COUNTER_SIZE - 1)) {
4206 _mesa_error(ctx, GL_INVALID_VALUE,
4207 "glBindBufferRange(offset misaligned %d/%d)", (int) offset,
4208 ATOMIC_COUNTER_SIZE);
4209 return;
4210 }
4211
4212 bind_buffer_range_atomic_buffer(ctx, index, bufObj, offset, size);
4213 }
4214
4215 static inline bool
bind_buffers_check_offset_and_size(struct gl_context * ctx,GLuint index,const GLintptr * offsets,const GLsizeiptr * sizes)4216 bind_buffers_check_offset_and_size(struct gl_context *ctx,
4217 GLuint index,
4218 const GLintptr *offsets,
4219 const GLsizeiptr *sizes)
4220 {
4221 if (offsets[index] < 0) {
4222 /* The ARB_multi_bind spec says:
4223 *
4224 * "An INVALID_VALUE error is generated by BindBuffersRange if any
4225 * value in <offsets> is less than zero (per binding)."
4226 */
4227 _mesa_error(ctx, GL_INVALID_VALUE,
4228 "glBindBuffersRange(offsets[%u]=%" PRId64 " < 0)",
4229 index, (int64_t) offsets[index]);
4230 return false;
4231 }
4232
4233 if (sizes[index] <= 0) {
4234 /* The ARB_multi_bind spec says:
4235 *
4236 * "An INVALID_VALUE error is generated by BindBuffersRange if any
4237 * value in <sizes> is less than or equal to zero (per binding)."
4238 */
4239 _mesa_error(ctx, GL_INVALID_VALUE,
4240 "glBindBuffersRange(sizes[%u]=%" PRId64 " <= 0)",
4241 index, (int64_t) sizes[index]);
4242 return false;
4243 }
4244
4245 return true;
4246 }
4247
4248 static bool
error_check_bind_uniform_buffers(struct gl_context * ctx,GLuint first,GLsizei count,const char * caller)4249 error_check_bind_uniform_buffers(struct gl_context *ctx,
4250 GLuint first, GLsizei count,
4251 const char *caller)
4252 {
4253 if (!ctx->Extensions.ARB_uniform_buffer_object) {
4254 _mesa_error(ctx, GL_INVALID_ENUM,
4255 "%s(target=GL_UNIFORM_BUFFER)", caller);
4256 return false;
4257 }
4258
4259 /* The ARB_multi_bind_spec says:
4260 *
4261 * "An INVALID_OPERATION error is generated if <first> + <count> is
4262 * greater than the number of target-specific indexed binding points,
4263 * as described in section 6.7.1."
4264 */
4265 if (first + count > ctx->Const.MaxUniformBufferBindings) {
4266 _mesa_error(ctx, GL_INVALID_OPERATION,
4267 "%s(first=%u + count=%d > the value of "
4268 "GL_MAX_UNIFORM_BUFFER_BINDINGS=%u)",
4269 caller, first, count,
4270 ctx->Const.MaxUniformBufferBindings);
4271 return false;
4272 }
4273
4274 return true;
4275 }
4276
4277 static bool
error_check_bind_shader_storage_buffers(struct gl_context * ctx,GLuint first,GLsizei count,const char * caller)4278 error_check_bind_shader_storage_buffers(struct gl_context *ctx,
4279 GLuint first, GLsizei count,
4280 const char *caller)
4281 {
4282 if (!ctx->Extensions.ARB_shader_storage_buffer_object) {
4283 _mesa_error(ctx, GL_INVALID_ENUM,
4284 "%s(target=GL_SHADER_STORAGE_BUFFER)", caller);
4285 return false;
4286 }
4287
4288 /* The ARB_multi_bind_spec says:
4289 *
4290 * "An INVALID_OPERATION error is generated if <first> + <count> is
4291 * greater than the number of target-specific indexed binding points,
4292 * as described in section 6.7.1."
4293 */
4294 if (first + count > ctx->Const.MaxShaderStorageBufferBindings) {
4295 _mesa_error(ctx, GL_INVALID_OPERATION,
4296 "%s(first=%u + count=%d > the value of "
4297 "GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS=%u)",
4298 caller, first, count,
4299 ctx->Const.MaxShaderStorageBufferBindings);
4300 return false;
4301 }
4302
4303 return true;
4304 }
4305
4306 /**
4307 * Unbind all uniform buffers in the range
4308 * <first> through <first>+<count>-1
4309 */
4310 static void
unbind_uniform_buffers(struct gl_context * ctx,GLuint first,GLsizei count)4311 unbind_uniform_buffers(struct gl_context *ctx, GLuint first, GLsizei count)
4312 {
4313 for (int i = 0; i < count; i++)
4314 set_buffer_binding(ctx, &ctx->UniformBufferBindings[first + i],
4315 NULL, -1, -1, GL_TRUE, 0);
4316 }
4317
4318 /**
4319 * Unbind all shader storage buffers in the range
4320 * <first> through <first>+<count>-1
4321 */
4322 static void
unbind_shader_storage_buffers(struct gl_context * ctx,GLuint first,GLsizei count)4323 unbind_shader_storage_buffers(struct gl_context *ctx, GLuint first,
4324 GLsizei count)
4325 {
4326 for (int i = 0; i < count; i++)
4327 set_buffer_binding(ctx, &ctx->ShaderStorageBufferBindings[first + i],
4328 NULL, -1, -1, GL_TRUE, 0);
4329 }
4330
/**
 * Bind (or unbind) <count> buffers to the GL_UNIFORM_BUFFER indexed binding
 * points [first, first+count-1]; backs glBindBuffersBase/Range for that
 * target.
 *
 * \param buffers array of <count> buffer names, or NULL to unbind the range
 * \param range   true for BindBuffersRange (offsets/sizes are consumed),
 *                false for BindBuffersBase (offsets/sizes are ignored)
 */
static void
bind_uniform_buffers(struct gl_context *ctx, GLuint first, GLsizei count,
                     const GLuint *buffers,
                     bool range,
                     const GLintptr *offsets, const GLsizeiptr *sizes,
                     const char *caller)
{
   if (!error_check_bind_uniform_buffers(ctx, first, count, caller))
      return;

   /* Assume that at least one binding will be changed */
   FLUSH_VERTICES(ctx, 0, 0);
   ctx->NewDriverState |= ST_NEW_UNIFORM_BUFFER;

   if (!buffers) {
      /* The ARB_multi_bind spec says:
       *
       *   "If <buffers> is NULL, all bindings from <first> through
       *    <first>+<count>-1 are reset to their unbound (zero) state.
       *    In this case, the offsets and sizes associated with the
       *    binding points are set to default values, ignoring
       *    <offsets> and <sizes>."
       */
      unbind_uniform_buffers(ctx, first, count);
      return;
   }

   /* Error semantics for multi-bind differ from most GL commands: per
    * ARB_multi_bind issue (11), a binding point with invalid parameters is
    * skipped (with an error recorded), but the remaining binding points in
    * the same call are still updated — the command is not all-or-nothing.
    */

   /* Hold the shared buffer-object hash lock across the whole loop so the
    * per-name lookups inside set_buffer_multi_binding are done under one
    * lock acquisition.
    */
   _mesa_HashLockMaybeLocked(&ctx->Shared->BufferObjects,
                             ctx->BufferObjectsLocked);

   for (int i = 0; i < count; i++) {
      struct gl_buffer_binding *binding =
         &ctx->UniformBufferBindings[first + i];
      /* BindBuffersBase leaves offset/size at 0, meaning "whole buffer". */
      GLintptr offset = 0;
      GLsizeiptr size = 0;

      if (range) {
         if (!bind_buffers_check_offset_and_size(ctx, i, offsets, sizes))
            continue;

         /* ARB_multi_bind / table 6.5: uniform buffer offsets must be
          * multiples of UNIFORM_BUFFER_OFFSET_ALIGNMENT (a power of two,
          * hence the mask test); sizes have no restriction.
          */
         if (offsets[i] & (ctx->Const.UniformBufferOffsetAlignment - 1)) {
            _mesa_error(ctx, GL_INVALID_VALUE,
                        "glBindBuffersRange(offsets[%u]=%" PRId64
                        " is misaligned; it must be a multiple of the value of "
                        "GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT=%u when "
                        "target=GL_UNIFORM_BUFFER)",
                        i, (int64_t) offsets[i],
                        ctx->Const.UniformBufferOffsetAlignment);
            continue;
         }

         offset = offsets[i];
         size = sizes[i];
      }

      set_buffer_multi_binding(ctx, buffers, i, caller,
                               binding, offset, size, range,
                               USAGE_UNIFORM_BUFFER);
   }

   _mesa_HashUnlockMaybeLocked(&ctx->Shared->BufferObjects,
                               ctx->BufferObjectsLocked);
}
4432
/**
 * Bind (or unbind) <count> buffers to the GL_SHADER_STORAGE_BUFFER indexed
 * binding points [first, first+count-1]; backs glBindBuffersBase/Range for
 * that target.
 *
 * \param buffers array of <count> buffer names, or NULL to unbind the range
 * \param range   true for BindBuffersRange (offsets/sizes are consumed),
 *                false for BindBuffersBase (offsets/sizes are ignored)
 */
static void
bind_shader_storage_buffers(struct gl_context *ctx, GLuint first,
                            GLsizei count, const GLuint *buffers,
                            bool range,
                            const GLintptr *offsets,
                            const GLsizeiptr *sizes,
                            const char *caller)
{
   if (!error_check_bind_shader_storage_buffers(ctx, first, count, caller))
      return;

   /* Assume that at least one binding will be changed */
   FLUSH_VERTICES(ctx, 0, 0);
   ctx->NewDriverState |= ST_NEW_STORAGE_BUFFER;

   if (!buffers) {
      /* The ARB_multi_bind spec says:
       *
       *   "If <buffers> is NULL, all bindings from <first> through
       *    <first>+<count>-1 are reset to their unbound (zero) state.
       *    In this case, the offsets and sizes associated with the
       *    binding points are set to default values, ignoring
       *    <offsets> and <sizes>."
       */
      unbind_shader_storage_buffers(ctx, first, count);
      return;
   }

   /* Error semantics for multi-bind differ from most GL commands: per
    * ARB_multi_bind issue (11), a binding point with invalid parameters is
    * skipped (with an error recorded), but the remaining binding points in
    * the same call are still updated — the command is not all-or-nothing.
    */

   /* Hold the shared buffer-object hash lock across the whole loop so the
    * per-name lookups inside set_buffer_multi_binding are done under one
    * lock acquisition.
    */
   _mesa_HashLockMaybeLocked(&ctx->Shared->BufferObjects,
                             ctx->BufferObjectsLocked);

   for (int i = 0; i < count; i++) {
      struct gl_buffer_binding *binding =
         &ctx->ShaderStorageBufferBindings[first + i];
      /* BindBuffersBase leaves offset/size at 0, meaning "whole buffer". */
      GLintptr offset = 0;
      GLsizeiptr size = 0;

      if (range) {
         if (!bind_buffers_check_offset_and_size(ctx, i, offsets, sizes))
            continue;

         /* ARB_multi_bind / table 6.5: shader storage buffer offsets must
          * be multiples of SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT (a power
          * of two, hence the mask test); sizes have no restriction.
          */
         if (offsets[i] & (ctx->Const.ShaderStorageBufferOffsetAlignment - 1)) {
            _mesa_error(ctx, GL_INVALID_VALUE,
                        "glBindBuffersRange(offsets[%u]=%" PRId64
                        " is misaligned; it must be a multiple of the value of "
                        "GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT=%u when "
                        "target=GL_SHADER_STORAGE_BUFFER)",
                        i, (int64_t) offsets[i],
                        ctx->Const.ShaderStorageBufferOffsetAlignment);
            continue;
         }

         offset = offsets[i];
         size = sizes[i];
      }

      set_buffer_multi_binding(ctx, buffers, i, caller,
                               binding, offset, size, range,
                               USAGE_SHADER_STORAGE_BUFFER);
   }

   _mesa_HashUnlockMaybeLocked(&ctx->Shared->BufferObjects,
                               ctx->BufferObjectsLocked);
}
4535
4536 static bool
error_check_bind_xfb_buffers(struct gl_context * ctx,struct gl_transform_feedback_object * tfObj,GLuint first,GLsizei count,const char * caller)4537 error_check_bind_xfb_buffers(struct gl_context *ctx,
4538 struct gl_transform_feedback_object *tfObj,
4539 GLuint first, GLsizei count, const char *caller)
4540 {
4541 if (!ctx->Extensions.EXT_transform_feedback) {
4542 _mesa_error(ctx, GL_INVALID_ENUM,
4543 "%s(target=GL_TRANSFORM_FEEDBACK_BUFFER)", caller);
4544 return false;
4545 }
4546
4547 /* Page 398 of the PDF of the OpenGL 4.4 (Core Profile) spec says:
4548 *
4549 * "An INVALID_OPERATION error is generated :
4550 *
4551 * ...
4552 * • by BindBufferRange or BindBufferBase if target is TRANSFORM_-
4553 * FEEDBACK_BUFFER and transform feedback is currently active."
4554 *
4555 * We assume that this is also meant to apply to BindBuffersRange
4556 * and BindBuffersBase.
4557 */
4558 if (tfObj->Active) {
4559 _mesa_error(ctx, GL_INVALID_OPERATION,
4560 "%s(Changing transform feedback buffers while "
4561 "transform feedback is active)", caller);
4562 return false;
4563 }
4564
4565 /* The ARB_multi_bind_spec says:
4566 *
4567 * "An INVALID_OPERATION error is generated if <first> + <count> is
4568 * greater than the number of target-specific indexed binding points,
4569 * as described in section 6.7.1."
4570 */
4571 if (first + count > ctx->Const.MaxTransformFeedbackBuffers) {
4572 _mesa_error(ctx, GL_INVALID_OPERATION,
4573 "%s(first=%u + count=%d > the value of "
4574 "GL_MAX_TRANSFORM_FEEDBACK_BUFFERS=%u)",
4575 caller, first, count,
4576 ctx->Const.MaxTransformFeedbackBuffers);
4577 return false;
4578 }
4579
4580 return true;
4581 }
4582
4583 /**
4584 * Unbind all transform feedback buffers in the range
4585 * <first> through <first>+<count>-1
4586 */
4587 static void
unbind_xfb_buffers(struct gl_context * ctx,struct gl_transform_feedback_object * tfObj,GLuint first,GLsizei count)4588 unbind_xfb_buffers(struct gl_context *ctx,
4589 struct gl_transform_feedback_object *tfObj,
4590 GLuint first, GLsizei count)
4591 {
4592 for (int i = 0; i < count; i++)
4593 _mesa_set_transform_feedback_binding(ctx, tfObj, first + i,
4594 NULL, 0, 0);
4595 }
4596
/**
 * Bind buffer objects, or sub-ranges of them, to the transform feedback
 * binding points <first> .. <first>+<count>-1 of the currently bound
 * transform feedback object.
 *
 * Backs glBindBuffersBase/glBindBuffersRange with
 * target = GL_TRANSFORM_FEEDBACK_BUFFER (ARB_multi_bind).
 *
 * \param buffers  array of <count> buffer names, or NULL to reset the
 *                 whole range to the unbound (zero) state
 * \param range    true for BindBuffersRange: <offsets>/<sizes> give the
 *                 sub-range of each buffer to bind; false for
 *                 BindBuffersBase (whole buffers, offsets/sizes ignored)
 * \param caller   API entry-point name, used in error messages
 */
static void
bind_xfb_buffers(struct gl_context *ctx,
                 GLuint first, GLsizei count,
                 const GLuint *buffers,
                 bool range,
                 const GLintptr *offsets,
                 const GLsizeiptr *sizes,
                 const char *caller)
{
   struct gl_transform_feedback_object *tfObj =
      ctx->TransformFeedback.CurrentObject;

   if (!error_check_bind_xfb_buffers(ctx, tfObj, first, count, caller))
      return;

   /* Assume that at least one binding will be changed */
   FLUSH_VERTICES(ctx, 0, 0);

   if (!buffers) {
      /* The ARB_multi_bind spec says:
       *
       *   "If <buffers> is NULL, all bindings from <first> through
       *    <first>+<count>-1 are reset to their unbound (zero) state.
       *    In this case, the offsets and sizes associated with the
       *    binding points are set to default values, ignoring
       *    <offsets> and <sizes>."
       */
      unbind_xfb_buffers(ctx, tfObj, first, count);
      return;
   }

   /* Note that the error semantics for multi-bind commands differ from
    * those of other GL commands.
    *
    * The Issues section in the ARB_multi_bind spec says:
    *
    *    "(11) Typically, OpenGL specifies that if an error is generated by a
    *          command, that command has no effect.  This is somewhat
    *          unfortunate for multi-bind commands, because it would require a
    *          first pass to scan the entire list of bound objects for errors
    *          and then a second pass to actually perform the bindings.
    *          Should we have different error semantics?
    *
    *       RESOLVED:  Yes.  In this specification, when the parameters for
    *       one of the <count> binding points are invalid, that binding point
    *       is not updated and an error will be generated.  However, other
    *       binding points in the same command will be updated if their
    *       parameters are valid and no other error occurs."
    */

   /* Take the shared buffer-name hash lock once for the whole loop rather
    * than per lookup.
    */
   _mesa_HashLockMaybeLocked(&ctx->Shared->BufferObjects,
                             ctx->BufferObjectsLocked);

   for (int i = 0; i < count; i++) {
      const GLuint index = first + i;
      struct gl_buffer_object * const boundBufObj = tfObj->Buffers[index];
      struct gl_buffer_object *bufObj;
      GLintptr offset = 0;
      GLsizeiptr size = 0;

      if (range) {
         /* Per-binding validation failures skip only this binding point
          * (see the error-semantics note above).
          */
         if (!bind_buffers_check_offset_and_size(ctx, i, offsets, sizes))
            continue;

         /* The ARB_multi_bind spec says:
          *
          *   "An INVALID_VALUE error is generated by BindBuffersRange if any
          *    pair of values in <offsets> and <sizes> does not respectively
          *    satisfy the constraints described for those parameters for the
          *    specified target, as described in section 6.7.1 (per binding)."
          *
          * Section 6.7.1 refers to table 6.5, which says:
          *
          *   "┌───────────────────────────────────────────────────────────────┐
          *    │ Transform feedback array bindings (see sec. 13.2.2)           │
          *    ├───────────────────────┬───────────────────────────────────────┤
          *    │    ...                │    ...                                │
          *    │ offset restriction    │ multiple of 4                         │
          *    │    ...                │    ...                                │
          *    │ size restriction      │ multiple of 4                         │
          *    └───────────────────────┴───────────────────────────────────────┘"
          */
         if (offsets[i] & 0x3) {
            _mesa_error(ctx, GL_INVALID_VALUE,
                        "glBindBuffersRange(offsets[%u]=%" PRId64
                        " is misaligned; it must be a multiple of 4 when "
                        "target=GL_TRANSFORM_FEEDBACK_BUFFER)",
                        i, (int64_t) offsets[i]);
            continue;
         }

         if (sizes[i] & 0x3) {
            _mesa_error(ctx, GL_INVALID_VALUE,
                        "glBindBuffersRange(sizes[%u]=%" PRId64
                        " is misaligned; it must be a multiple of 4 when "
                        "target=GL_TRANSFORM_FEEDBACK_BUFFER)",
                        i, (int64_t) sizes[i]);
            continue;
         }

         offset = offsets[i];
         size = sizes[i];
      }

      /* Skip the hash-table lookup when the currently bound buffer is
       * being re-bound to the same point.
       */
      if (boundBufObj && boundBufObj->Name == buffers[i])
         bufObj = boundBufObj;
      else {
         bool error;
         bufObj = _mesa_multi_bind_lookup_bufferobj(ctx, buffers, i, caller,
                                                    &error);
         if (error)
            continue;
      }

      _mesa_set_transform_feedback_binding(ctx, tfObj, index, bufObj,
                                           offset, size);
   }

   _mesa_HashUnlockMaybeLocked(&ctx->Shared->BufferObjects,
                               ctx->BufferObjectsLocked);
}
4718
4719 static bool
error_check_bind_atomic_buffers(struct gl_context * ctx,GLuint first,GLsizei count,const char * caller)4720 error_check_bind_atomic_buffers(struct gl_context *ctx,
4721 GLuint first, GLsizei count,
4722 const char *caller)
4723 {
4724 if (!ctx->Extensions.ARB_shader_atomic_counters) {
4725 _mesa_error(ctx, GL_INVALID_ENUM,
4726 "%s(target=GL_ATOMIC_COUNTER_BUFFER)", caller);
4727 return false;
4728 }
4729
4730 /* The ARB_multi_bind_spec says:
4731 *
4732 * "An INVALID_OPERATION error is generated if <first> + <count> is
4733 * greater than the number of target-specific indexed binding points,
4734 * as described in section 6.7.1."
4735 */
4736 if (first + count > ctx->Const.MaxAtomicBufferBindings) {
4737 _mesa_error(ctx, GL_INVALID_OPERATION,
4738 "%s(first=%u + count=%d > the value of "
4739 "GL_MAX_ATOMIC_BUFFER_BINDINGS=%u)",
4740 caller, first, count, ctx->Const.MaxAtomicBufferBindings);
4741 return false;
4742 }
4743
4744 return true;
4745 }
4746
4747 /**
4748 * Unbind all atomic counter buffers in the range
4749 * <first> through <first>+<count>-1
4750 */
4751 static void
unbind_atomic_buffers(struct gl_context * ctx,GLuint first,GLsizei count)4752 unbind_atomic_buffers(struct gl_context *ctx, GLuint first, GLsizei count)
4753 {
4754 for (int i = 0; i < count; i++)
4755 set_buffer_binding(ctx, &ctx->AtomicBufferBindings[first + i],
4756 NULL, -1, -1, GL_TRUE, 0);
4757 }
4758
/**
 * Bind buffer objects, or sub-ranges of them, to the atomic counter
 * buffer binding points <first> .. <first>+<count>-1.
 *
 * Backs glBindBuffersBase/glBindBuffersRange with
 * target = GL_ATOMIC_COUNTER_BUFFER (ARB_multi_bind).
 *
 * \param buffers  array of <count> buffer names, or NULL to reset the
 *                 whole range to the unbound (zero) state
 * \param range    true for BindBuffersRange: <offsets>/<sizes> give the
 *                 sub-range of each buffer to bind; false for
 *                 BindBuffersBase (whole buffers, offsets/sizes ignored)
 * \param caller   API entry-point name, used in error messages
 */
static void
bind_atomic_buffers(struct gl_context *ctx,
                    GLuint first,
                    GLsizei count,
                    const GLuint *buffers,
                    bool range,
                    const GLintptr *offsets,
                    const GLsizeiptr *sizes,
                    const char *caller)
{
   if (!error_check_bind_atomic_buffers(ctx, first, count, caller))
     return;

   /* Assume that at least one binding will be changed */
   FLUSH_VERTICES(ctx, 0, 0);
   ctx->NewDriverState |= ctx->DriverFlags.NewAtomicBuffer;

   if (!buffers) {
      /* The ARB_multi_bind spec says:
       *
       *   "If <buffers> is NULL, all bindings from <first> through
       *    <first>+<count>-1 are reset to their unbound (zero) state.
       *    In this case, the offsets and sizes associated with the
       *    binding points are set to default values, ignoring
       *    <offsets> and <sizes>."
       */
      unbind_atomic_buffers(ctx, first, count);
      return;
   }

   /* Note that the error semantics for multi-bind commands differ from
    * those of other GL commands.
    *
    * The Issues section in the ARB_multi_bind spec says:
    *
    *    "(11) Typically, OpenGL specifies that if an error is generated by a
    *          command, that command has no effect.  This is somewhat
    *          unfortunate for multi-bind commands, because it would require a
    *          first pass to scan the entire list of bound objects for errors
    *          and then a second pass to actually perform the bindings.
    *          Should we have different error semantics?
    *
    *       RESOLVED:  Yes.  In this specification, when the parameters for
    *       one of the <count> binding points are invalid, that binding point
    *       is not updated and an error will be generated.  However, other
    *       binding points in the same command will be updated if their
    *       parameters are valid and no other error occurs."
    */

   /* Take the shared buffer-name hash lock once for the whole loop rather
    * than per lookup.
    */
   _mesa_HashLockMaybeLocked(&ctx->Shared->BufferObjects,
                             ctx->BufferObjectsLocked);

   for (int i = 0; i < count; i++) {
      struct gl_buffer_binding *binding =
         &ctx->AtomicBufferBindings[first + i];
      GLintptr offset = 0;
      GLsizeiptr size = 0;

      if (range) {
         /* Per-binding validation failures skip only this binding point
          * (see the error-semantics note above).
          */
         if (!bind_buffers_check_offset_and_size(ctx, i, offsets, sizes))
            continue;

         /* The ARB_multi_bind spec says:
          *
          *   "An INVALID_VALUE error is generated by BindBuffersRange if any
          *    pair of values in <offsets> and <sizes> does not respectively
          *    satisfy the constraints described for those parameters for the
          *    specified target, as described in section 6.7.1 (per binding)."
          *
          * Section 6.7.1 refers to table 6.5, which says:
          *
          *   "┌───────────────────────────────────────────────────────────────┐
          *    │ Atomic counter array bindings (see sec. 7.7.2)                │
          *    ├───────────────────────┬───────────────────────────────────────┤
          *    │    ...                │    ...                                │
          *    │ offset restriction    │ multiple of 4                         │
          *    │    ...                │    ...                                │
          *    │ size restriction      │ none                                  │
          *    └───────────────────────┴───────────────────────────────────────┘"
          */
         if (offsets[i] & (ATOMIC_COUNTER_SIZE - 1)) {
            _mesa_error(ctx, GL_INVALID_VALUE,
                        "glBindBuffersRange(offsets[%u]=%" PRId64
                        " is misaligned; it must be a multiple of %d when "
                        "target=GL_ATOMIC_COUNTER_BUFFER)",
                        i, (int64_t) offsets[i], ATOMIC_COUNTER_SIZE);
            continue;
         }

         offset = offsets[i];
         size = sizes[i];
      }

      set_buffer_multi_binding(ctx, buffers, i, caller,
                               binding, offset, size, range,
                               USAGE_ATOMIC_COUNTER_BUFFER);
   }

   _mesa_HashUnlockMaybeLocked(&ctx->Shared->BufferObjects,
                               ctx->BufferObjectsLocked);
}
4860
/**
 * Shared implementation of glBindBufferRange for all indexed targets.
 *
 * \param no_error  true for the KHR_no_error entry point: all parameter
 *                  validation is skipped and <target> must be one of the
 *                  four legal enums.
 *
 * buffer == 0 unbinds the given index (bufObj stays NULL).
 */
static ALWAYS_INLINE void
bind_buffer_range(GLenum target, GLuint index, GLuint buffer, GLintptr offset,
                  GLsizeiptr size, bool no_error)
{
   GET_CURRENT_CONTEXT(ctx);
   struct gl_buffer_object *bufObj;

   if (MESA_VERBOSE & VERBOSE_API) {
      /* NOTE(review): %lu + (unsigned long) truncates 64-bit offsets on
       * LLP64 platforms; debug output only.
       */
      _mesa_debug(ctx, "glBindBufferRange(%s, %u, %u, %lu, %lu)\n",
                  _mesa_enum_to_string(target), index, buffer,
                  (unsigned long) offset, (unsigned long) size);
   }

   if (buffer == 0) {
      bufObj = NULL;
   } else {
      /* Look the name up and, if it is unused, create the buffer object
       * (handle_bind_buffer_gen reports errors on failure).
       */
      bufObj = _mesa_lookup_bufferobj(ctx, buffer);
      if (!handle_bind_buffer_gen(ctx, buffer,
                                  &bufObj, "glBindBufferRange", no_error))
         return;
   }

   if (no_error) {
      /* KHR_no_error path: dispatch without validation. */
      switch (target) {
      case GL_TRANSFORM_FEEDBACK_BUFFER:
         _mesa_bind_buffer_range_xfb(ctx, ctx->TransformFeedback.CurrentObject,
                                     index, bufObj, offset, size);
         return;
      case GL_UNIFORM_BUFFER:
         bind_buffer_range_uniform_buffer(ctx, index, bufObj, offset, size);
         return;
      case GL_SHADER_STORAGE_BUFFER:
         bind_buffer_range_shader_storage_buffer(ctx, index, bufObj, offset,
                                                 size);
         return;
      case GL_ATOMIC_COUNTER_BUFFER:
         bind_buffer_range_atomic_buffer(ctx, index, bufObj, offset, size);
         return;
      default:
         unreachable("invalid BindBufferRange target with KHR_no_error");
      }
   } else {
      /* size must be positive when binding a real buffer; a zero name
       * (unbind) is exempt from the size check.
       */
      if (buffer != 0) {
         if (size <= 0) {
            _mesa_error(ctx, GL_INVALID_VALUE, "glBindBufferRange(size=%d)",
                        (int) size);
            return;
         }
      }

      switch (target) {
      case GL_TRANSFORM_FEEDBACK_BUFFER:
         if (!_mesa_validate_buffer_range_xfb(ctx,
                                              ctx->TransformFeedback.CurrentObject,
                                              index, bufObj, offset, size,
                                              false))
            return;

         _mesa_bind_buffer_range_xfb(ctx, ctx->TransformFeedback.CurrentObject,
                                     index, bufObj, offset, size);
         return;
      case GL_UNIFORM_BUFFER:
         bind_buffer_range_uniform_buffer_err(ctx, index, bufObj, offset,
                                              size);
         return;
      case GL_SHADER_STORAGE_BUFFER:
         bind_buffer_range_shader_storage_buffer_err(ctx, index, bufObj,
                                                     offset, size);
         return;
      case GL_ATOMIC_COUNTER_BUFFER:
         bind_buffer_range_atomic_buffer_err(ctx, index, bufObj,
                                             offset, size);
         return;
      default:
         _mesa_error(ctx, GL_INVALID_ENUM, "glBindBufferRange(target)");
         return;
      }
   }
}
4940
/** KHR_no_error entry point for glBindBufferRange: skips validation. */
void GLAPIENTRY
_mesa_BindBufferRange_no_error(GLenum target, GLuint index, GLuint buffer,
                               GLintptr offset, GLsizeiptr size)
{
   bind_buffer_range(target, index, buffer, offset, size, true);
}
4947
/** Validated entry point for glBindBufferRange. */
void GLAPIENTRY
_mesa_BindBufferRange(GLenum target, GLuint index,
                      GLuint buffer, GLintptr offset, GLsizeiptr size)
{
   bind_buffer_range(target, index, buffer, offset, size, false);
}
4954
/**
 * glBindBufferBase: bind the entire <buffer> to binding point <index> of
 * <target>.  buffer == 0 unbinds the index.
 */
void GLAPIENTRY
_mesa_BindBufferBase(GLenum target, GLuint index, GLuint buffer)
{
   GET_CURRENT_CONTEXT(ctx);
   struct gl_buffer_object *bufObj;

   if (MESA_VERBOSE & VERBOSE_API) {
      _mesa_debug(ctx, "glBindBufferBase(%s, %u, %u)\n",
                  _mesa_enum_to_string(target), index, buffer);
   }

   if (buffer == 0) {
      bufObj = NULL;
   } else {
      /* Look the name up and, if it is unused, create the buffer object
       * (handle_bind_buffer_gen reports errors on failure).
       */
      bufObj = _mesa_lookup_bufferobj(ctx, buffer);
      if (!handle_bind_buffer_gen(ctx, buffer,
                                  &bufObj, "glBindBufferBase", false))
         return;
   }

   /* Note that there's some oddness in the GL 3.1-GL 3.3 specifications with
    * regards to BindBufferBase.  It says (GL 3.1 core spec, page 63):
    *
    *     "BindBufferBase is equivalent to calling BindBufferRange with offset
    *      zero and size equal to the size of buffer."
    *
    * but it says for glGetIntegeri_v (GL 3.1 core spec, page 230):
    *
    *     "If the parameter (starting offset or size) was not specified when the
    *      buffer object was bound, zero is returned."
    *
    * What happens if the size of the buffer changes?  Does the size of the
    * buffer at the moment glBindBufferBase was called still play a role, like
    * the first quote would imply, or is the size meaningless in the
    * glBindBufferBase case like the second quote would suggest?  The GL 4.1
    * core spec page 45 says:
    *
    *     "It is equivalent to calling BindBufferRange with offset zero, while
    *      size is determined by the size of the bound buffer at the time the
    *      binding is used."
    *
    * My interpretation is that the GL 4.1 spec was a clarification of the
    * behavior, not a change.  In particular, this choice will only make
    * rendering work in cases where it would have had undefined results.
    */

   switch (target) {
   case GL_TRANSFORM_FEEDBACK_BUFFER:
      _mesa_bind_buffer_base_transform_feedback(ctx,
                                           ctx->TransformFeedback.CurrentObject,
                                           index, bufObj, false);
      return;
   case GL_UNIFORM_BUFFER:
      bind_buffer_base_uniform_buffer(ctx, index, bufObj);
      return;
   case GL_SHADER_STORAGE_BUFFER:
      bind_buffer_base_shader_storage_buffer(ctx, index, bufObj);
      return;
   case GL_ATOMIC_COUNTER_BUFFER:
      bind_buffer_base_atomic_buffer(ctx, index, bufObj);
      return;
   default:
      _mesa_error(ctx, GL_INVALID_ENUM, "glBindBufferBase(target)");
      return;
   }
}
5021
5022 void GLAPIENTRY
_mesa_BindBuffersRange(GLenum target,GLuint first,GLsizei count,const GLuint * buffers,const GLintptr * offsets,const GLsizeiptr * sizes)5023 _mesa_BindBuffersRange(GLenum target, GLuint first, GLsizei count,
5024 const GLuint *buffers,
5025 const GLintptr *offsets, const GLsizeiptr *sizes)
5026 {
5027 GET_CURRENT_CONTEXT(ctx);
5028
5029 if (MESA_VERBOSE & VERBOSE_API) {
5030 _mesa_debug(ctx, "glBindBuffersRange(%s, %u, %d, %p, %p, %p)\n",
5031 _mesa_enum_to_string(target), first, count,
5032 buffers, offsets, sizes);
5033 }
5034
5035 switch (target) {
5036 case GL_TRANSFORM_FEEDBACK_BUFFER:
5037 bind_xfb_buffers(ctx, first, count, buffers, true, offsets, sizes,
5038 "glBindBuffersRange");
5039 return;
5040 case GL_UNIFORM_BUFFER:
5041 bind_uniform_buffers(ctx, first, count, buffers, true, offsets, sizes,
5042 "glBindBuffersRange");
5043 return;
5044 case GL_SHADER_STORAGE_BUFFER:
5045 bind_shader_storage_buffers(ctx, first, count, buffers, true, offsets, sizes,
5046 "glBindBuffersRange");
5047 return;
5048 case GL_ATOMIC_COUNTER_BUFFER:
5049 bind_atomic_buffers(ctx, first, count, buffers, true, offsets, sizes,
5050 "glBindBuffersRange");
5051 return;
5052 default:
5053 _mesa_error(ctx, GL_INVALID_ENUM, "glBindBuffersRange(target=%s)",
5054 _mesa_enum_to_string(target));
5055 break;
5056 }
5057 }
5058
5059 void GLAPIENTRY
_mesa_BindBuffersBase(GLenum target,GLuint first,GLsizei count,const GLuint * buffers)5060 _mesa_BindBuffersBase(GLenum target, GLuint first, GLsizei count,
5061 const GLuint *buffers)
5062 {
5063 GET_CURRENT_CONTEXT(ctx);
5064
5065 if (MESA_VERBOSE & VERBOSE_API) {
5066 _mesa_debug(ctx, "glBindBuffersBase(%s, %u, %d, %p)\n",
5067 _mesa_enum_to_string(target), first, count, buffers);
5068 }
5069
5070 switch (target) {
5071 case GL_TRANSFORM_FEEDBACK_BUFFER:
5072 bind_xfb_buffers(ctx, first, count, buffers, false, NULL, NULL,
5073 "glBindBuffersBase");
5074 return;
5075 case GL_UNIFORM_BUFFER:
5076 bind_uniform_buffers(ctx, first, count, buffers, false, NULL, NULL,
5077 "glBindBuffersBase");
5078 return;
5079 case GL_SHADER_STORAGE_BUFFER:
5080 bind_shader_storage_buffers(ctx, first, count, buffers, false, NULL, NULL,
5081 "glBindBuffersBase");
5082 return;
5083 case GL_ATOMIC_COUNTER_BUFFER:
5084 bind_atomic_buffers(ctx, first, count, buffers, false, NULL, NULL,
5085 "glBindBuffersBase");
5086 return;
5087 default:
5088 _mesa_error(ctx, GL_INVALID_ENUM, "glBindBuffersBase(target=%s)",
5089 _mesa_enum_to_string(target));
5090 break;
5091 }
5092 }
5093
5094 /**
5095 * Called via glInvalidateBuffer(Sub)Data.
5096 */
5097 static void
bufferobj_invalidate(struct gl_context * ctx,struct gl_buffer_object * obj,GLintptr offset,GLsizeiptr size)5098 bufferobj_invalidate(struct gl_context *ctx,
5099 struct gl_buffer_object *obj,
5100 GLintptr offset,
5101 GLsizeiptr size)
5102 {
5103 struct pipe_context *pipe = ctx->pipe;
5104
5105 /* We ignore partial invalidates. */
5106 if (offset != 0 || size != obj->Size)
5107 return;
5108
5109 /* If the buffer is mapped, we can't invalidate it. */
5110 if (!obj->buffer || _mesa_bufferobj_mapped(obj, MAP_USER))
5111 return;
5112
5113 pipe->invalidate_resource(pipe, obj->buffer);
5114 }
5115
5116 static ALWAYS_INLINE void
invalidate_buffer_subdata(struct gl_context * ctx,struct gl_buffer_object * bufObj,GLintptr offset,GLsizeiptr length)5117 invalidate_buffer_subdata(struct gl_context *ctx,
5118 struct gl_buffer_object *bufObj, GLintptr offset,
5119 GLsizeiptr length)
5120 {
5121 if (ctx->has_invalidate_buffer)
5122 bufferobj_invalidate(ctx, bufObj, offset, length);
5123 }
5124
/**
 * KHR_no_error entry point for glInvalidateBufferSubData.
 *
 * No validation: <buffer> is assumed to name an existing buffer object
 * and <offset>/<length> to describe a valid unmapped range (no_error
 * contract).
 */
void GLAPIENTRY
_mesa_InvalidateBufferSubData_no_error(GLuint buffer, GLintptr offset,
                                       GLsizeiptr length)
{
   GET_CURRENT_CONTEXT(ctx);

   struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
   invalidate_buffer_subdata(ctx, bufObj, offset, length);
}
5134
5135 void GLAPIENTRY
_mesa_InvalidateBufferSubData(GLuint buffer,GLintptr offset,GLsizeiptr length)5136 _mesa_InvalidateBufferSubData(GLuint buffer, GLintptr offset,
5137 GLsizeiptr length)
5138 {
5139 GET_CURRENT_CONTEXT(ctx);
5140 struct gl_buffer_object *bufObj;
5141 const GLintptr end = offset + length;
5142
5143 /* Section 6.5 (Invalidating Buffer Data) of the OpenGL 4.5 (Compatibility
5144 * Profile) spec says:
5145 *
5146 * "An INVALID_VALUE error is generated if buffer is zero or is not the
5147 * name of an existing buffer object."
5148 */
5149 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
5150 if (!bufObj || bufObj == &DummyBufferObject) {
5151 _mesa_error(ctx, GL_INVALID_VALUE,
5152 "glInvalidateBufferSubData(name = %u) invalid object",
5153 buffer);
5154 return;
5155 }
5156
5157 /* The GL_ARB_invalidate_subdata spec says:
5158 *
5159 * "An INVALID_VALUE error is generated if <offset> or <length> is
5160 * negative, or if <offset> + <length> is greater than the value of
5161 * BUFFER_SIZE."
5162 */
5163 if (offset < 0 || length < 0 || end > bufObj->Size) {
5164 _mesa_error(ctx, GL_INVALID_VALUE,
5165 "glInvalidateBufferSubData(invalid offset or length)");
5166 return;
5167 }
5168
5169 /* The OpenGL 4.4 (Core Profile) spec says:
5170 *
5171 * "An INVALID_OPERATION error is generated if buffer is currently
5172 * mapped by MapBuffer or if the invalidate range intersects the range
5173 * currently mapped by MapBufferRange, unless it was mapped
5174 * with MAP_PERSISTENT_BIT set in the MapBufferRange access flags."
5175 */
5176 if (!(bufObj->Mappings[MAP_USER].AccessFlags & GL_MAP_PERSISTENT_BIT) &&
5177 bufferobj_range_mapped(bufObj, offset, length)) {
5178 _mesa_error(ctx, GL_INVALID_OPERATION,
5179 "glInvalidateBufferSubData(intersection with mapped "
5180 "range)");
5181 return;
5182 }
5183
5184 invalidate_buffer_subdata(ctx, bufObj, offset, length);
5185 }
5186
/**
 * KHR_no_error entry point for glInvalidateBufferData.
 *
 * No validation: <buffer> is assumed to name an existing, unmapped buffer
 * object (bufObj->Size is dereferenced unconditionally).
 */
void GLAPIENTRY
_mesa_InvalidateBufferData_no_error(GLuint buffer)
{
   GET_CURRENT_CONTEXT(ctx);

   struct gl_buffer_object *bufObj =_mesa_lookup_bufferobj(ctx, buffer);
   invalidate_buffer_subdata(ctx, bufObj, 0, bufObj->Size);
}
5195
/**
 * glInvalidateBufferData: tell the driver the entire contents of a buffer
 * may be discarded.
 */
void GLAPIENTRY
_mesa_InvalidateBufferData(GLuint buffer)
{
   GET_CURRENT_CONTEXT(ctx);
   struct gl_buffer_object *bufObj;

   /* Section 6.5 (Invalidating Buffer Data) of the OpenGL 4.5 (Compatibility
    * Profile) spec says:
    *
    *     "An INVALID_VALUE error is generated if buffer is zero or is not the
    *     name of an existing buffer object."
    */
   bufObj = _mesa_lookup_bufferobj(ctx, buffer);
   if (!bufObj || bufObj == &DummyBufferObject) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "glInvalidateBufferData(name = %u) invalid object",
                  buffer);
      return;
   }

   /* The OpenGL 4.4 (Core Profile) spec says:
    *
    *     "An INVALID_OPERATION error is generated if buffer is currently
    *     mapped by MapBuffer or if the invalidate range intersects the range
    *     currently mapped by MapBufferRange, unless it was mapped
    *     with MAP_PERSISTENT_BIT set in the MapBufferRange access flags."
    */
   if (_mesa_check_disallowed_mapping(bufObj)) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glInvalidateBufferData(intersection with mapped "
                  "range)");
      return;
   }

   invalidate_buffer_subdata(ctx, bufObj, 0, bufObj->Size);
}
5232
5233 static void
buffer_page_commitment(struct gl_context * ctx,struct gl_buffer_object * bufferObj,GLintptr offset,GLsizeiptr size,GLboolean commit,const char * func)5234 buffer_page_commitment(struct gl_context *ctx,
5235 struct gl_buffer_object *bufferObj,
5236 GLintptr offset, GLsizeiptr size,
5237 GLboolean commit, const char *func)
5238 {
5239 if (!(bufferObj->StorageFlags & GL_SPARSE_STORAGE_BIT_ARB)) {
5240 _mesa_error(ctx, GL_INVALID_OPERATION, "%s(not a sparse buffer object)",
5241 func);
5242 return;
5243 }
5244
5245 if (size < 0 || size > bufferObj->Size ||
5246 offset < 0 || offset > bufferObj->Size - size) {
5247 _mesa_error(ctx, GL_INVALID_VALUE, "%s(out of bounds)",
5248 func);
5249 return;
5250 }
5251
5252 /* The GL_ARB_sparse_buffer extension specification says:
5253 *
5254 * "INVALID_VALUE is generated by BufferPageCommitmentARB if <offset> is
5255 * not an integer multiple of SPARSE_BUFFER_PAGE_SIZE_ARB, or if <size>
5256 * is not an integer multiple of SPARSE_BUFFER_PAGE_SIZE_ARB and does
5257 * not extend to the end of the buffer's data store."
5258 */
5259 if (offset % ctx->Const.SparseBufferPageSize != 0) {
5260 _mesa_error(ctx, GL_INVALID_VALUE, "%s(offset not aligned to page size)",
5261 func);
5262 return;
5263 }
5264
5265 if (size % ctx->Const.SparseBufferPageSize != 0 &&
5266 offset + size != bufferObj->Size) {
5267 _mesa_error(ctx, GL_INVALID_VALUE, "%s(size not aligned to page size)",
5268 func);
5269 return;
5270 }
5271
5272 struct pipe_context *pipe = ctx->pipe;
5273 struct pipe_box box;
5274
5275 u_box_1d(offset, size, &box);
5276
5277 if (!pipe->resource_commit(pipe, bufferObj->buffer, 0, &box, commit)) {
5278 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glBufferPageCommitmentARB(out of memory)");
5279 }
5280 }
5281
5282 void GLAPIENTRY
_mesa_BufferPageCommitmentARB(GLenum target,GLintptr offset,GLsizeiptr size,GLboolean commit)5283 _mesa_BufferPageCommitmentARB(GLenum target, GLintptr offset, GLsizeiptr size,
5284 GLboolean commit)
5285 {
5286 GET_CURRENT_CONTEXT(ctx);
5287 struct gl_buffer_object *bufferObj;
5288
5289 bufferObj = get_buffer(ctx, "glBufferPageCommitmentARB", target,
5290 GL_INVALID_ENUM);
5291 if (!bufferObj)
5292 return;
5293
5294 buffer_page_commitment(ctx, bufferObj, offset, size, commit,
5295 "glBufferPageCommitmentARB");
5296 }
5297
/**
 * glNamedBufferPageCommitmentARB: DSA page commitment on the named buffer.
 */
void GLAPIENTRY
_mesa_NamedBufferPageCommitmentARB(GLuint buffer, GLintptr offset,
                                   GLsizeiptr size, GLboolean commit)
{
   GET_CURRENT_CONTEXT(ctx);
   struct gl_buffer_object *bufferObj;

   bufferObj = _mesa_lookup_bufferobj(ctx, buffer);
   if (!bufferObj || bufferObj == &DummyBufferObject) {
      /* Note: the extension spec is not clear about the expected error value. */
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "glNamedBufferPageCommitmentARB(name = %u) invalid object",
                  buffer);
      return;
   }

   buffer_page_commitment(ctx, bufferObj, offset, size, commit,
                          "glNamedBufferPageCommitmentARB");
}
5317
5318 void GLAPIENTRY
_mesa_NamedBufferPageCommitmentEXT(GLuint buffer,GLintptr offset,GLsizeiptr size,GLboolean commit)5319 _mesa_NamedBufferPageCommitmentEXT(GLuint buffer, GLintptr offset,
5320 GLsizeiptr size, GLboolean commit)
5321 {
5322 GET_CURRENT_CONTEXT(ctx);
5323 struct gl_buffer_object *bufferObj;
5324
5325 /* Use NamedBuffer* functions logic from EXT_direct_state_access */
5326 if (buffer != 0) {
5327 bufferObj = _mesa_lookup_bufferobj(ctx, buffer);
5328 if (!handle_bind_buffer_gen(ctx, buffer, &bufferObj,
5329 "glNamedBufferPageCommitmentEXT", false))
5330 return;
5331 } else {
5332 /* GL_EXT_direct_state_access says about NamedBuffer* functions:
5333 *
5334 * There is no buffer corresponding to the name zero, these commands
5335 * generate the INVALID_OPERATION error if the buffer parameter is
5336 * zero.
5337 */
5338 _mesa_error(ctx, GL_INVALID_OPERATION,
5339 "glNamedBufferPageCommitmentEXT(buffer = 0)");
5340 return;
5341 }
5342 buffer_page_commitment(ctx, bufferObj, offset, size, commit,
5343 "glNamedBufferPageCommitmentEXT");
5344 }
5345