/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef U_INLINES_H
#define U_INLINES_H

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_shader_tokens.h"
#include "pipe/p_state.h"
#include "pipe/p_screen.h"
#include "util/compiler.h"
#include "util/format/u_format.h"
#include "util/u_debug.h"
#include "util/u_debug_describe.h"
#include "util/u_debug_refcnt.h"
#include "util/u_atomic.h"
#include "util/u_box.h"
#include "util/u_math.h"


#ifdef __cplusplus
extern "C" {
#endif


/*
 * Reference counting helper functions.
 */


static inline void
pipe_reference_init(struct pipe_reference *dst, unsigned count)
{
   dst->count = count;
}

static inline bool
pipe_is_referenced(struct pipe_reference *src)
{
   return p_atomic_read(&src->count) != 0;
}

/**
 * Update reference counting.
 * The old thing pointed to, if any, will be unreferenced.
 * Both 'dst' and 'src' may be NULL.
 * \return TRUE if the object's refcount hits zero and should be destroyed.
 */
static inline bool
pipe_reference_described(struct pipe_reference *dst,
                         struct pipe_reference *src,
                         debug_reference_descriptor get_desc)
{
   if (dst != src) {
      /* bump the src.count first */
      if (src) {
         ASSERTED int count = p_atomic_inc_return(&src->count);
         assert(count != 1); /* src had to be referenced */
         debug_reference(src, get_desc, 1);
      }

      if (dst) {
         int count = p_atomic_dec_return(&dst->count);
         assert(count != -1); /* dst had to be referenced */
         debug_reference(dst, get_desc, -1);
         if (!count)
            return true;
      }
   }

   return false;
}

static inline bool
pipe_reference(struct pipe_reference *dst, struct pipe_reference *src)
{
   return pipe_reference_described(dst, src,
                                   (debug_reference_descriptor)
                                   debug_describe_reference);
}
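
/* Usage sketch (illustrative only, not part of the API): an object that
 * embeds a struct pipe_reference is typically managed with the pattern
 * below.  The names 'my_object', 'my_object_destroy' and 'ptr' are
 * hypothetical placeholders.
 *
 *    struct my_object { struct pipe_reference reference; ... };
 *
 *    struct my_object *obj = calloc(1, sizeof(*obj));
 *    pipe_reference_init(&obj->reference, 1);    // start with refcount 1
 *
 *    // Point 'ptr' at 'obj'; drop whatever 'ptr' referenced before.
 *    struct my_object *old = ptr;
 *    if (pipe_reference(old ? &old->reference : NULL,
 *                       obj ? &obj->reference : NULL))
 *       my_object_destroy(old);                  // old refcount hit zero
 *    ptr = obj;
 */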

static inline void
pipe_surface_reference(struct pipe_surface **dst, struct pipe_surface *src)
{
   struct pipe_surface *old_dst = *dst;

   if (pipe_reference_described(old_dst ? &old_dst->reference : NULL,
                                src ? &src->reference : NULL,
                                (debug_reference_descriptor)
                                debug_describe_surface))
      old_dst->context->surface_destroy(old_dst->context, old_dst);
   *dst = src;
}

/**
 * Similar to pipe_surface_reference() but always set the pointer to NULL
 * and pass in an explicit context.  The explicit context avoids the problem
 * of using a deleted context's surface_destroy() method when freeing a surface
 * that's shared by multiple contexts.
 */
static inline void
pipe_surface_release(struct pipe_context *pipe, struct pipe_surface **ptr)
{
   struct pipe_surface *old = *ptr;

   if (pipe_reference_described(&old->reference, NULL,
                                (debug_reference_descriptor)
                                debug_describe_surface))
      pipe->surface_destroy(pipe, old);
   *ptr = NULL;
}

static inline void
pipe_resource_destroy(struct pipe_resource *res)
{
   /* Avoid recursion, which would prevent inlining this function */
   do {
      struct pipe_resource *next = res->next;

      res->screen->resource_destroy(res->screen, res);
      res = next;
   } while (pipe_reference_described(res ? &res->reference : NULL,
                                     NULL,
                                     (debug_reference_descriptor)
                                     debug_describe_resource));
}

static inline void
pipe_resource_reference(struct pipe_resource **dst, struct pipe_resource *src)
{
   struct pipe_resource *old_dst = *dst;

   if (pipe_reference_described(old_dst ? &old_dst->reference : NULL,
                                src ? &src->reference : NULL,
                                (debug_reference_descriptor)
                                debug_describe_resource)) {
      pipe_resource_destroy(old_dst);
   }
   *dst = src;
}
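
/* Usage sketch (illustrative only): holding and releasing a resource
 * reference.  'tex' stands in for any struct pipe_resource the caller
 * already owns.
 *
 *    struct pipe_resource *held = NULL;
 *    pipe_resource_reference(&held, tex);    // 'held' now owns a reference
 *    ...
 *    pipe_resource_reference(&held, NULL);   // drop it; may destroy 'tex'
 */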

/**
 * Subtract the given number of references.
 */
static inline void
pipe_drop_resource_references(struct pipe_resource *dst, int num_refs)
{
   int count = p_atomic_add_return(&dst->reference.count, -num_refs);

   assert(count >= 0);
   /* Underflows shouldn't happen, but let's be safe. */
   if (count <= 0)
      pipe_resource_destroy(dst);
}

/**
 * Same as pipe_surface_release, but used when pipe_context doesn't exist
 * anymore.
 */
static inline void
pipe_surface_release_no_context(struct pipe_surface **ptr)
{
   struct pipe_surface *surf = *ptr;

   if (pipe_reference_described(&surf->reference, NULL,
                                (debug_reference_descriptor)
                                debug_describe_surface)) {
      /* trivially destroy pipe_surface */
      pipe_resource_reference(&surf->texture, NULL);
      free(surf);
   }
   *ptr = NULL;
}

/**
 * Set *dst to \p src with proper reference counting.
 *
 * The caller must guarantee that \p src and *dst were created in
 * the same context (if they exist), and that this must be the current context.
 */
static inline void
pipe_sampler_view_reference(struct pipe_sampler_view **dst,
                            struct pipe_sampler_view *src)
{
   struct pipe_sampler_view *old_dst = *dst;

   if (pipe_reference_described(old_dst ? &old_dst->reference : NULL,
                                src ? &src->reference : NULL,
                                (debug_reference_descriptor)
                                debug_describe_sampler_view))
      old_dst->context->sampler_view_destroy(old_dst->context, old_dst);
   *dst = src;
}

static inline void
pipe_so_target_reference(struct pipe_stream_output_target **dst,
                         struct pipe_stream_output_target *src)
{
   struct pipe_stream_output_target *old_dst = *dst;

   if (pipe_reference_described(old_dst ? &old_dst->reference : NULL,
                                src ? &src->reference : NULL,
                                (debug_reference_descriptor)debug_describe_so_target))
      old_dst->context->stream_output_target_destroy(old_dst->context, old_dst);
   *dst = src;
}

static inline void
pipe_vertex_state_reference(struct pipe_vertex_state **dst,
                            struct pipe_vertex_state *src)
{
   struct pipe_vertex_state *old_dst = *dst;

   if (pipe_reference(old_dst ? &old_dst->reference : NULL,
                      src ? &src->reference : NULL))
      old_dst->screen->vertex_state_destroy(old_dst->screen, old_dst);
   *dst = src;
}

static inline void
pipe_vertex_buffer_unreference(struct pipe_vertex_buffer *dst)
{
   if (dst->is_user_buffer)
      dst->buffer.user = NULL;
   else
      pipe_resource_reference(&dst->buffer.resource, NULL);
}

static inline void
pipe_vertex_buffer_reference(struct pipe_vertex_buffer *dst,
                             const struct pipe_vertex_buffer *src)
{
   if (dst->buffer.resource == src->buffer.resource) {
      /* Just copy the fields, don't touch reference counts. */
      dst->is_user_buffer = src->is_user_buffer;
      dst->buffer_offset = src->buffer_offset;
      return;
   }

   pipe_vertex_buffer_unreference(dst);
   /* Don't use memcpy because there is a hole between variables.
    * dst can be used as a hash key.
    */
   dst->is_user_buffer = src->is_user_buffer;
   dst->buffer_offset = src->buffer_offset;

   if (src->is_user_buffer)
      dst->buffer.user = src->buffer.user;
   else
      pipe_resource_reference(&dst->buffer.resource, src->buffer.resource);
}

static inline void
pipe_surface_reset(struct pipe_context *ctx, struct pipe_surface* ps,
                   struct pipe_resource *pt, unsigned level, unsigned layer)
{
   pipe_resource_reference(&ps->texture, pt);
   ps->format = pt->format;
   ps->width = u_minify(pt->width0, level);
   ps->height = u_minify(pt->height0, level);
   ps->u.tex.level = level;
   ps->u.tex.first_layer = ps->u.tex.last_layer = layer;
   ps->context = ctx;
}

static inline void
pipe_surface_init(struct pipe_context *ctx, struct pipe_surface* ps,
                  struct pipe_resource *pt, unsigned level, unsigned layer)
{
   ps->texture = 0;
   pipe_reference_init(&ps->reference, 1);
   pipe_surface_reset(ctx, ps, pt, level, layer);
}
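
/* Usage sketch (illustrative only): initializing a caller-owned pipe_surface
 * for mip level 0, layer 0 of a texture.  'ctx' and 'tex' are assumed to be
 * a valid context/resource pair created together.
 *
 *    struct pipe_surface surf;
 *    memset(&surf, 0, sizeof(surf));
 *    pipe_surface_init(ctx, &surf, tex, 0, 0);    // takes a reference on 'tex'
 *    ...
 *    pipe_resource_reference(&surf.texture, NULL);  // release that reference
 */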

/* Return true if the surfaces are equal. */
static inline bool
pipe_surface_equal(struct pipe_surface *s1, struct pipe_surface *s2)
{
   return s1->texture == s2->texture &&
          s1->format == s2->format &&
          (s1->texture->target != PIPE_BUFFER ||
           (s1->u.buf.first_element == s2->u.buf.first_element &&
            s1->u.buf.last_element == s2->u.buf.last_element)) &&
          (s1->texture->target == PIPE_BUFFER ||
           (s1->u.tex.level == s2->u.tex.level &&
            s1->u.tex.first_layer == s2->u.tex.first_layer &&
            s1->u.tex.last_layer == s2->u.tex.last_layer));
}

/*
 * Convenience wrappers for screen buffer functions.
 */


static inline unsigned
pipe_buffer_size(const struct pipe_resource *buffer)
{
   return buffer->width0;
}


/**
 * Create a new resource.
 * \param bind bitmask of PIPE_BIND_x flags
 * \param usage a PIPE_USAGE_x value
 */
static inline struct pipe_resource *
pipe_buffer_create(struct pipe_screen *screen,
                   unsigned bind,
                   enum pipe_resource_usage usage,
                   unsigned size)
{
   struct pipe_resource buffer;
   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
   buffer.bind = bind;
   buffer.usage = usage;
   buffer.flags = 0;
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return screen->resource_create(screen, &buffer);
}
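
/* Usage sketch (illustrative only): creating a 64 KiB vertex buffer.  The
 * bind/usage flags shown are just one plausible combination.
 *
 *    struct pipe_resource *buf =
 *       pipe_buffer_create(screen, PIPE_BIND_VERTEX_BUFFER,
 *                          PIPE_USAGE_STREAM, 64 * 1024);
 *    if (!buf)
 *       return;                               // allocation failed
 *    ...
 *    pipe_resource_reference(&buf, NULL);     // release when done
 */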


static inline struct pipe_resource *
pipe_buffer_create_const0(struct pipe_screen *screen,
                          unsigned bind,
                          enum pipe_resource_usage usage,
                          unsigned size)
{
   struct pipe_resource buffer;
   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM;
   buffer.bind = bind;
   buffer.usage = usage;
   buffer.flags = screen->get_param(screen, PIPE_CAP_CONSTBUF0_FLAGS);
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return screen->resource_create(screen, &buffer);
}


/**
 * Map a range of a resource.
 * \param offset start of region, in bytes
 * \param length size of region, in bytes
 * \param access bitmask of PIPE_MAP_x flags
 * \param transfer returns a transfer object
 */
static inline void *
pipe_buffer_map_range(struct pipe_context *pipe,
                      struct pipe_resource *buffer,
                      unsigned offset,
                      unsigned length,
                      unsigned access,
                      struct pipe_transfer **transfer)
{
   struct pipe_box box;
   void *map;

   assert(offset < buffer->width0);
   assert(offset + length <= buffer->width0);
   assert(length);

   u_box_1d(offset, length, &box);

   map = pipe->buffer_map(pipe, buffer, 0, access, &box, transfer);
   if (!map) {
      return NULL;
   }

   return map;
}


/**
 * Map whole resource.
 * \param access bitmask of PIPE_MAP_x flags
 * \param transfer returns a transfer object
 */
static inline void *
pipe_buffer_map(struct pipe_context *pipe,
                struct pipe_resource *buffer,
                unsigned access,
                struct pipe_transfer **transfer)
{
   return pipe_buffer_map_range(pipe, buffer, 0, buffer->width0,
                                access, transfer);
}


static inline void
pipe_buffer_unmap(struct pipe_context *pipe,
                  struct pipe_transfer *transfer)
{
   pipe->buffer_unmap(pipe, transfer);
}
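
/* Usage sketch (illustrative only): mapping the first 256 bytes of a buffer
 * for writing, then unmapping it.  'buf' is assumed to be at least 256 bytes.
 *
 *    struct pipe_transfer *xfer;
 *    uint8_t *ptr = (uint8_t *)
 *       pipe_buffer_map_range(pipe, buf, 0, 256, PIPE_MAP_WRITE, &xfer);
 *    if (ptr) {
 *       memset(ptr, 0, 256);
 *       pipe_buffer_unmap(pipe, xfer);
 *    }
 */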

static inline void
pipe_buffer_flush_mapped_range(struct pipe_context *pipe,
                               struct pipe_transfer *transfer,
                               unsigned offset,
                               unsigned length)
{
   struct pipe_box box;
   int transfer_offset;

   assert(length);
   assert(transfer->box.x <= (int) offset);
   assert((int) (offset + length) <= transfer->box.x + transfer->box.width);

   /* Match old screen->buffer_flush_mapped_range() behaviour, where
    * offset parameter is relative to the start of the buffer, not the
    * mapped range.
    */
   transfer_offset = offset - transfer->box.x;

   u_box_1d(transfer_offset, length, &box);

   pipe->transfer_flush_region(pipe, transfer, &box);
}

static inline void
pipe_buffer_write(struct pipe_context *pipe,
                  struct pipe_resource *buf,
                  unsigned offset,
                  unsigned size,
                  const void *data)
{
   /* Don't set any other usage bits. Drivers should derive them. */
   pipe->buffer_subdata(pipe, buf, PIPE_MAP_WRITE, offset, size, data);
}

/**
 * Special case for writing non-overlapping ranges.
 *
 * We can avoid GPU/CPU synchronization when writing a range that has never
 * been written before.
 */
static inline void
pipe_buffer_write_nooverlap(struct pipe_context *pipe,
                            struct pipe_resource *buf,
                            unsigned offset, unsigned size,
                            const void *data)
{
   pipe->buffer_subdata(pipe, buf,
                        (PIPE_MAP_WRITE |
                         PIPE_MAP_UNSYNCHRONIZED),
                        offset, size, data);
}

/**
 * Utility for simplifying pipe_context::resource_copy_region calls
 */
static inline void
pipe_buffer_copy(struct pipe_context *pipe,
                 struct pipe_resource *dst,
                 struct pipe_resource *src,
                 unsigned dst_offset,
                 unsigned src_offset,
                 unsigned size)
{
   struct pipe_box box;
   u_box_1d(src_offset, size, &box);
   pipe->resource_copy_region(pipe, dst, 0, dst_offset, 0, 0, src, 0, &box);
}

/**
 * Create a new resource and immediately put data into it.
 * \param bind bitmask of PIPE_BIND_x flags
 * \param usage a PIPE_USAGE_x value
 */
static inline struct pipe_resource *
pipe_buffer_create_with_data(struct pipe_context *pipe,
                             unsigned bind,
                             enum pipe_resource_usage usage,
                             unsigned size,
                             const void *ptr)
{
   struct pipe_resource *res = pipe_buffer_create(pipe->screen,
                                                  bind, usage, size);
   pipe_buffer_write_nooverlap(pipe, res, 0, size, ptr);
   return res;
}

static inline void
pipe_buffer_read(struct pipe_context *pipe,
                 struct pipe_resource *buf,
                 unsigned offset,
                 unsigned size,
                 void *data)
{
   struct pipe_transfer *src_transfer;
   uint8_t *map;

   map = (uint8_t *) pipe_buffer_map_range(pipe,
                                           buf,
                                           offset, size,
                                           PIPE_MAP_READ,
                                           &src_transfer);
   if (!map)
      return;

   memcpy(data, map, size);
   pipe_buffer_unmap(pipe, src_transfer);
}
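
/* Usage sketch (illustrative only): round-tripping a small array through a
 * buffer with the write/read helpers above.
 *
 *    float in[4] = {0.0f, 1.0f, 2.0f, 3.0f}, out[4];
 *    pipe_buffer_write(pipe, buf, 0, sizeof(in), in);
 *    pipe_buffer_read(pipe, buf, 0, sizeof(out), out);   // may stall the GPU
 */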


/**
 * Map a resource for reading/writing.
 * \param access bitmask of PIPE_MAP_x flags
 */
static inline void *
pipe_texture_map(struct pipe_context *context,
                 struct pipe_resource *resource,
                 unsigned level, unsigned layer,
                 unsigned access,
                 unsigned x, unsigned y,
                 unsigned w, unsigned h,
                 struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_2d_zslice(x, y, layer, w, h, &box);
   return context->texture_map(context, resource, level, access,
                               &box, transfer);
}


/**
 * Map a 3D (texture) resource for reading/writing.
 * \param access bitmask of PIPE_MAP_x flags
 */
static inline void *
pipe_texture_map_3d(struct pipe_context *context,
                    struct pipe_resource *resource,
                    unsigned level,
                    unsigned access,
                    unsigned x, unsigned y, unsigned z,
                    unsigned w, unsigned h, unsigned d,
                    struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_3d(x, y, z, w, h, d, &box);
   return context->texture_map(context, resource, level, access,
                               &box, transfer);
}

static inline void
pipe_texture_unmap(struct pipe_context *context,
                   struct pipe_transfer *transfer)
{
   context->texture_unmap(context, transfer);
}
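
/* Usage sketch (illustrative only): reading back a 4x4 region of mip level 0,
 * layer 0 of a 2D texture.  'tex' is assumed to be CPU-mappable.
 *
 *    struct pipe_transfer *xfer;
 *    void *data = pipe_texture_map(ctx, tex, 0, 0, PIPE_MAP_READ,
 *                                  0, 0, 4, 4, &xfer);
 *    if (data) {
 *       // successive rows are xfer->stride bytes apart
 *       pipe_texture_unmap(ctx, xfer);
 *    }
 */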

static inline void
pipe_set_constant_buffer(struct pipe_context *pipe,
                         enum pipe_shader_type shader, uint index,
                         struct pipe_resource *buf)
{
   if (buf) {
      struct pipe_constant_buffer cb;
      cb.buffer = buf;
      cb.buffer_offset = 0;
      cb.buffer_size = buf->width0;
      cb.user_buffer = NULL;
      pipe->set_constant_buffer(pipe, shader, index, false, &cb);
   } else {
      pipe->set_constant_buffer(pipe, shader, index, false, NULL);
   }
}
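
/* Usage sketch (illustrative only): binding a whole buffer as constant
 * buffer slot 0 of the fragment shader, then unbinding it.
 *
 *    pipe_set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, buf);
 *    ...
 *    pipe_set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, NULL);
 */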


/**
 * Get the polygon offset enable/disable flag for the given polygon fill mode.
 * \param fill_mode one of PIPE_POLYGON_MODE_POINT/LINE/FILL
 */
static inline bool
util_get_offset(const struct pipe_rasterizer_state *templ,
                unsigned fill_mode)
{
   switch(fill_mode) {
   case PIPE_POLYGON_MODE_POINT:
      return templ->offset_point;
   case PIPE_POLYGON_MODE_LINE:
      return templ->offset_line;
   case PIPE_POLYGON_MODE_FILL:
      return templ->offset_tri;
   default:
      assert(0);
      return false;
   }
}

static inline float
util_get_min_point_size(const struct pipe_rasterizer_state *state)
{
   /* The point size should be clamped to this value at the rasterizer stage.
    */
   return !state->point_quad_rasterization &&
          !state->point_smooth &&
          !state->multisample ? 1.0f : 0.0f;
}

static inline void
util_query_clear_result(union pipe_query_result *result, unsigned type)
{
   switch (type) {
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
   case PIPE_QUERY_GPU_FINISHED:
      result->b = false;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIME_ELAPSED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      result->u64 = 0;
      break;
   case PIPE_QUERY_SO_STATISTICS:
      memset(&result->so_statistics, 0, sizeof(result->so_statistics));
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      memset(&result->timestamp_disjoint, 0, sizeof(result->timestamp_disjoint));
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      memset(&result->pipeline_statistics, 0, sizeof(result->pipeline_statistics));
      break;
   default:
      memset(result, 0, sizeof(*result));
   }
}

/** Convert PIPE_TEXTURE_x to TGSI_TEXTURE_x */
static inline enum tgsi_texture_type
util_pipe_tex_to_tgsi_tex(enum pipe_texture_target pipe_tex_target,
                          unsigned nr_samples)
{
   switch (pipe_tex_target) {
   case PIPE_BUFFER:
      return TGSI_TEXTURE_BUFFER;

   case PIPE_TEXTURE_1D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D;

   case PIPE_TEXTURE_2D:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_MSAA : TGSI_TEXTURE_2D;

   case PIPE_TEXTURE_RECT:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_RECT;

   case PIPE_TEXTURE_3D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_3D;

   case PIPE_TEXTURE_CUBE:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_CUBE;

   case PIPE_TEXTURE_1D_ARRAY:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D_ARRAY;

   case PIPE_TEXTURE_2D_ARRAY:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_ARRAY_MSAA :
                              TGSI_TEXTURE_2D_ARRAY;

   case PIPE_TEXTURE_CUBE_ARRAY:
      return TGSI_TEXTURE_CUBE_ARRAY;

   default:
      assert(0 && "unexpected texture target");
      return TGSI_TEXTURE_UNKNOWN;
   }
}


static inline void
util_copy_constant_buffer(struct pipe_constant_buffer *dst,
                          const struct pipe_constant_buffer *src,
                          bool take_ownership)
{
   if (src) {
      if (take_ownership) {
         pipe_resource_reference(&dst->buffer, NULL);
         dst->buffer = src->buffer;
      } else {
         pipe_resource_reference(&dst->buffer, src->buffer);
      }
      dst->buffer_offset = src->buffer_offset;
      dst->buffer_size = src->buffer_size;
      dst->user_buffer = src->user_buffer;
   }
   else {
      pipe_resource_reference(&dst->buffer, NULL);
      dst->buffer_offset = 0;
      dst->buffer_size = 0;
      dst->user_buffer = NULL;
   }
}

static inline void
util_copy_shader_buffer(struct pipe_shader_buffer *dst,
                        const struct pipe_shader_buffer *src)
{
   if (src) {
      pipe_resource_reference(&dst->buffer, src->buffer);
      dst->buffer_offset = src->buffer_offset;
      dst->buffer_size = src->buffer_size;
   }
   else {
      pipe_resource_reference(&dst->buffer, NULL);
      dst->buffer_offset = 0;
      dst->buffer_size = 0;
   }
}

static inline void
util_copy_image_view(struct pipe_image_view *dst,
                     const struct pipe_image_view *src)
{
   if (src) {
      pipe_resource_reference(&dst->resource, src->resource);
      dst->format = src->format;
      dst->access = src->access;
      dst->shader_access = src->shader_access;
      dst->u = src->u;
   } else {
      pipe_resource_reference(&dst->resource, NULL);
      dst->format = PIPE_FORMAT_NONE;
      dst->access = 0;
      dst->shader_access = 0;
      memset(&dst->u, 0, sizeof(dst->u));
   }
}

static inline unsigned
util_max_layer(const struct pipe_resource *r, unsigned level)
{
   switch (r->target) {
   case PIPE_TEXTURE_3D:
      return u_minify(r->depth0, level) - 1;
   case PIPE_TEXTURE_CUBE:
      assert(r->array_size == 6);
      FALLTHROUGH;
   case PIPE_TEXTURE_1D_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      return r->array_size - 1;
   default:
      return 0;
   }
}

static inline unsigned
util_num_layers(const struct pipe_resource *r, unsigned level)
{
   return util_max_layer(r, level) + 1;
}

static inline bool
util_texrange_covers_whole_level(const struct pipe_resource *tex,
                                 unsigned level, unsigned x, unsigned y,
                                 unsigned z, unsigned width,
                                 unsigned height, unsigned depth)
{
   return x == 0 && y == 0 && z == 0 &&
          width == u_minify(tex->width0, level) &&
          height == u_minify(tex->height0, level) &&
          depth == util_num_layers(tex, level);
}

/**
 * Returns true if the blit will fully initialize all pixels in the resource.
 */
static inline bool
util_blit_covers_whole_resource(const struct pipe_blit_info *info)
{
   /* No conditional rendering or scissoring. (We assume that the caller would
    * have dropped any redundant scissoring)
    */
   if (info->scissor_enable || info->window_rectangle_include ||
       info->render_condition_enable || info->alpha_blend)
      return false;

   const struct pipe_resource *dst = info->dst.resource;
   /* A single blit can't initialize a miptree. */
   if (dst->last_level != 0)
      return false;

   assert(info->dst.level == 0);

   /* Make sure the dst box covers the whole resource. */
   if (!(util_texrange_covers_whole_level(dst, 0,
                                          0, 0, 0,
                                          info->dst.box.width,
                                          info->dst.box.height,
                                          info->dst.box.depth))) {
      return false;
   }

   /* Make sure the mask actually updates all the channels present in the dst format. */
   if (info->mask & PIPE_MASK_RGBA) {
      if ((info->mask & PIPE_MASK_RGBA) != PIPE_MASK_RGBA)
         return false;
   }

   if (info->mask & PIPE_MASK_ZS) {
      const struct util_format_description *format_desc =
         util_format_description(info->dst.format);
      uint32_t dst_has = 0;
      if (util_format_has_depth(format_desc))
         dst_has |= PIPE_MASK_Z;
      if (util_format_has_stencil(format_desc))
         dst_has |= PIPE_MASK_S;
      if (dst_has & ~(info->mask & PIPE_MASK_ZS))
         return false;
   }

   return true;
}

static inline bool
util_logicop_reads_dest(enum pipe_logicop op)
{
   switch (op) {
   case PIPE_LOGICOP_NOR:
   case PIPE_LOGICOP_AND_INVERTED:
   case PIPE_LOGICOP_AND_REVERSE:
   case PIPE_LOGICOP_INVERT:
   case PIPE_LOGICOP_XOR:
   case PIPE_LOGICOP_NAND:
   case PIPE_LOGICOP_AND:
   case PIPE_LOGICOP_EQUIV:
   case PIPE_LOGICOP_NOOP:
   case PIPE_LOGICOP_OR_INVERTED:
   case PIPE_LOGICOP_OR_REVERSE:
   case PIPE_LOGICOP_OR:
      return true;
   case PIPE_LOGICOP_CLEAR:
   case PIPE_LOGICOP_COPY_INVERTED:
   case PIPE_LOGICOP_COPY:
   case PIPE_LOGICOP_SET:
      return false;
   }
   unreachable("bad logicop");
}

static inline bool
util_writes_stencil(const struct pipe_stencil_state *s)
{
   return s->enabled && s->writemask &&
          ((s->fail_op != PIPE_STENCIL_OP_KEEP) ||
           (s->zpass_op != PIPE_STENCIL_OP_KEEP) ||
           (s->zfail_op != PIPE_STENCIL_OP_KEEP));
}

static inline bool
util_writes_depth(const struct pipe_depth_stencil_alpha_state *zsa)
{
   return zsa->depth_enabled && zsa->depth_writemask &&
          (zsa->depth_func != PIPE_FUNC_NEVER);
}

static inline bool
util_writes_depth_stencil(const struct pipe_depth_stencil_alpha_state *zsa)
{
   return util_writes_depth(zsa) ||
          util_writes_stencil(&zsa->stencil[0]) ||
          util_writes_stencil(&zsa->stencil[1]);
}

static inline struct pipe_context *
pipe_create_multimedia_context(struct pipe_screen *screen)
{
   unsigned flags = 0;

   if (!screen->get_param(screen, PIPE_CAP_GRAPHICS))
      flags |= PIPE_CONTEXT_COMPUTE_ONLY;

   return screen->context_create(screen, NULL, flags);
}

static inline unsigned
util_res_sample_count(struct pipe_resource *res)
{
   return res->nr_samples > 0 ? res->nr_samples : 1;
}

static inline void
util_set_vertex_buffers(struct pipe_context *pipe,
                        unsigned num_buffers, bool take_ownership,
                        const struct pipe_vertex_buffer *buffers)
{
   /* set_vertex_buffers requires that reference counts are incremented
    * by the caller.
    */
   if (!take_ownership) {
      for (unsigned i = 0; i < num_buffers; i++) {
         if (!buffers[i].is_user_buffer && buffers[i].buffer.resource)
            p_atomic_inc(&buffers[i].buffer.resource->reference.count);
      }
   }

   pipe->set_vertex_buffers(pipe, num_buffers, buffers);
}
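
/* Usage sketch (illustrative only): binding a single vertex buffer.  With
 * take_ownership = false the helper adds the references itself, so the
 * caller's own reference on 'buf' stays intact.
 *
 *    struct pipe_vertex_buffer vb;
 *    memset(&vb, 0, sizeof(vb));
 *    vb.buffer.resource = buf;
 *    vb.buffer_offset = 0;
 *    util_set_vertex_buffers(pipe, 1, false, &vb);
 */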

#ifdef __cplusplus
}
#endif

#endif /* U_INLINES_H */