/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef U_INLINES_H
#define U_INLINES_H

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_shader_tokens.h"
#include "pipe/p_state.h"
#include "pipe/p_screen.h"
#include "util/u_debug.h"
#include "util/u_debug_describe.h"
#include "util/u_debug_refcnt.h"
#include "util/u_atomic.h"
#include "util/u_box.h"
#include "util/u_math.h"


#ifdef __cplusplus
extern "C" {
#endif


/*
 * Reference counting helper functions.
 */


static inline void
pipe_reference_init(struct pipe_reference *reference, unsigned count)
{
   p_atomic_set(&reference->count, count);
}

static inline boolean
pipe_is_referenced(struct pipe_reference *reference)
{
   return p_atomic_read(&reference->count) != 0;
}

/**
 * Update reference counting.
 * The old thing pointed to, if any, will be unreferenced.
 * Both 'ptr' and 'reference' may be NULL.
 * \return TRUE if the object's refcount hits zero and should be destroyed.
 */
static inline boolean
pipe_reference_described(struct pipe_reference *ptr,
                         struct pipe_reference *reference,
                         debug_reference_descriptor get_desc)
{
   boolean destroy = FALSE;

   if (ptr != reference) {
      /* bump the reference.count first */
      if (reference) {
         assert(pipe_is_referenced(reference));
         p_atomic_inc(&reference->count);
         debug_reference(reference, get_desc, 1);
      }

      if (ptr) {
         assert(pipe_is_referenced(ptr));
         if (p_atomic_dec_zero(&ptr->count)) {
            destroy = TRUE;
         }
         debug_reference(ptr, get_desc, -1);
      }
   }

   return destroy;
}

static inline boolean
pipe_reference(struct pipe_reference *ptr, struct pipe_reference *reference)
{
   return pipe_reference_described(ptr, reference,
                                   (debug_reference_descriptor)debug_describe_reference);
}
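
/*
 * Usage sketch (hypothetical, not part of this header): a driver object
 * embeds a pipe_reference, initializes it to one reference on creation,
 * and frees itself when pipe_reference() reports the count hit zero.
 * CALLOC_STRUCT/FREE stand in for whatever allocator the caller uses.
 *
 *    struct my_object { struct pipe_reference reference; ... };
 *
 *    obj = CALLOC_STRUCT(my_object);
 *    pipe_reference_init(&obj->reference, 1);
 *    ...
 *    if (pipe_reference(&obj->reference, NULL))
 *       FREE(obj);   // last reference was just dropped
 */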

static inline void
pipe_surface_reference(struct pipe_surface **ptr, struct pipe_surface *surf)
{
   struct pipe_surface *old_surf = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &surf->reference,
                                (debug_reference_descriptor)debug_describe_surface))
      old_surf->context->surface_destroy(old_surf->context, old_surf);
   *ptr = surf;
}

/**
 * Similar to pipe_surface_reference() but always sets the pointer to NULL
 * and takes an explicit context. The explicit context avoids the problem
 * of using a deleted context's surface_destroy() method when freeing a
 * surface that's shared by multiple contexts.
 */
static inline void
pipe_surface_release(struct pipe_context *pipe, struct pipe_surface **ptr)
{
   if (pipe_reference_described(&(*ptr)->reference, NULL,
                                (debug_reference_descriptor)debug_describe_surface))
      pipe->surface_destroy(pipe, *ptr);
   *ptr = NULL;
}


static inline void
pipe_resource_reference(struct pipe_resource **ptr, struct pipe_resource *tex)
{
   struct pipe_resource *old_tex = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &tex->reference,
                                (debug_reference_descriptor)debug_describe_resource)) {
      /* Avoid recursion, which would prevent inlining this function */
      do {
         struct pipe_resource *next = old_tex->next;

         old_tex->screen->resource_destroy(old_tex->screen, old_tex);
         old_tex = next;
      } while (pipe_reference_described(&old_tex->reference, NULL,
                                        (debug_reference_descriptor)debug_describe_resource));
   }
   *ptr = tex;
}
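
/*
 * Usage sketch (hypothetical): holding and releasing a resource reference.
 * Initializing the local pointer to NULL matters, because the helper reads
 * through *ptr.
 *
 *    struct pipe_resource *held = NULL;
 *    pipe_resource_reference(&held, tex);   // take a reference
 *    ...
 *    pipe_resource_reference(&held, NULL);  // drop it; may destroy tex
 */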

/**
 * Set *ptr to \p view with proper reference counting.
 *
 * The caller must guarantee that \p view and *ptr (if non-NULL) were
 * created in the same context, and that this context is the current one.
 */
static inline void
pipe_sampler_view_reference(struct pipe_sampler_view **ptr,
                            struct pipe_sampler_view *view)
{
   struct pipe_sampler_view *old_view = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &view->reference,
                                (debug_reference_descriptor)debug_describe_sampler_view))
      old_view->context->sampler_view_destroy(old_view->context, old_view);
   *ptr = view;
}

/**
 * Similar to pipe_sampler_view_reference() but always sets the pointer to
 * NULL and takes the current context explicitly.
 *
 * If *ptr is non-NULL, it may refer to a view that was created in a
 * different context (however, that context must still be alive).
 */
static inline void
pipe_sampler_view_release(struct pipe_context *ctx,
                          struct pipe_sampler_view **ptr)
{
   struct pipe_sampler_view *old_view = *ptr;
   if (pipe_reference_described(&(*ptr)->reference, NULL,
                                (debug_reference_descriptor)debug_describe_sampler_view)) {
      ctx->sampler_view_destroy(ctx, old_view);
   }
   *ptr = NULL;
}

static inline void
pipe_so_target_reference(struct pipe_stream_output_target **ptr,
                         struct pipe_stream_output_target *target)
{
   struct pipe_stream_output_target *old = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &target->reference,
                                (debug_reference_descriptor)debug_describe_so_target))
      old->context->stream_output_target_destroy(old->context, old);
   *ptr = target;
}

static inline void
pipe_vertex_buffer_unreference(struct pipe_vertex_buffer *dst)
{
   if (dst->is_user_buffer)
      dst->buffer.user = NULL;
   else
      pipe_resource_reference(&dst->buffer.resource, NULL);
}

static inline void
pipe_vertex_buffer_reference(struct pipe_vertex_buffer *dst,
                             const struct pipe_vertex_buffer *src)
{
   pipe_vertex_buffer_unreference(dst);
   if (!src->is_user_buffer)
      pipe_resource_reference(&dst->buffer.resource, src->buffer.resource);
   /* The memcpy copies all fields of src, including the resource pointer
    * that was just referenced above, so dst ends up holding exactly one
    * new reference. */
   memcpy(dst, src, sizeof(*src));
}
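
/*
 * Note (informal): user buffers are raw CPU pointers and carry no refcount,
 * so only real pipe_resource buffers are referenced above. A hypothetical
 * caller copying a binding would do:
 *
 *    struct pipe_vertex_buffer dst = {0};
 *    pipe_vertex_buffer_reference(&dst, &src);
 *    ...
 *    pipe_vertex_buffer_unreference(&dst);
 */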

static inline void
pipe_surface_reset(struct pipe_context *ctx, struct pipe_surface *ps,
                   struct pipe_resource *pt, unsigned level, unsigned layer)
{
   pipe_resource_reference(&ps->texture, pt);
   ps->format = pt->format;
   ps->width = u_minify(pt->width0, level);
   ps->height = u_minify(pt->height0, level);
   ps->u.tex.level = level;
   ps->u.tex.first_layer = ps->u.tex.last_layer = layer;
   ps->context = ctx;
}

static inline void
pipe_surface_init(struct pipe_context *ctx, struct pipe_surface *ps,
                  struct pipe_resource *pt, unsigned level, unsigned layer)
{
   ps->texture = NULL;
   pipe_reference_init(&ps->reference, 1);
   pipe_surface_reset(ctx, ps, pt, level, layer);
}

/* Return true if the surfaces are equal. */
static inline boolean
pipe_surface_equal(struct pipe_surface *s1, struct pipe_surface *s2)
{
   return s1->texture == s2->texture &&
          s1->format == s2->format &&
          (s1->texture->target != PIPE_BUFFER ||
           (s1->u.buf.first_element == s2->u.buf.first_element &&
            s1->u.buf.last_element == s2->u.buf.last_element)) &&
          (s1->texture->target == PIPE_BUFFER ||
           (s1->u.tex.level == s2->u.tex.level &&
            s1->u.tex.first_layer == s2->u.tex.first_layer &&
            s1->u.tex.last_layer == s2->u.tex.last_layer));
}

/*
 * Convenience wrappers for screen buffer functions.
 */


/**
 * Create a new resource.
 * \param bind bitmask of PIPE_BIND_x flags
 * \param usage a PIPE_USAGE_x value
 */
static inline struct pipe_resource *
pipe_buffer_create(struct pipe_screen *screen,
                   unsigned bind,
                   enum pipe_resource_usage usage,
                   unsigned size)
{
   struct pipe_resource buffer;
   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
   buffer.bind = bind;
   buffer.usage = usage;
   buffer.flags = 0;
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return screen->resource_create(screen, &buffer);
}
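
/*
 * Example (hypothetical): creating a 64 KB static vertex buffer. The bind
 * and usage values below are illustrative, not prescribed by this helper.
 *
 *    struct pipe_resource *vbuf =
 *       pipe_buffer_create(screen, PIPE_BIND_VERTEX_BUFFER,
 *                          PIPE_USAGE_DEFAULT, 64 * 1024);
 */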


/**
 * Map a range of a resource.
 * \param offset start of region, in bytes
 * \param length size of region, in bytes
 * \param access bitmask of PIPE_TRANSFER_x flags
 * \param transfer returns a transfer object
 */
static inline void *
pipe_buffer_map_range(struct pipe_context *pipe,
                      struct pipe_resource *buffer,
                      unsigned offset,
                      unsigned length,
                      unsigned access,
                      struct pipe_transfer **transfer)
{
   struct pipe_box box;

   assert(offset < buffer->width0);
   assert(offset + length <= buffer->width0);
   assert(length);

   u_box_1d(offset, length, &box);

   /* May return NULL if the driver cannot satisfy the mapping. */
   return pipe->transfer_map(pipe, buffer, 0, access, &box, transfer);
}
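
/*
 * Usage sketch (hypothetical): write 64 bytes into a buffer through a
 * mapped range, then unmap. "data" is an assumed caller-provided array.
 *
 *    struct pipe_transfer *xfer;
 *    void *map = pipe_buffer_map_range(pipe, buf, 0, 64,
 *                                      PIPE_TRANSFER_WRITE, &xfer);
 *    if (map) {
 *       memcpy(map, data, 64);
 *       pipe_buffer_unmap(pipe, xfer);
 *    }
 */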


/**
 * Map the whole resource.
 * \param access bitmask of PIPE_TRANSFER_x flags
 * \param transfer returns a transfer object
 */
static inline void *
pipe_buffer_map(struct pipe_context *pipe,
                struct pipe_resource *buffer,
                unsigned access,
                struct pipe_transfer **transfer)
{
   return pipe_buffer_map_range(pipe, buffer, 0, buffer->width0,
                                access, transfer);
}


static inline void
pipe_buffer_unmap(struct pipe_context *pipe,
                  struct pipe_transfer *transfer)
{
   pipe->transfer_unmap(pipe, transfer);
}

static inline void
pipe_buffer_flush_mapped_range(struct pipe_context *pipe,
                               struct pipe_transfer *transfer,
                               unsigned offset,
                               unsigned length)
{
   struct pipe_box box;
   int transfer_offset;

   assert(length);
   assert(transfer->box.x <= (int) offset);
   assert((int) (offset + length) <= transfer->box.x + transfer->box.width);

   /* Match old screen->buffer_flush_mapped_range() behaviour, where the
    * offset parameter is relative to the start of the buffer, not the
    * mapped range.
    */
   transfer_offset = offset - transfer->box.x;

   u_box_1d(transfer_offset, length, &box);

   pipe->transfer_flush_region(pipe, transfer, &box);
}

static inline void
pipe_buffer_write(struct pipe_context *pipe,
                  struct pipe_resource *buf,
                  unsigned offset,
                  unsigned size,
                  const void *data)
{
   /* Don't set any other usage bits. Drivers should derive them. */
   pipe->buffer_subdata(pipe, buf, PIPE_TRANSFER_WRITE, offset, size, data);
}

/**
 * Special case for writing non-overlapping ranges.
 *
 * We can avoid GPU/CPU synchronization when writing a range that has never
 * been written before.
 */
static inline void
pipe_buffer_write_nooverlap(struct pipe_context *pipe,
                            struct pipe_resource *buf,
                            unsigned offset, unsigned size,
                            const void *data)
{
   pipe->buffer_subdata(pipe, buf,
                        (PIPE_TRANSFER_WRITE |
                         PIPE_TRANSFER_UNSYNCHRONIZED),
                        offset, size, data);
}


/**
 * Create a new resource and immediately put data into it.
 * \param bind bitmask of PIPE_BIND_x flags
 * \param usage a PIPE_USAGE_x value
 */
static inline struct pipe_resource *
pipe_buffer_create_with_data(struct pipe_context *pipe,
                             unsigned bind,
                             enum pipe_resource_usage usage,
                             unsigned size,
                             const void *ptr)
{
   struct pipe_resource *res = pipe_buffer_create(pipe->screen,
                                                  bind, usage, size);
   pipe_buffer_write_nooverlap(pipe, res, 0, size, ptr);
   return res;
}
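
/*
 * Example (hypothetical): uploading a small index array at creation time.
 * "indices" is an assumed caller-side array.
 *
 *    struct pipe_resource *ibuf =
 *       pipe_buffer_create_with_data(pipe, PIPE_BIND_INDEX_BUFFER,
 *                                    PIPE_USAGE_IMMUTABLE,
 *                                    sizeof(indices), indices);
 */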

static inline void
pipe_buffer_read(struct pipe_context *pipe,
                 struct pipe_resource *buf,
                 unsigned offset,
                 unsigned size,
                 void *data)
{
   struct pipe_transfer *src_transfer;
   ubyte *map;

   map = (ubyte *) pipe_buffer_map_range(pipe,
                                         buf,
                                         offset, size,
                                         PIPE_TRANSFER_READ,
                                         &src_transfer);
   if (!map)
      return;

   memcpy(data, map, size);
   pipe_buffer_unmap(pipe, src_transfer);
}
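
/*
 * Usage sketch (hypothetical): reading back the first 16 bytes of a buffer.
 * Note the read fails silently if the mapping fails, so callers who care
 * should pre-fill "result" with a sentinel value.
 *
 *    uint8_t result[16];
 *    pipe_buffer_read(pipe, buf, 0, sizeof(result), result);
 */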


/**
 * Map a resource for reading/writing.
 * \param access bitmask of PIPE_TRANSFER_x flags
 */
static inline void *
pipe_transfer_map(struct pipe_context *context,
                  struct pipe_resource *resource,
                  unsigned level, unsigned layer,
                  unsigned access,
                  unsigned x, unsigned y,
                  unsigned w, unsigned h,
                  struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_2d_zslice(x, y, layer, w, h, &box);
   return context->transfer_map(context,
                                resource,
                                level,
                                access,
                                &box, transfer);
}


/**
 * Map a 3D (texture) resource for reading/writing.
 * \param access bitmask of PIPE_TRANSFER_x flags
 */
static inline void *
pipe_transfer_map_3d(struct pipe_context *context,
                     struct pipe_resource *resource,
                     unsigned level,
                     unsigned access,
                     unsigned x, unsigned y, unsigned z,
                     unsigned w, unsigned h, unsigned d,
                     struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_3d(x, y, z, w, h, d, &box);
   return context->transfer_map(context,
                                resource,
                                level,
                                access,
                                &box, transfer);
}

static inline void
pipe_transfer_unmap(struct pipe_context *context,
                    struct pipe_transfer *transfer)
{
   context->transfer_unmap(context, transfer);
}
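
/*
 * Usage sketch (hypothetical): writing one row at a time into level 0 of a
 * 2D texture. transfer->stride is the driver-chosen row pitch, which may be
 * larger than width * bytes-per-pixel; width, height, src, src_pitch and
 * row_bytes are assumed caller-side values.
 *
 *    struct pipe_transfer *xfer;
 *    uint8_t *map = pipe_transfer_map(ctx, tex, 0, 0, PIPE_TRANSFER_WRITE,
 *                                     0, 0, width, height, &xfer);
 *    if (map) {
 *       for (unsigned row = 0; row < height; row++)
 *          memcpy(map + row * xfer->stride, src + row * src_pitch, row_bytes);
 *       pipe_transfer_unmap(ctx, xfer);
 *    }
 */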

static inline void
pipe_set_constant_buffer(struct pipe_context *pipe,
                         enum pipe_shader_type shader, uint index,
                         struct pipe_resource *buf)
{
   if (buf) {
      struct pipe_constant_buffer cb;
      cb.buffer = buf;
      cb.buffer_offset = 0;
      cb.buffer_size = buf->width0;
      cb.user_buffer = NULL;
      pipe->set_constant_buffer(pipe, shader, index, &cb);
   } else {
      pipe->set_constant_buffer(pipe, shader, index, NULL);
   }
}
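
/*
 * Example (hypothetical): binding a uniform buffer to slot 0 of the
 * fragment shader, then unbinding it.
 *
 *    pipe_set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, const_buf);
 *    ...
 *    pipe_set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, NULL);
 */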


/**
 * Get the polygon offset enable/disable flag for the given polygon fill mode.
 * \param fill_mode one of PIPE_POLYGON_MODE_POINT/LINE/FILL
 */
static inline boolean
util_get_offset(const struct pipe_rasterizer_state *templ,
                unsigned fill_mode)
{
   switch (fill_mode) {
   case PIPE_POLYGON_MODE_POINT:
      return templ->offset_point;
   case PIPE_POLYGON_MODE_LINE:
      return templ->offset_line;
   case PIPE_POLYGON_MODE_FILL:
      return templ->offset_tri;
   default:
      assert(0);
      return FALSE;
   }
}

static inline float
util_get_min_point_size(const struct pipe_rasterizer_state *state)
{
   /* The point size should be clamped to this value at the rasterizer stage.
    */
   return (!state->point_quad_rasterization &&
           !state->point_smooth &&
           !state->multisample) ? 1.0f : 0.0f;
}

static inline void
util_query_clear_result(union pipe_query_result *result, unsigned type)
{
   switch (type) {
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
   case PIPE_QUERY_GPU_FINISHED:
      result->b = FALSE;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIME_ELAPSED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      result->u64 = 0;
      break;
   case PIPE_QUERY_SO_STATISTICS:
      memset(&result->so_statistics, 0, sizeof(result->so_statistics));
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      memset(&result->timestamp_disjoint, 0, sizeof(result->timestamp_disjoint));
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      memset(&result->pipeline_statistics, 0, sizeof(result->pipeline_statistics));
      break;
   default:
      memset(result, 0, sizeof(*result));
   }
}

/** Convert PIPE_TEXTURE_x to TGSI_TEXTURE_x */
static inline enum tgsi_texture_type
util_pipe_tex_to_tgsi_tex(enum pipe_texture_target pipe_tex_target,
                          unsigned nr_samples)
{
   switch (pipe_tex_target) {
   case PIPE_BUFFER:
      return TGSI_TEXTURE_BUFFER;

   case PIPE_TEXTURE_1D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D;

   case PIPE_TEXTURE_2D:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_MSAA : TGSI_TEXTURE_2D;

   case PIPE_TEXTURE_RECT:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_RECT;

   case PIPE_TEXTURE_3D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_3D;

   case PIPE_TEXTURE_CUBE:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_CUBE;

   case PIPE_TEXTURE_1D_ARRAY:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D_ARRAY;

   case PIPE_TEXTURE_2D_ARRAY:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_ARRAY_MSAA :
                              TGSI_TEXTURE_2D_ARRAY;

   case PIPE_TEXTURE_CUBE_ARRAY:
      return TGSI_TEXTURE_CUBE_ARRAY;

   default:
      assert(0 && "unexpected texture target");
      return TGSI_TEXTURE_UNKNOWN;
   }
}
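
/*
 * For instance, a 4-sample 2D texture maps to the MSAA variant:
 *
 *    util_pipe_tex_to_tgsi_tex(PIPE_TEXTURE_2D, 4);  // TGSI_TEXTURE_2D_MSAA
 */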


static inline void
util_copy_constant_buffer(struct pipe_constant_buffer *dst,
                          const struct pipe_constant_buffer *src)
{
   if (src) {
      pipe_resource_reference(&dst->buffer, src->buffer);
      dst->buffer_offset = src->buffer_offset;
      dst->buffer_size = src->buffer_size;
      dst->user_buffer = src->user_buffer;
   }
   else {
      pipe_resource_reference(&dst->buffer, NULL);
      dst->buffer_offset = 0;
      dst->buffer_size = 0;
      dst->user_buffer = NULL;
   }
}

static inline void
util_copy_image_view(struct pipe_image_view *dst,
                     const struct pipe_image_view *src)
{
   if (src) {
      pipe_resource_reference(&dst->resource, src->resource);
      dst->format = src->format;
      dst->access = src->access;
      dst->u = src->u;
   } else {
      pipe_resource_reference(&dst->resource, NULL);
      dst->format = PIPE_FORMAT_NONE;
      dst->access = 0;
      memset(&dst->u, 0, sizeof(dst->u));
   }
}

static inline unsigned
util_max_layer(const struct pipe_resource *r, unsigned level)
{
   switch (r->target) {
   case PIPE_TEXTURE_3D:
      return u_minify(r->depth0, level) - 1;
   case PIPE_TEXTURE_CUBE:
      assert(r->array_size == 6);
      /* fall-through */
   case PIPE_TEXTURE_1D_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      return r->array_size - 1;
   default:
      return 0;
   }
}

static inline unsigned
util_num_layers(const struct pipe_resource *r, unsigned level)
{
   return util_max_layer(r, level) + 1;
}
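
/*
 * Worked example: for a 3D texture with depth0 == 32, level 2 has
 * u_minify(32, 2) == 8 slices, so util_max_layer() returns 7 and
 * util_num_layers() returns 8. Array textures report array_size - 1
 * regardless of level.
 */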

static inline bool
util_texrange_covers_whole_level(const struct pipe_resource *tex,
                                 unsigned level, unsigned x, unsigned y,
                                 unsigned z, unsigned width,
                                 unsigned height, unsigned depth)
{
   return x == 0 && y == 0 && z == 0 &&
          width == u_minify(tex->width0, level) &&
          height == u_minify(tex->height0, level) &&
          depth == util_num_layers(tex, level);
}

#ifdef __cplusplus
}
#endif

#endif /* U_INLINES_H */