/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef U_INLINES_H
#define U_INLINES_H

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_shader_tokens.h"
#include "pipe/p_state.h"
#include "pipe/p_screen.h"
#include "util/u_debug.h"
#include "util/u_debug_describe.h"
#include "util/u_debug_refcnt.h"
#include "util/u_atomic.h"
#include "util/u_box.h"
#include "util/u_math.h"


#ifdef __cplusplus
extern "C" {
#endif


/*
 * Reference counting helper functions.
 */


static inline void
pipe_reference_init(struct pipe_reference *reference, unsigned count)
{
   p_atomic_set(&reference->count, count);
}

static inline boolean
pipe_is_referenced(struct pipe_reference *reference)
{
   return p_atomic_read(&reference->count) != 0;
}

/**
 * Update reference counting.
 * The old thing pointed to, if any, will be unreferenced.
 * Both 'ptr' and 'reference' may be NULL.
 * \return TRUE if the object's refcount hits zero and should be destroyed.
 */
static inline boolean
pipe_reference_described(struct pipe_reference *ptr,
                         struct pipe_reference *reference,
                         debug_reference_descriptor get_desc)
{
   boolean destroy = FALSE;

   if (ptr != reference) {
      /* bump the reference.count first */
      if (reference) {
         assert(pipe_is_referenced(reference));
         p_atomic_inc(&reference->count);
         debug_reference(reference, get_desc, 1);
      }

      if (ptr) {
         assert(pipe_is_referenced(ptr));
         if (p_atomic_dec_zero(&ptr->count)) {
            destroy = TRUE;
         }
         debug_reference(ptr, get_desc, -1);
      }
   }

   return destroy;
}

static inline boolean
pipe_reference(struct pipe_reference *ptr, struct pipe_reference *reference)
{
   return pipe_reference_described(ptr, reference,
                                   (debug_reference_descriptor)
                                   debug_describe_reference);
}
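
/*
 * Usage sketch (illustrative, not part of this header): object-specific
 * reference helpers are typically thin wrappers over pipe_reference(),
 * exactly like the pipe_surface/pipe_resource helpers below.
 * 'my_object' and my_object_destroy() are hypothetical.
 *
 *    static inline void
 *    my_object_reference(struct my_object **ptr, struct my_object *obj)
 *    {
 *       struct my_object *old = *ptr;
 *
 *       if (pipe_reference(&(*ptr)->reference, &obj->reference))
 *          my_object_destroy(old);
 *       *ptr = obj;
 *    }
 */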

static inline void
pipe_surface_reference(struct pipe_surface **ptr, struct pipe_surface *surf)
{
   struct pipe_surface *old_surf = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &surf->reference,
                                (debug_reference_descriptor)debug_describe_surface))
      old_surf->context->surface_destroy(old_surf->context, old_surf);
   *ptr = surf;
}

/**
 * Similar to pipe_surface_reference() but always sets the pointer to NULL
 * and takes an explicit context. The explicit context avoids the problem
 * of using a deleted context's surface_destroy() method when freeing a
 * surface that's shared by multiple contexts.
 */
static inline void
pipe_surface_release(struct pipe_context *pipe, struct pipe_surface **ptr)
{
   if (pipe_reference_described(&(*ptr)->reference, NULL,
                                (debug_reference_descriptor)debug_describe_surface))
      pipe->surface_destroy(pipe, *ptr);
   *ptr = NULL;
}


static inline void
pipe_resource_reference(struct pipe_resource **ptr, struct pipe_resource *tex)
{
   struct pipe_resource *old_tex = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &tex->reference,
                                (debug_reference_descriptor)debug_describe_resource)) {
      pipe_resource_reference(&old_tex->next, NULL);
      old_tex->screen->resource_destroy(old_tex->screen, old_tex);
   }
   *ptr = tex;
}
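
/*
 * Usage sketch (illustrative): holding and dropping a resource reference.
 * 'tex' is assumed to be a valid pipe_resource obtained elsewhere.
 *
 *    struct pipe_resource *held = NULL;
 *    pipe_resource_reference(&held, tex);    // take a reference
 *    ...
 *    pipe_resource_reference(&held, NULL);   // drop it; destroys the
 *                                            // resource on the last drop
 */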

static inline void
pipe_sampler_view_reference(struct pipe_sampler_view **ptr,
                            struct pipe_sampler_view *view)
{
   struct pipe_sampler_view *old_view = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &view->reference,
                                (debug_reference_descriptor)debug_describe_sampler_view))
      old_view->context->sampler_view_destroy(old_view->context, old_view);
   *ptr = view;
}

/**
 * Similar to pipe_sampler_view_reference() but always sets the pointer to
 * NULL and takes an explicit context. Passing an explicit context works
 * around a dangling context pointer problem when textures are shared by
 * multiple contexts. XXX fix this someday.
 */
static inline void
pipe_sampler_view_release(struct pipe_context *ctx,
                          struct pipe_sampler_view **ptr)
{
   struct pipe_sampler_view *old_view = *ptr;
   if (*ptr && (*ptr)->context != ctx) {
      debug_printf_once(("context mismatch in pipe_sampler_view_release()\n"));
   }
   if (pipe_reference_described(&(*ptr)->reference, NULL,
                                (debug_reference_descriptor)debug_describe_sampler_view)) {
      ctx->sampler_view_destroy(ctx, old_view);
   }
   *ptr = NULL;
}

static inline void
pipe_so_target_reference(struct pipe_stream_output_target **ptr,
                         struct pipe_stream_output_target *target)
{
   struct pipe_stream_output_target *old = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &target->reference,
                                (debug_reference_descriptor)debug_describe_so_target))
      old->context->stream_output_target_destroy(old->context, old);
   *ptr = target;
}

static inline void
pipe_surface_reset(struct pipe_context *ctx, struct pipe_surface *ps,
                   struct pipe_resource *pt, unsigned level, unsigned layer)
{
   pipe_resource_reference(&ps->texture, pt);
   ps->format = pt->format;
   ps->width = u_minify(pt->width0, level);
   ps->height = u_minify(pt->height0, level);
   ps->u.tex.level = level;
   ps->u.tex.first_layer = ps->u.tex.last_layer = layer;
   ps->context = ctx;
}

static inline void
pipe_surface_init(struct pipe_context *ctx, struct pipe_surface *ps,
                  struct pipe_resource *pt, unsigned level, unsigned layer)
{
   ps->texture = NULL;
   pipe_reference_init(&ps->reference, 1);
   pipe_surface_reset(ctx, ps, pt, level, layer);
}

/* Return TRUE if the surfaces are equal. */
static inline boolean
pipe_surface_equal(struct pipe_surface *s1, struct pipe_surface *s2)
{
   return s1->texture == s2->texture &&
          s1->format == s2->format &&
          (s1->texture->target != PIPE_BUFFER ||
           (s1->u.buf.first_element == s2->u.buf.first_element &&
            s1->u.buf.last_element == s2->u.buf.last_element)) &&
          (s1->texture->target == PIPE_BUFFER ||
           (s1->u.tex.level == s2->u.tex.level &&
            s1->u.tex.first_layer == s2->u.tex.first_layer &&
            s1->u.tex.last_layer == s2->u.tex.last_layer));
}

/*
 * Convenience wrappers for screen buffer functions.
 */


/**
 * Create a new resource.
 * \param bind  bitmask of PIPE_BIND_x flags
 * \param usage  a PIPE_USAGE_x value
 */
static inline struct pipe_resource *
pipe_buffer_create(struct pipe_screen *screen,
                   unsigned bind,
                   enum pipe_resource_usage usage,
                   unsigned size)
{
   struct pipe_resource buffer;
   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
   buffer.bind = bind;
   buffer.usage = usage;
   buffer.flags = 0;
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return screen->resource_create(screen, &buffer);
}
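
/*
 * Example (illustrative sketch; 'screen' is assumed to be a valid
 * pipe_screen):
 *
 *    struct pipe_resource *vbuf =
 *       pipe_buffer_create(screen, PIPE_BIND_VERTEX_BUFFER,
 *                          PIPE_USAGE_DEFAULT, 64 * 1024);
 *    if (!vbuf)
 *       return;   // allocation failed
 */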


/**
 * Map a range of a resource.
 * \param offset  start of region, in bytes
 * \param length  size of region, in bytes
 * \param access  bitmask of PIPE_TRANSFER_x flags
 * \param transfer  returns a transfer object
 */
static inline void *
pipe_buffer_map_range(struct pipe_context *pipe,
                      struct pipe_resource *buffer,
                      unsigned offset,
                      unsigned length,
                      unsigned access,
                      struct pipe_transfer **transfer)
{
   struct pipe_box box;
   void *map;

   assert(offset < buffer->width0);
   assert(offset + length <= buffer->width0);
   assert(length);

   u_box_1d(offset, length, &box);

   map = pipe->transfer_map(pipe, buffer, 0, access, &box, transfer);
   if (!map) {
      return NULL;
   }

   return map;
}


/**
 * Map a whole resource.
 * \param access  bitmask of PIPE_TRANSFER_x flags
 * \param transfer  returns a transfer object
 */
static inline void *
pipe_buffer_map(struct pipe_context *pipe,
                struct pipe_resource *buffer,
                unsigned access,
                struct pipe_transfer **transfer)
{
   return pipe_buffer_map_range(pipe, buffer, 0, buffer->width0,
                                access, transfer);
}


static inline void
pipe_buffer_unmap(struct pipe_context *pipe,
                  struct pipe_transfer *transfer)
{
   pipe->transfer_unmap(pipe, transfer);
}
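
/*
 * Typical map/write/unmap sequence (illustrative sketch; 'pipe' and 'buf'
 * are assumed valid, with buf->width0 >= 256):
 *
 *    struct pipe_transfer *t;
 *    void *map = pipe_buffer_map_range(pipe, buf, 0, 256,
 *                                      PIPE_TRANSFER_WRITE |
 *                                      PIPE_TRANSFER_DISCARD_RANGE, &t);
 *    if (map) {
 *       memset(map, 0, 256);
 *       pipe_buffer_unmap(pipe, t);
 *    }
 */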

static inline void
pipe_buffer_flush_mapped_range(struct pipe_context *pipe,
                               struct pipe_transfer *transfer,
                               unsigned offset,
                               unsigned length)
{
   struct pipe_box box;
   int transfer_offset;

   assert(length);
   assert(transfer->box.x <= (int) offset);
   assert((int) (offset + length) <= transfer->box.x + transfer->box.width);

   /* Match the old screen->buffer_flush_mapped_range() behaviour, where
    * the offset parameter is relative to the start of the buffer, not the
    * mapped range.
    */
   transfer_offset = offset - transfer->box.x;

   u_box_1d(transfer_offset, length, &box);

   pipe->transfer_flush_region(pipe, transfer, &box);
}
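
/*
 * Illustrative sketch: with PIPE_TRANSFER_FLUSH_EXPLICIT, written data is
 * only guaranteed to reach the device for the ranges that are explicitly
 * flushed before unmapping. Offsets are relative to the buffer start, per
 * the note above.
 *
 *    struct pipe_transfer *t;
 *    ubyte *map = (ubyte *) pipe_buffer_map(pipe, buf,
 *                                           PIPE_TRANSFER_WRITE |
 *                                           PIPE_TRANSFER_FLUSH_EXPLICIT, &t);
 *    if (map) {
 *       memset(map + 16, 0, 16);                          // touch bytes 16..31
 *       pipe_buffer_flush_mapped_range(pipe, t, 16, 16);
 *       pipe_buffer_unmap(pipe, t);
 *    }
 */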

static inline void
pipe_buffer_write(struct pipe_context *pipe,
                  struct pipe_resource *buf,
                  unsigned offset,
                  unsigned size,
                  const void *data)
{
   /* Don't set any other usage bits. Drivers should derive them. */
   pipe->buffer_subdata(pipe, buf, PIPE_TRANSFER_WRITE, offset, size, data);
}

/**
 * Special case for writing non-overlapping ranges.
 *
 * We can avoid GPU/CPU synchronization when writing a range that has never
 * been written before.
 */
static inline void
pipe_buffer_write_nooverlap(struct pipe_context *pipe,
                            struct pipe_resource *buf,
                            unsigned offset, unsigned size,
                            const void *data)
{
   pipe->buffer_subdata(pipe, buf,
                        (PIPE_TRANSFER_WRITE |
                         PIPE_TRANSFER_UNSYNCHRONIZED),
                        offset, size, data);
}


/**
 * Create a new resource and immediately put data into it.
 * \param bind  bitmask of PIPE_BIND_x flags
 * \param usage  a PIPE_USAGE_x value
 */
static inline struct pipe_resource *
pipe_buffer_create_with_data(struct pipe_context *pipe,
                             unsigned bind,
                             enum pipe_resource_usage usage,
                             unsigned size,
                             const void *ptr)
{
   struct pipe_resource *res = pipe_buffer_create(pipe->screen,
                                                  bind, usage, size);
   pipe_buffer_write_nooverlap(pipe, res, 0, size, ptr);
   return res;
}

static inline void
pipe_buffer_read(struct pipe_context *pipe,
                 struct pipe_resource *buf,
                 unsigned offset,
                 unsigned size,
                 void *data)
{
   struct pipe_transfer *src_transfer;
   ubyte *map;

   map = (ubyte *) pipe_buffer_map_range(pipe,
                                         buf,
                                         offset, size,
                                         PIPE_TRANSFER_READ,
                                         &src_transfer);
   if (!map)
      return;

   memcpy(data, map, size);
   pipe_buffer_unmap(pipe, src_transfer);
}
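
/*
 * Read-back sketch (illustrative; assumes buf->width0 >= sizeof(counts)):
 *
 *    uint32_t counts[4];
 *    pipe_buffer_read(pipe, buf, 0, sizeof(counts), counts);
 */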


/**
 * Map a resource for reading/writing.
 * \param access  bitmask of PIPE_TRANSFER_x flags
 */
static inline void *
pipe_transfer_map(struct pipe_context *context,
                  struct pipe_resource *resource,
                  unsigned level, unsigned layer,
                  unsigned access,
                  unsigned x, unsigned y,
                  unsigned w, unsigned h,
                  struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_2d_zslice(x, y, layer, w, h, &box);
   return context->transfer_map(context,
                                resource,
                                level,
                                access,
                                &box, transfer);
}


/**
 * Map a 3D (texture) resource for reading/writing.
 * \param access  bitmask of PIPE_TRANSFER_x flags
 */
static inline void *
pipe_transfer_map_3d(struct pipe_context *context,
                     struct pipe_resource *resource,
                     unsigned level,
                     unsigned access,
                     unsigned x, unsigned y, unsigned z,
                     unsigned w, unsigned h, unsigned d,
                     struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_3d(x, y, z, w, h, d, &box);
   return context->transfer_map(context,
                                resource,
                                level,
                                access,
                                &box, transfer);
}
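
/*
 * Texture upload sketch (illustrative): write level 0/layer 0 of a 2D
 * texture while honouring the driver-chosen row stride. Assumes 'tex' uses
 * a 4-bytes-per-pixel format such as PIPE_FORMAT_R8G8B8A8_UNORM and that
 * 'pixels' holds w*h tightly packed texels.
 *
 *    struct pipe_transfer *t;
 *    ubyte *map = (ubyte *) pipe_transfer_map(pipe, tex, 0, 0,
 *                                             PIPE_TRANSFER_WRITE,
 *                                             0, 0, w, h, &t);
 *    if (map) {
 *       for (unsigned row = 0; row < h; row++)
 *          memcpy(map + row * t->stride, pixels + row * w * 4, w * 4);
 *       pipe_transfer_unmap(pipe, t);
 *    }
 */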

static inline void
pipe_transfer_unmap(struct pipe_context *context,
                    struct pipe_transfer *transfer)
{
   context->transfer_unmap(context, transfer);
}

static inline void
pipe_set_constant_buffer(struct pipe_context *pipe,
                         uint shader, uint index,
                         struct pipe_resource *buf)
{
   if (buf) {
      struct pipe_constant_buffer cb;
      cb.buffer = buf;
      cb.buffer_offset = 0;
      cb.buffer_size = buf->width0;
      cb.user_buffer = NULL;
      pipe->set_constant_buffer(pipe, shader, index, &cb);
   } else {
      pipe->set_constant_buffer(pipe, shader, index, NULL);
   }
}
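
/*
 * Usage sketch (illustrative; 'constbuf' is assumed to have been created
 * with PIPE_BIND_CONSTANT_BUFFER):
 *
 *    pipe_set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, constbuf);
 *    ...
 *    pipe_set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, NULL); // unbind
 */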


/**
 * Get the polygon offset enable/disable flag for the given polygon fill mode.
 * \param fill_mode  one of PIPE_POLYGON_MODE_POINT/LINE/FILL
 */
static inline boolean
util_get_offset(const struct pipe_rasterizer_state *templ,
                unsigned fill_mode)
{
   switch (fill_mode) {
   case PIPE_POLYGON_MODE_POINT:
      return templ->offset_point;
   case PIPE_POLYGON_MODE_LINE:
      return templ->offset_line;
   case PIPE_POLYGON_MODE_FILL:
      return templ->offset_tri;
   default:
      assert(0);
      return FALSE;
   }
}

static inline float
util_get_min_point_size(const struct pipe_rasterizer_state *state)
{
   /* The point size should be clamped to this value at the rasterizer stage.
    */
   return !state->point_quad_rasterization &&
          !state->point_smooth &&
          !state->multisample ? 1.0f : 0.0f;
}

static inline void
util_query_clear_result(union pipe_query_result *result, unsigned type)
{
   switch (type) {
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
   case PIPE_QUERY_GPU_FINISHED:
      result->b = FALSE;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIME_ELAPSED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      result->u64 = 0;
      break;
   case PIPE_QUERY_SO_STATISTICS:
      memset(&result->so_statistics, 0, sizeof(result->so_statistics));
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      memset(&result->timestamp_disjoint, 0, sizeof(result->timestamp_disjoint));
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      memset(&result->pipeline_statistics, 0, sizeof(result->pipeline_statistics));
      break;
   default:
      memset(result, 0, sizeof(*result));
   }
}

/** Convert PIPE_TEXTURE_x to TGSI_TEXTURE_x */
static inline unsigned
util_pipe_tex_to_tgsi_tex(enum pipe_texture_target pipe_tex_target,
                          unsigned nr_samples)
{
   switch (pipe_tex_target) {
   case PIPE_BUFFER:
      return TGSI_TEXTURE_BUFFER;

   case PIPE_TEXTURE_1D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D;

   case PIPE_TEXTURE_2D:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_MSAA : TGSI_TEXTURE_2D;

   case PIPE_TEXTURE_RECT:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_RECT;

   case PIPE_TEXTURE_3D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_3D;

   case PIPE_TEXTURE_CUBE:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_CUBE;

   case PIPE_TEXTURE_1D_ARRAY:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D_ARRAY;

   case PIPE_TEXTURE_2D_ARRAY:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_ARRAY_MSAA :
                              TGSI_TEXTURE_2D_ARRAY;

   case PIPE_TEXTURE_CUBE_ARRAY:
      return TGSI_TEXTURE_CUBE_ARRAY;

   default:
      assert(0 && "unexpected texture target");
      return TGSI_TEXTURE_UNKNOWN;
   }
}


static inline void
util_copy_constant_buffer(struct pipe_constant_buffer *dst,
                          const struct pipe_constant_buffer *src)
{
   if (src) {
      pipe_resource_reference(&dst->buffer, src->buffer);
      dst->buffer_offset = src->buffer_offset;
      dst->buffer_size = src->buffer_size;
      dst->user_buffer = src->user_buffer;
   }
   else {
      pipe_resource_reference(&dst->buffer, NULL);
      dst->buffer_offset = 0;
      dst->buffer_size = 0;
      dst->user_buffer = NULL;
   }
}

static inline void
util_copy_image_view(struct pipe_image_view *dst,
                     const struct pipe_image_view *src)
{
   if (src) {
      pipe_resource_reference(&dst->resource, src->resource);
      dst->format = src->format;
      dst->access = src->access;
      dst->u = src->u;
   } else {
      pipe_resource_reference(&dst->resource, NULL);
      dst->format = PIPE_FORMAT_NONE;
      dst->access = 0;
      memset(&dst->u, 0, sizeof(dst->u));
   }
}

static inline unsigned
util_max_layer(const struct pipe_resource *r, unsigned level)
{
   switch (r->target) {
   case PIPE_TEXTURE_3D:
      return u_minify(r->depth0, level) - 1;
   case PIPE_TEXTURE_CUBE:
      assert(r->array_size == 6);
      /* fall-through */
   case PIPE_TEXTURE_1D_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      return r->array_size - 1;
   default:
      return 0;
   }
}

static inline bool
util_texrange_covers_whole_level(const struct pipe_resource *tex,
                                 unsigned level, unsigned x, unsigned y,
                                 unsigned z, unsigned width,
                                 unsigned height, unsigned depth)
{
   return x == 0 && y == 0 && z == 0 &&
          width == u_minify(tex->width0, level) &&
          height == u_minify(tex->height0, level) &&
          depth == util_max_layer(tex, level) + 1;
}

#ifdef __cplusplus
}
#endif

#endif /* U_INLINES_H */