/**************************************************************************
 *
 * Copyright 2012 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "util/format/format_utils.h"
#include "util/u_cpu_detect.h"
#include "util/u_helpers.h"
#include "util/u_inlines.h"
#include "util/u_upload_mgr.h"
#include "util/u_thread.h"
#include "util/os_time.h"
#include "util/perf/cpu_trace.h"
#include <inttypes.h>

/**
 * This function is used to copy an array of pipe_vertex_buffer structures,
 * while properly referencing the pipe_vertex_buffer::buffer member.
 *
 * enabled_buffers is updated such that the bits corresponding to the indices
 * of disabled buffers are set to 0 and the enabled ones are set to 1.
 *
 * \sa util_copy_framebuffer_state
 */
void util_set_vertex_buffers_mask(struct pipe_vertex_buffer *dst,
                                  uint32_t *enabled_buffers,
                                  const struct pipe_vertex_buffer *src,
                                  unsigned count,
                                  bool take_ownership)
{
   unsigned last_count = util_last_bit(*enabled_buffers);
   uint32_t bitmask = 0;
   unsigned i = 0;

   assert(!count || src);

   if (src) {
      for (; i < count; i++) {
         if (src[i].buffer.resource)
            bitmask |= 1 << i;

         pipe_vertex_buffer_unreference(&dst[i]);

         if (!take_ownership && !src[i].is_user_buffer)
            pipe_resource_reference(&dst[i].buffer.resource, src[i].buffer.resource);
      }

      /* Copy over the other members of pipe_vertex_buffer. */
      memcpy(dst, src, count * sizeof(struct pipe_vertex_buffer));
   }

   *enabled_buffers = bitmask;

   for (; i < last_count; i++)
      pipe_vertex_buffer_unreference(&dst[i]);
}
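
/* A minimal usage sketch for util_set_vertex_buffers_mask, assuming a
 * hypothetical driver context that caches its bindings in "vb" and
 * "enabled_vb_mask" (all names below are illustrative, not part of this
 * file):
 *
 *    static void
 *    drv_set_vertex_buffers(struct pipe_context *pctx, unsigned count,
 *                           const struct pipe_vertex_buffer *buffers)
 *    {
 *       struct drv_context *ctx = (struct drv_context *)pctx;
 *
 *       // References are added/dropped for us; the mask ends up with one
 *       // bit set per slot that still has a resource bound.
 *       util_set_vertex_buffers_mask(ctx->vb, &ctx->enabled_vb_mask,
 *                                    buffers, count,
 *                                    false); // don't take ownership
 *       ctx->dirty |= DRV_DIRTY_VERTEX_BUFFERS;
 *    }
 */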

/**
 * Same as util_set_vertex_buffers_mask, but it takes and updates the number
 * of bound buffers instead of an enabled-buffer bitmask.
 */
void util_set_vertex_buffers_count(struct pipe_vertex_buffer *dst,
                                   unsigned *dst_count,
                                   const struct pipe_vertex_buffer *src,
                                   unsigned count,
                                   bool take_ownership)
{
   uint32_t enabled_buffers = 0;

   for (unsigned i = 0; i < *dst_count; i++) {
      if (dst[i].buffer.resource)
         enabled_buffers |= (1ull << i);
   }

   util_set_vertex_buffers_mask(dst, &enabled_buffers, src, count,
                                take_ownership);

   *dst_count = util_last_bit(enabled_buffers);
}

/**
 * This function is used to copy an array of pipe_shader_buffer structures,
 * while properly referencing the pipe_shader_buffer::buffer member.
 *
 * \sa util_set_vertex_buffers_mask
 */
void util_set_shader_buffers_mask(struct pipe_shader_buffer *dst,
                                  uint32_t *enabled_buffers,
                                  const struct pipe_shader_buffer *src,
                                  unsigned start_slot, unsigned count)
{
   unsigned i;

   dst += start_slot;

   if (src) {
      for (i = 0; i < count; i++) {
         pipe_resource_reference(&dst[i].buffer, src[i].buffer);

         if (src[i].buffer)
            *enabled_buffers |= (1ull << (start_slot + i));
         else
            *enabled_buffers &= ~(1ull << (start_slot + i));
      }

      /* Copy over the other members of pipe_shader_buffer. */
      memcpy(dst, src, count * sizeof(struct pipe_shader_buffer));
   }
   else {
      /* Unreference the buffers. */
      for (i = 0; i < count; i++)
         pipe_resource_reference(&dst[i].buffer, NULL);

      *enabled_buffers &= ~(((1ull << count) - 1) << start_slot);
   }
}
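
/* A minimal usage sketch for util_set_shader_buffers_mask, assuming a
 * hypothetical driver that mirrors its SSBO bindings per shader stage
 * (field names are illustrative):
 *
 *    static void
 *    drv_set_shader_buffers(struct drv_context *ctx,
 *                           enum pipe_shader_type shader,
 *                           unsigned start_slot, unsigned count,
 *                           const struct pipe_shader_buffer *buffers)
 *    {
 *       // Passing buffers == NULL unbinds "count" slots starting at
 *       // start_slot and clears the corresponding mask bits.
 *       util_set_shader_buffers_mask(ctx->ssbo[shader],
 *                                    &ctx->ssbo_mask[shader],
 *                                    buffers, start_slot, count);
 *    }
 */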

/**
 * Given a draw that uses a user index buffer, upload the range referenced by
 * the draw and return the uploaded buffer and an offset adjusted so that the
 * draw's start index can still be applied unchanged.
 */
bool
util_upload_index_buffer(struct pipe_context *pipe,
                         const struct pipe_draw_info *info,
                         const struct pipe_draw_start_count_bias *draw,
                         struct pipe_resource **out_buffer,
                         unsigned *out_offset, unsigned alignment)
{
   unsigned start_offset = draw->start * info->index_size;

   u_upload_data(pipe->stream_uploader, start_offset,
                 draw->count * info->index_size, alignment,
                 (char*)info->index.user + start_offset,
                 out_offset, out_buffer);
   u_upload_unmap(pipe->stream_uploader);
   *out_offset -= start_offset;
   return *out_buffer != NULL;
}
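
/* A sketch of how a draw path might use util_upload_index_buffer when the
 * application passed a user-pointer index buffer (hypothetical driver code,
 * error handling reduced to a bail-out):
 *
 *    struct pipe_resource *ibuf = NULL;
 *    unsigned offset = 0;
 *
 *    if (info->index_size && info->has_user_indices) {
 *       if (!util_upload_index_buffer(pipe, info, draw, &ibuf, &offset, 4))
 *          return; // upload failed
 *       // "offset" is adjusted so the hardware can keep using draw->start.
 *    }
 *    ...
 *    pipe_resource_reference(&ibuf, NULL);
 */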

/**
 * Lower each UINT64 vertex element to 1 or 2 UINT32 vertex elements.
 * 3 and 4 component formats are expanded into 2 slots.
 *
 * @param velems      Original vertex elements, will be updated to contain
 *                    the lowered vertex elements.
 * @param velem_count Original count, will be updated to contain the count
 *                    after lowering.
 * @param tmp         Temporary array of PIPE_MAX_ATTRIBS vertex elements.
 */
void
util_lower_uint64_vertex_elements(const struct pipe_vertex_element **velems,
                                  unsigned *velem_count,
                                  struct pipe_vertex_element tmp[PIPE_MAX_ATTRIBS])
{
   const struct pipe_vertex_element *input = *velems;
   unsigned count = *velem_count;
   bool has_64bit = false;

   for (unsigned i = 0; i < count; i++) {
      has_64bit |= input[i].src_format >= PIPE_FORMAT_R64_UINT &&
                   input[i].src_format <= PIPE_FORMAT_R64G64B64A64_UINT;
   }

   /* Return the original vertex elements if there is nothing to do. */
   if (!has_64bit)
      return;

   /* Lower 64_UINT to 32_UINT. */
   unsigned new_count = 0;

   for (unsigned i = 0; i < count; i++) {
      enum pipe_format format = input[i].src_format;

      /* If the shader input is dvec2 or smaller, reduce the number of
       * components to 2 at most. If the shader input is dvec3 or larger,
       * expand the number of components to 3 at least. If the 3rd component
       * is out of bounds, the hardware shouldn't skip loading the first
       * 2 components.
       */
      if (format >= PIPE_FORMAT_R64_UINT &&
          format <= PIPE_FORMAT_R64G64B64A64_UINT) {
         if (input[i].dual_slot)
            format = MAX2(format, PIPE_FORMAT_R64G64B64_UINT);
         else
            format = MIN2(format, PIPE_FORMAT_R64G64_UINT);
      }

      switch (format) {
      case PIPE_FORMAT_R64_UINT:
         tmp[new_count] = input[i];
         tmp[new_count].src_format = PIPE_FORMAT_R32G32_UINT;
         new_count++;
         break;

      case PIPE_FORMAT_R64G64_UINT:
         tmp[new_count] = input[i];
         tmp[new_count].src_format = PIPE_FORMAT_R32G32B32A32_UINT;
         new_count++;
         break;

      case PIPE_FORMAT_R64G64B64_UINT:
      case PIPE_FORMAT_R64G64B64A64_UINT:
         assert(new_count + 2 <= PIPE_MAX_ATTRIBS);
         tmp[new_count] = tmp[new_count + 1] = input[i];
         tmp[new_count].src_format = PIPE_FORMAT_R32G32B32A32_UINT;
         tmp[new_count + 1].src_format =
            format == PIPE_FORMAT_R64G64B64_UINT ?
                  PIPE_FORMAT_R32G32_UINT :
                  PIPE_FORMAT_R32G32B32A32_UINT;
         tmp[new_count + 1].src_offset += 16;
         new_count += 2;
         break;

      default:
         tmp[new_count++] = input[i];
         break;
      }
   }

   *velem_count = new_count;
   *velems = tmp;
}
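
/* A sketch of how a driver's create_vertex_elements hook might use the
 * lowering helper above (drv_pack_velems() stands in for the driver's own
 * packing and is hypothetical):
 *
 *    struct pipe_vertex_element tmp[PIPE_MAX_ATTRIBS];
 *
 *    // After this call, "elements"/"num_elements" either still point at the
 *    // caller's array (nothing to lower) or at "tmp" with 64-bit formats
 *    // split into one or two 32-bit slots.
 *    util_lower_uint64_vertex_elements(&elements, &num_elements, tmp);
 *    drv_pack_velems(cso, elements, num_elements);
 */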

/* This is a helper for hardware bring-up. Don't remove. */
struct pipe_query *
util_begin_pipestat_query(struct pipe_context *ctx)
{
   struct pipe_query *q =
      ctx->create_query(ctx, PIPE_QUERY_PIPELINE_STATISTICS, 0);
   if (!q)
      return NULL;

   ctx->begin_query(ctx, q);
   return q;
}

/* This is a helper for hardware bring-up. Don't remove. */
void
util_end_pipestat_query(struct pipe_context *ctx, struct pipe_query *q,
                        FILE *f)
{
   static unsigned counter;
   struct pipe_query_data_pipeline_statistics stats;

   ctx->end_query(ctx, q);
   ctx->get_query_result(ctx, q, true, (void*)&stats);
   ctx->destroy_query(ctx, q);

   fprintf(f,
           "Draw call %u:\n"
           "    ia_vertices    = %"PRIu64"\n"
           "    ia_primitives  = %"PRIu64"\n"
           "    vs_invocations = %"PRIu64"\n"
           "    gs_invocations = %"PRIu64"\n"
           "    gs_primitives  = %"PRIu64"\n"
           "    c_invocations  = %"PRIu64"\n"
           "    c_primitives   = %"PRIu64"\n"
           "    ps_invocations = %"PRIu64"\n"
           "    hs_invocations = %"PRIu64"\n"
           "    ds_invocations = %"PRIu64"\n"
           "    cs_invocations = %"PRIu64"\n",
           (unsigned)p_atomic_inc_return(&counter),
           stats.ia_vertices,
           stats.ia_primitives,
           stats.vs_invocations,
           stats.gs_invocations,
           stats.gs_primitives,
           stats.c_invocations,
           stats.c_primitives,
           stats.ps_invocations,
           stats.hs_invocations,
           stats.ds_invocations,
           stats.cs_invocations);
}
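
/* The two helpers above are meant to be used as a pair around a suspect
 * draw during bring-up, roughly like this (sketch; the draw_vbo signature
 * shown is the current multi-draw one and may differ between branches):
 *
 *    struct pipe_query *q = util_begin_pipestat_query(ctx);
 *    ctx->draw_vbo(ctx, &info, 0, NULL, &draw, 1);
 *    if (q)
 *       util_end_pipestat_query(ctx, q, stderr); // prints the counters
 */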

/* This is a helper for profiling. Don't remove. */
struct pipe_query *
util_begin_time_query(struct pipe_context *ctx)
{
   struct pipe_query *q =
      ctx->create_query(ctx, PIPE_QUERY_TIME_ELAPSED, 0);
   if (!q)
      return NULL;

   ctx->begin_query(ctx, q);
   return q;
}

/* This is a helper for profiling. Don't remove. */
void
util_end_time_query(struct pipe_context *ctx, struct pipe_query *q, FILE *f,
                    const char *name)
{
   union pipe_query_result result;

   ctx->end_query(ctx, q);
   ctx->get_query_result(ctx, q, true, &result);
   ctx->destroy_query(ctx, q);

   fprintf(f, "Time elapsed: %s - %"PRIu64".%u us\n", name,
           result.u64 / 1000, (unsigned)(result.u64 % 1000) / 100);
}
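
/* Likewise, the time-query helpers bracket a region of interest (sketch;
 * run_expensive_operation() is hypothetical):
 *
 *    struct pipe_query *q = util_begin_time_query(ctx);
 *    run_expensive_operation(ctx);
 *    if (q)
 *       util_end_time_query(ctx, q, stderr, "expensive operation");
 */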

/* This is a helper for hardware bring-up. Don't remove. */
void
util_wait_for_idle(struct pipe_context *ctx)
{
   struct pipe_fence_handle *fence = NULL;

   ctx->flush(ctx, &fence, 0);
   ctx->screen->fence_finish(ctx->screen, NULL, fence, OS_TIMEOUT_INFINITE);
}

void
util_throttle_init(struct util_throttle *t, uint64_t max_mem_usage)
{
   t->max_mem_usage = max_mem_usage;
}

void
util_throttle_deinit(struct pipe_screen *screen, struct util_throttle *t)
{
   for (unsigned i = 0; i < ARRAY_SIZE(t->ring); i++)
      screen->fence_reference(screen, &t->ring[i].fence, NULL);
}

static uint64_t
util_get_throttle_total_memory_usage(struct util_throttle *t)
{
   uint64_t total_usage = 0;

   for (unsigned i = 0; i < ARRAY_SIZE(t->ring); i++)
      total_usage += t->ring[i].mem_usage;
   return total_usage;
}

static void util_dump_throttle_ring(struct util_throttle *t)
{
   printf("Throttle:\n");
   for (unsigned i = 0; i < ARRAY_SIZE(t->ring); i++) {
      printf("  ring[%u]: fence = %s, mem_usage = %"PRIu64"%s%s\n",
             i, t->ring[i].fence ? "yes" : " no",
             t->ring[i].mem_usage,
             t->flush_index == i ? " [flush]" : "",
             t->wait_index == i ? " [wait]" : "");
   }
}

/**
 * Notify util_throttle that the next operation allocates memory.
 * util_throttle tracks memory usage and waits for fences until its tracked
 * memory usage decreases.
 *
 * Example:
 *   util_throttle_memory_usage(..., w*h*d*Bpp);
 *   TexSubImage(..., w, h, d, ...);
 *
 * This means that TexSubImage can't allocate more memory than its maximum
 * limit set during initialization.
 */
void
util_throttle_memory_usage(struct pipe_context *pipe,
                           struct util_throttle *t, uint64_t memory_size)
{
   (void)util_dump_throttle_ring; /* silence warning */

   if (!t->max_mem_usage)
      return;

   MESA_TRACE_FUNC();

   struct pipe_screen *screen = pipe->screen;
   struct pipe_fence_handle **fence = NULL;
   unsigned ring_size = ARRAY_SIZE(t->ring);
   uint64_t total = util_get_throttle_total_memory_usage(t);

   /* If there is not enough memory, walk the list of fences and find
    * the latest one that we need to wait for.
    */
   while (t->wait_index != t->flush_index &&
          total && total + memory_size > t->max_mem_usage) {
      assert(t->ring[t->wait_index].fence);

      /* Release an older fence if we need to wait for a newer one. */
      if (fence)
         screen->fence_reference(screen, fence, NULL);

      fence = &t->ring[t->wait_index].fence;
      t->ring[t->wait_index].mem_usage = 0;
      t->wait_index = (t->wait_index + 1) % ring_size;

      total = util_get_throttle_total_memory_usage(t);
   }

   /* Wait for the fence to decrease memory usage. */
   if (fence) {
      screen->fence_finish(screen, pipe, *fence, OS_TIMEOUT_INFINITE);
      screen->fence_reference(screen, fence, NULL);
   }

   /* Flush and get a fence if we've exhausted memory usage for the current
    * slot.
    */
   if (t->ring[t->flush_index].mem_usage &&
       t->ring[t->flush_index].mem_usage + memory_size >
       t->max_mem_usage / (ring_size / 2)) {
      struct pipe_fence_handle **fence =
         &t->ring[t->flush_index].fence;

      /* Expect that the current flush slot doesn't have a fence yet. */
      assert(!*fence);

      pipe->flush(pipe, fence, PIPE_FLUSH_ASYNC);
      t->flush_index = (t->flush_index + 1) % ring_size;

      /* Vacate the next slot if it's occupied. This should be rare. */
      if (t->flush_index == t->wait_index) {
         struct pipe_fence_handle **fence =
            &t->ring[t->wait_index].fence;

         t->ring[t->wait_index].mem_usage = 0;
         t->wait_index = (t->wait_index + 1) % ring_size;

         assert(*fence);
         screen->fence_finish(screen, pipe, *fence, OS_TIMEOUT_INFINITE);
         screen->fence_reference(screen, fence, NULL);
      }

      assert(!t->ring[t->flush_index].mem_usage);
      assert(!t->ring[t->flush_index].fence);
   }

   t->ring[t->flush_index].mem_usage += memory_size;
}
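
/* A sketch of the full throttle lifecycle in a driver that wants to cap
 * in-flight upload memory (the 256 MB limit and do_upload() are arbitrary
 * illustrative choices):
 *
 *    util_throttle_init(&ctx->throttle, 256 * 1024 * 1024);
 *    ...
 *    // before each operation that allocates staging memory:
 *    util_throttle_memory_usage(pipe, &ctx->throttle, upload_size);
 *    do_upload(...);
 *    ...
 *    util_throttle_deinit(pipe->screen, &ctx->throttle);
 */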

void
util_sw_query_memory_info(struct pipe_screen *pscreen,
                          struct pipe_memory_info *info)
{
   /* Provide query_memory_info from CPU reported memory */
   uint64_t size;

   if (!os_get_available_system_memory(&size))
      return;
   info->avail_staging_memory = size / 1024;
   if (!os_get_total_physical_memory(&size))
      return;
   info->total_staging_memory = size / 1024;
}

bool
util_lower_clearsize_to_dword(const void *clearValue, int *clearValueSize, uint32_t *clamped)
{
   /* Reduce a large clear value size if possible. */
   if (*clearValueSize > 4) {
      bool clear_dword_duplicated = true;
      const uint32_t *clear_value = clearValue;

      /* See if we can lower large fills to dword fills. */
      for (unsigned i = 1; i < *clearValueSize / 4; i++) {
         if (clear_value[0] != clear_value[i]) {
            clear_dword_duplicated = false;
            break;
         }
      }
      if (clear_dword_duplicated) {
         *clamped = *clear_value;
         *clearValueSize = 4;
      }
      return clear_dword_duplicated;
   }

   /* Expand a small clear value size. */
   if (*clearValueSize <= 2) {
      if (*clearValueSize == 1) {
         *clamped = *(uint8_t *)clearValue;
         *clamped |=
            (*clamped << 8) | (*clamped << 16) | (*clamped << 24);
      } else {
         *clamped = *(uint16_t *)clearValue;
         *clamped |= *clamped << 16;
      }
      *clearValueSize = 4;
      return true;
   }
   return false;
}
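
/* Example of util_lower_clearsize_to_dword in a hypothetical buffer-clear
 * path: a 16-byte clear value whose four dwords are identical is reduced to
 * a single dword, and an 8- or 16-bit value is replicated up to 32 bits
 * (drv_fill_buffer_dwords()/drv_fill_buffer_generic() are illustrative):
 *
 *    uint32_t dword;
 *    int size = clear_value_size;
 *
 *    if (util_lower_clearsize_to_dword(clear_value, &size, &dword)) {
 *       // size is now 4 and "dword" holds the replicated/reduced pattern
 *       drv_fill_buffer_dwords(ctx, res, offset, fill_size, dword);
 *    } else {
 *       drv_fill_buffer_generic(ctx, res, offset, fill_size,
 *                               clear_value, size);
 *    }
 */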

void
util_init_pipe_vertex_state(struct pipe_screen *screen,
                            struct pipe_vertex_buffer *buffer,
                            const struct pipe_vertex_element *elements,
                            unsigned num_elements,
                            struct pipe_resource *indexbuf,
                            uint32_t full_velem_mask,
                            struct pipe_vertex_state *state)
{
   assert(num_elements == util_bitcount(full_velem_mask));

   pipe_reference_init(&state->reference, 1);
   state->screen = screen;

   pipe_vertex_buffer_reference(&state->input.vbuffer, buffer);
   pipe_resource_reference(&state->input.indexbuf, indexbuf);
   state->input.num_elements = num_elements;
   for (unsigned i = 0; i < num_elements; i++)
      state->input.elements[i] = elements[i];
   state->input.full_velem_mask = full_velem_mask;
}

/**
 * Clamp color value to format range.
 */
union pipe_color_union
util_clamp_color(enum pipe_format format,
                 const union pipe_color_union *color)
{
   union pipe_color_union clamp_color = *color;
   int i;

   for (i = 0; i < 4; i++) {
      uint8_t bits = util_format_get_component_bits(format, UTIL_FORMAT_COLORSPACE_RGB, i);

      if (!bits)
         continue;

      if (util_format_is_unorm(format))
         clamp_color.f[i] = SATURATE(clamp_color.f[i]);
      else if (util_format_is_snorm(format))
         clamp_color.f[i] = CLAMP(clamp_color.f[i], -1.0, 1.0);
      else if (util_format_is_pure_uint(format))
         clamp_color.ui[i] = _mesa_unsigned_to_unsigned(clamp_color.ui[i], bits);
      else if (util_format_is_pure_sint(format))
         clamp_color.i[i] = _mesa_signed_to_signed(clamp_color.i[i], bits);
   }

   return clamp_color;
}
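
/* Worked example for util_clamp_color: clearing a PIPE_FORMAT_R8G8B8A8_UNORM
 * surface with color (1.3, -0.2, 0.5, 2.0) yields (1.0, 0.0, 0.5, 1.0), and
 * for an 8-bit pure-integer format a value such as 300 is clamped to the
 * representable maximum (255 unsigned, 127 signed). Sketch of a clear path
 * using it (drv_clear_render_target() is hypothetical):
 *
 *    union pipe_color_union clamped =
 *       util_clamp_color(surf->format, &color);
 *    drv_clear_render_target(ctx, surf, &clamped);
 */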

/*
 * Some hardware does not use a distinct descriptor for images, so it is
 * convenient for drivers to reuse their texture descriptor packing for shader
 * images. This helper constructs a synthetic, non-reference counted
 * pipe_sampler_view corresponding to a given pipe_image_view for drivers'
 * internal convenience.
 *
 * The returned descriptor is "synthetic" in the sense that it is not reference
 * counted and the context field is ignored. Otherwise it's complete.
 */
struct pipe_sampler_view
util_image_to_sampler_view(struct pipe_image_view *v)
{
   struct pipe_sampler_view out = {
      .format = v->format,
      .is_tex2d_from_buf = v->access & PIPE_IMAGE_ACCESS_TEX2D_FROM_BUFFER,
      .target = v->resource->target,
      .swizzle_r = PIPE_SWIZZLE_X,
      .swizzle_g = PIPE_SWIZZLE_Y,
      .swizzle_b = PIPE_SWIZZLE_Z,
      .swizzle_a = PIPE_SWIZZLE_W,
      .texture = v->resource,
   };

   if (out.target == PIPE_BUFFER) {
      out.u.buf.offset = v->u.buf.offset;
      out.u.buf.size = v->u.buf.size;
   } else if (out.is_tex2d_from_buf) {
      out.u.tex2d_from_buf.offset = v->u.tex2d_from_buf.offset;
      out.u.tex2d_from_buf.row_stride = v->u.tex2d_from_buf.row_stride;
      out.u.tex2d_from_buf.width = v->u.tex2d_from_buf.width;
      out.u.tex2d_from_buf.height = v->u.tex2d_from_buf.height;
   } else {
      /* For a single layer view of a multilayer image, we need to swap in the
       * non-layered texture target to match the texture instruction.
       */
      if (v->u.tex.single_layer_view) {
         switch (out.target) {
         case PIPE_TEXTURE_1D_ARRAY:
            /* A single layer is a 1D image */
            out.target = PIPE_TEXTURE_1D;
            break;

         case PIPE_TEXTURE_3D:
         case PIPE_TEXTURE_CUBE:
         case PIPE_TEXTURE_2D_ARRAY:
         case PIPE_TEXTURE_CUBE_ARRAY:
            /* A single layer/face is a 2D image.
             *
             * Note that OpenGL does not otherwise support 2D views of 3D.
             * Drivers that use this helper must support that anyway.
             */
            out.target = PIPE_TEXTURE_2D;
            break;

         default:
            /* Other texture targets already only have 1 layer, nothing to do */
            break;
         }
      }

      out.u.tex.first_layer = v->u.tex.first_layer;
      out.u.tex.last_layer = v->u.tex.last_layer;

      /* Single level only */
      out.u.tex.first_level = v->u.tex.level;
      out.u.tex.last_level = v->u.tex.level;
   }

   return out;
}
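
/* A sketch of a driver reusing its sampler-view descriptor packing for
 * shader images via the helper above (drv_pack_texture_descriptor() is
 * hypothetical):
 *
 *    static void
 *    drv_emit_image(struct drv_context *ctx, struct pipe_image_view *view)
 *    {
 *       // Not reference counted; valid only while "view" stays bound.
 *       struct pipe_sampler_view tmpl = util_image_to_sampler_view(view);
 *
 *       drv_pack_texture_descriptor(ctx, &tmpl);
 *    }
 */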