/*
 * Copyright (C) 2008 VMware, Inc.
 * Copyright (C) 2014 Broadcom
 * Copyright (C) 2018-2019 Alyssa Rosenzweig
 * Copyright (C) 2019-2020 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include "util/macros.h"
#include "util/u_math.h"
#include "pan_texture.h"

#if PAN_ARCH >= 5
/*
 * Arm Scalable Texture Compression (ASTC) corresponds to just a few formats.
 * The block dimension is not part of the format. Instead, it is encoded as a
 * 6-bit tag on the payload pointer. Map the block size for a single dimension.
 */
static inline enum mali_astc_2d_dimension
panfrost_astc_dim_2d(unsigned dim)
{
        switch (dim) {
        case 4: return MALI_ASTC_2D_DIMENSION_4;
        case 5: return MALI_ASTC_2D_DIMENSION_5;
        case 6: return MALI_ASTC_2D_DIMENSION_6;
        case 8: return MALI_ASTC_2D_DIMENSION_8;
        case 10: return MALI_ASTC_2D_DIMENSION_10;
        case 12: return MALI_ASTC_2D_DIMENSION_12;
        default: unreachable("Invalid ASTC dimension");
        }
}

static inline enum mali_astc_3d_dimension
panfrost_astc_dim_3d(unsigned dim)
{
        switch (dim) {
        case 3: return MALI_ASTC_3D_DIMENSION_3;
        case 4: return MALI_ASTC_3D_DIMENSION_4;
        case 5: return MALI_ASTC_3D_DIMENSION_5;
        case 6: return MALI_ASTC_3D_DIMENSION_6;
        default: unreachable("Invalid ASTC dimension");
        }
}
#endif

/* Texture addresses are tagged with information about compressed formats.
 * For AFBC, the tag carries a bit for whether the colorspace transform is
 * enabled (RGB and RGBA only). For ASTC, the tag is a "stretch factor"
 * encoding the block size. */
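
/* For example, a 2D ASTC format with a 6x5 block would be tagged with
 * (panfrost_astc_dim_2d(5) << 3) | panfrost_astc_dim_2d(6), matching the
 * packing done in panfrost_compression_tag() below. */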

static unsigned
panfrost_compression_tag(const struct util_format_description *desc,
                         enum mali_texture_dimension dim,
                         uint64_t modifier)
{
#if PAN_ARCH >= 5 && PAN_ARCH <= 8
        if (drm_is_afbc(modifier)) {
                unsigned flags = (modifier & AFBC_FORMAT_MOD_YTR) ?
                                 MALI_AFBC_SURFACE_FLAG_YTR : 0;

#if PAN_ARCH >= 6
                /* Prefetch enable */
                flags |= MALI_AFBC_SURFACE_FLAG_PREFETCH;

                if (panfrost_afbc_is_wide(modifier))
                        flags |= MALI_AFBC_SURFACE_FLAG_WIDE_BLOCK;
#endif

#if PAN_ARCH >= 7
                /* Tiled headers */
                if (modifier & AFBC_FORMAT_MOD_TILED)
                        flags |= MALI_AFBC_SURFACE_FLAG_TILED_HEADER;

                /* Used to make sure AFBC headers don't point outside the AFBC
                 * body. The HW performs this check with the AFBC surface
                 * stride, which doesn't work for 3D textures because the
                 * surface stride does not cover the body. Only supported on
                 * v7+.
                 */
                if (dim != MALI_TEXTURE_DIMENSION_3D)
                        flags |= MALI_AFBC_SURFACE_FLAG_CHECK_PAYLOAD_RANGE;
#endif

                return flags;
        } else if (desc->layout == UTIL_FORMAT_LAYOUT_ASTC) {
                if (desc->block.depth > 1) {
                        return (panfrost_astc_dim_3d(desc->block.depth) << 4) |
                               (panfrost_astc_dim_3d(desc->block.height) << 2) |
                               panfrost_astc_dim_3d(desc->block.width);
                } else {
                        return (panfrost_astc_dim_2d(desc->block.height) << 3) |
                               panfrost_astc_dim_2d(desc->block.width);
                }
        }
#endif

        /* Tags are not otherwise used */
        return 0;
}

/* Cubemaps are stored with their 6 faces as "layers", so each array layer
 * expands to 6 face layers. Convert the layer range into a layer/face range.
 * TODO: the logic is wrong in the asserted-out cases ... can they happen,
 * perhaps with cubemap arrays? */

static void
panfrost_adjust_cube_dimensions(
                unsigned *first_face, unsigned *last_face,
                unsigned *first_layer, unsigned *last_layer)
{
        *first_face = *first_layer % 6;
        *last_face = *last_layer % 6;
        *first_layer /= 6;
        *last_layer /= 6;

        assert((*first_layer == *last_layer) || (*first_face == 0 && *last_face == 5));
}
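
/* For example, a view covering layers 6..11 of a cubemap array selects faces
 * 0..5 of cube layer 1: 6 % 6 = 0, 11 % 6 = 5, and 6 / 6 = 11 / 6 = 1, which
 * satisfies the assertion above. */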

/* The texture descriptor is followed by a variable number of surface (or
 * plane) descriptors. Compute how many a given view needs. */

static unsigned
panfrost_texture_num_elements(
                unsigned first_level, unsigned last_level,
                unsigned first_layer, unsigned last_layer,
                unsigned nr_samples, bool is_cube)
{
        unsigned first_face = 0, last_face = 0;

        if (is_cube) {
                panfrost_adjust_cube_dimensions(&first_face, &last_face,
                                                &first_layer, &last_layer);
        }

        unsigned levels = 1 + last_level - first_level;
        unsigned layers = 1 + last_layer - first_layer;
        unsigned faces = 1 + last_face - first_face;

        return levels * layers * faces * MAX2(nr_samples, 1);
}
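
/* For example, a single-sampled cubemap array view spanning 3 mip levels and
 * 2 cube layers (i.e. 12 face layers) needs 3 * 2 * 6 * 1 = 36 elements. */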

/* Conservative a priori estimate of the size of the texture payload. On
 * average the estimate equals the actual size; in the worst case it is off
 * by 2x (when a manual stride is not needed on a linear texture). The
 * returned value is always greater than or equal to the actual size, so it
 * is safe to use as an allocation amount. */

unsigned
GENX(panfrost_estimate_texture_payload_size)(const struct pan_image_view *iview)
{
#if PAN_ARCH >= 9
        size_t element_size = pan_size(PLANE);
#else
        /* Assume worst case. Overestimates on Midgard, but that's ok. */
        size_t element_size = pan_size(SURFACE_WITH_STRIDE);
#endif

        unsigned elements =
                panfrost_texture_num_elements(iview->first_level, iview->last_level,
                                              iview->first_layer, iview->last_layer,
                                              iview->image->layout.nr_samples,
                                              iview->dim == MALI_TEXTURE_DIMENSION_CUBE);

        return element_size * elements;
}
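
/* For example, a view with 36 elements (as in the cubemap-array example
 * above) gets an estimate of 36 * pan_size(PLANE) bytes on v9+, or
 * 36 * pan_size(SURFACE_WITH_STRIDE) bytes on earlier architectures. */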

struct panfrost_surface_iter {
        unsigned layer, last_layer;
        unsigned level, first_level, last_level;
        unsigned face, first_face, last_face;
        unsigned sample, first_sample, last_sample;
};

static void
panfrost_surface_iter_begin(struct panfrost_surface_iter *iter,
                            unsigned first_layer, unsigned last_layer,
                            unsigned first_level, unsigned last_level,
                            unsigned first_face, unsigned last_face,
                            unsigned nr_samples)
{
        iter->layer = first_layer;
        iter->last_layer = last_layer;
        iter->level = iter->first_level = first_level;
        iter->last_level = last_level;
        iter->face = iter->first_face = first_face;
        iter->last_face = last_face;
        iter->sample = iter->first_sample = 0;
        iter->last_sample = nr_samples - 1;
}

static bool
panfrost_surface_iter_end(const struct panfrost_surface_iter *iter)
{
        return iter->layer > iter->last_layer;
}

static void
panfrost_surface_iter_next(struct panfrost_surface_iter *iter)
{
#define INC_TEST(field) \
        do { \
                if (iter->field++ < iter->last_ ## field) \
                        return; \
                iter->field = iter->first_ ## field; \
        } while (0)

        /* Ordering is different on v7+: the innermost loop iterates over
         * levels */
        if (PAN_ARCH >= 7)
                INC_TEST(level);

        INC_TEST(sample);
        INC_TEST(face);

        if (PAN_ARCH < 7)
                INC_TEST(level);

        iter->layer++;

#undef INC_TEST
}
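
/* The iterator above advances like an odometer: each INC_TEST steps one
 * field and returns unless that field wraps to its first value. On v7+,
 * levels therefore vary fastest; e.g. with 2 levels and 2 samples the order
 * within a layer is (level 0, sample 0), (level 1, sample 0),
 * (level 0, sample 1), (level 1, sample 1). Before v7, samples vary fastest
 * instead. */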

static void
panfrost_get_surface_strides(const struct pan_image_layout *layout,
                             unsigned l,
                             int32_t *row_stride, int32_t *surf_stride)
{
        const struct pan_image_slice_layout *slice = &layout->slices[l];

        if (drm_is_afbc(layout->modifier)) {
                /* Pre-v7 hardware doesn't have a row stride field. The field
                 * is repurposed as a Y offset, which we don't use */
                *row_stride = PAN_ARCH < 7 ? 0 : slice->row_stride;
                *surf_stride = slice->afbc.surface_stride;

                /* Row stride alignment requirement does not apply to AFBC */
        } else {
                *row_stride = slice->row_stride;
                *surf_stride = slice->surface_stride;

                /* In particular for linear textures, the row stride must be
                 * aligned */
                assert(pan_is_stride_aligned(layout->format, *row_stride));
        }

        /* All surface strides are aligned, as required for linear */
        assert(pan_is_stride_aligned(layout->format, *surf_stride));
}

static mali_ptr
panfrost_get_surface_pointer(const struct pan_image_layout *layout,
                             enum mali_texture_dimension dim,
                             mali_ptr base,
                             unsigned l, unsigned w, unsigned f, unsigned s)
{
        unsigned face_mult = dim == MALI_TEXTURE_DIMENSION_CUBE ? 6 : 1;
        unsigned offset;

        if (layout->dim == MALI_TEXTURE_DIMENSION_3D) {
                assert(!f && !s);
                offset = layout->slices[l].offset +
                         (w * panfrost_get_layer_stride(layout, l));
        } else {
                offset = panfrost_texture_offset(layout, l, (w * face_mult) + f, s);
        }

        return base + offset;
}
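
/* For example, for a cube view, layer w = 2 and face f = 3 resolve to the
 * flattened layer index 2 * 6 + 3 = 15 passed to panfrost_texture_offset(). */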

#if PAN_ARCH >= 9

#define CLUMP_FMT(pipe, mali) [PIPE_FORMAT_ ## pipe] = MALI_CLUMP_FORMAT_ ## mali
static enum mali_clump_format special_clump_formats[PIPE_FORMAT_COUNT] = {
        CLUMP_FMT(X32_S8X24_UINT, X32S8X24),
        CLUMP_FMT(X24S8_UINT, X24S8),
        CLUMP_FMT(S8X24_UINT, S8X24),
        CLUMP_FMT(S8_UINT, S8),
        CLUMP_FMT(L4A4_UNORM, L4A4),
        CLUMP_FMT(L8A8_UNORM, L8A8),
        CLUMP_FMT(L8A8_UINT, L8A8),
        CLUMP_FMT(L8A8_SINT, L8A8),
        CLUMP_FMT(A8_UNORM, A8),
        CLUMP_FMT(A8_UINT, A8),
        CLUMP_FMT(A8_SINT, A8),
        CLUMP_FMT(ETC1_RGB8, ETC2_RGB8),
        CLUMP_FMT(ETC2_RGB8, ETC2_RGB8),
        CLUMP_FMT(ETC2_SRGB8, ETC2_RGB8),
        CLUMP_FMT(ETC2_RGB8A1, ETC2_RGB8A1),
        CLUMP_FMT(ETC2_SRGB8A1, ETC2_RGB8A1),
        CLUMP_FMT(ETC2_RGBA8, ETC2_RGBA8),
        CLUMP_FMT(ETC2_SRGBA8, ETC2_RGBA8),
        CLUMP_FMT(ETC2_R11_UNORM, ETC2_R11_UNORM),
        CLUMP_FMT(ETC2_R11_SNORM, ETC2_R11_SNORM),
        CLUMP_FMT(ETC2_RG11_UNORM, ETC2_RG11_UNORM),
        CLUMP_FMT(ETC2_RG11_SNORM, ETC2_RG11_SNORM),
        CLUMP_FMT(DXT1_RGB, BC1_UNORM),
        CLUMP_FMT(DXT1_RGBA, BC1_UNORM),
        CLUMP_FMT(DXT1_SRGB, BC1_UNORM),
        CLUMP_FMT(DXT1_SRGBA, BC1_UNORM),
        CLUMP_FMT(DXT3_RGBA, BC2_UNORM),
        CLUMP_FMT(DXT3_SRGBA, BC2_UNORM),
        CLUMP_FMT(DXT5_RGBA, BC3_UNORM),
        CLUMP_FMT(DXT5_SRGBA, BC3_UNORM),
        CLUMP_FMT(RGTC1_UNORM, BC4_UNORM),
        CLUMP_FMT(RGTC1_SNORM, BC4_SNORM),
        CLUMP_FMT(RGTC2_UNORM, BC5_UNORM),
        CLUMP_FMT(RGTC2_SNORM, BC5_SNORM),
        CLUMP_FMT(BPTC_RGB_FLOAT, BC6H_SF16),
        CLUMP_FMT(BPTC_RGB_UFLOAT, BC6H_UF16),
        CLUMP_FMT(BPTC_RGBA_UNORM, BC7_UNORM),
        CLUMP_FMT(BPTC_SRGBA, BC7_UNORM),
};
#undef CLUMP_FMT

static enum mali_clump_format
panfrost_clump_format(enum pipe_format format)
{
        /* First, try a special clump format. Note that the 0 encoding is for
         * a raw clump format, which will never be in the special table.
         */
        if (special_clump_formats[format])
                return special_clump_formats[format];

        /* Else, it's a raw format. Raw formats must not be compressed. */
        assert(!util_format_is_compressed(format));

        /* Select the appropriate raw format. */
        switch (util_format_get_blocksize(format)) {
        case 1: return MALI_CLUMP_FORMAT_RAW8;
        case 2: return MALI_CLUMP_FORMAT_RAW16;
        case 3: return MALI_CLUMP_FORMAT_RAW24;
        case 4: return MALI_CLUMP_FORMAT_RAW32;
        case 6: return MALI_CLUMP_FORMAT_RAW48;
        case 8: return MALI_CLUMP_FORMAT_RAW64;
        case 12: return MALI_CLUMP_FORMAT_RAW96;
        case 16: return MALI_CLUMP_FORMAT_RAW128;
        default: unreachable("Invalid bpp");
        }
}
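
/* For example, PIPE_FORMAT_R32G32B32A32_FLOAT has a 16-byte block and is not
 * in the special table, so it maps to MALI_CLUMP_FORMAT_RAW128. */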

static enum mali_afbc_superblock_size
translate_superblock_size(uint64_t modifier)
{
        assert(drm_is_afbc(modifier));

        switch (modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) {
        case AFBC_FORMAT_MOD_BLOCK_SIZE_16x16:
                return MALI_AFBC_SUPERBLOCK_SIZE_16X16;
        case AFBC_FORMAT_MOD_BLOCK_SIZE_32x8:
                return MALI_AFBC_SUPERBLOCK_SIZE_32X8;
        case AFBC_FORMAT_MOD_BLOCK_SIZE_64x4:
                return MALI_AFBC_SUPERBLOCK_SIZE_64X4;
        default:
                unreachable("Invalid superblock size");
        }
}

static void
panfrost_emit_plane(const struct pan_image_layout *layout,
                    enum pipe_format format,
                    mali_ptr pointer,
                    unsigned level,
                    void *payload)
{
        const struct util_format_description *desc =
                util_format_description(layout->format);

        int32_t row_stride, surface_stride;

        panfrost_get_surface_strides(layout, level, &row_stride, &surface_stride);
        assert(row_stride >= 0 && surface_stride >= 0 && "negative stride");

        bool afbc = drm_is_afbc(layout->modifier);

        pan_pack(payload, PLANE, cfg) {
                cfg.pointer = pointer;
                cfg.row_stride = row_stride;
                cfg.size = layout->data_size - layout->slices[level].offset;

                cfg.slice_stride = layout->nr_samples ?
                                   layout->slices[level].surface_stride :
                                   panfrost_get_layer_stride(layout, level);

                if (desc->layout == UTIL_FORMAT_LAYOUT_ASTC) {
                        assert(!afbc);

                        if (desc->block.depth > 1) {
                                cfg.plane_type = MALI_PLANE_TYPE_ASTC_3D;
                                cfg.astc._3d.block_width = panfrost_astc_dim_3d(desc->block.width);
                                cfg.astc._3d.block_height = panfrost_astc_dim_3d(desc->block.height);
                                cfg.astc._3d.block_depth = panfrost_astc_dim_3d(desc->block.depth);
                        } else {
                                cfg.plane_type = MALI_PLANE_TYPE_ASTC_2D;
                                cfg.astc._2d.block_width = panfrost_astc_dim_2d(desc->block.width);
                                cfg.astc._2d.block_height = panfrost_astc_dim_2d(desc->block.height);
                        }

                        bool srgb = (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB);

                        /* Mesa does not advertise _HDR formats yet */
                        cfg.astc.decode_hdr = false;

                        /* sRGB formats decode to RGBA8 sRGB, which is narrow.
                         *
                         * Non-sRGB formats decode to RGBA16F which is wide.
                         * With a future extension, we could decode non-sRGB
                         * formats narrowly too, but this isn't wired up in
                         * Mesa yet.
                         */
                        cfg.astc.decode_wide = !srgb;
                } else if (afbc) {
                        cfg.plane_type = MALI_PLANE_TYPE_AFBC;
                        cfg.afbc.superblock_size = translate_superblock_size(layout->modifier);
                        cfg.afbc.ytr = (layout->modifier & AFBC_FORMAT_MOD_YTR);
                        cfg.afbc.tiled_header = (layout->modifier & AFBC_FORMAT_MOD_TILED);
                        cfg.afbc.prefetch = true;
                        cfg.afbc.compression_mode = pan_afbc_compression_mode(format);
                        cfg.afbc.header_stride = layout->slices[level].afbc.header_size;
                } else {
                        cfg.plane_type = MALI_PLANE_TYPE_GENERIC;
                        cfg.clump_format = panfrost_clump_format(format);
                }

                if (!afbc && layout->modifier == DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED)
                        cfg.clump_ordering = MALI_CLUMP_ORDERING_TILED_U_INTERLEAVED;
                else if (!afbc)
                        cfg.clump_ordering = MALI_CLUMP_ORDERING_LINEAR;
        }
}
#endif

static void
panfrost_emit_texture_payload(const struct pan_image_view *iview,
                              enum pipe_format format,
                              void *payload)
{
        const struct pan_image_layout *layout = &iview->image->layout;
        ASSERTED const struct util_format_description *desc =
                util_format_description(format);

        mali_ptr base = iview->image->data.bo->ptr.gpu + iview->image->data.offset;

        if (iview->buf.size) {
                assert(iview->dim == MALI_TEXTURE_DIMENSION_1D);
                base += iview->buf.offset;
        }

        /* panfrost_compression_tag() wants the dimension of the resource, not
         * the one of the image view (those might differ).
         */
        base |= panfrost_compression_tag(desc, layout->dim, layout->modifier);

        /* v4 does not support compression */
        assert(PAN_ARCH >= 5 || !drm_is_afbc(layout->modifier));
        assert(PAN_ARCH >= 5 || desc->layout != UTIL_FORMAT_LAYOUT_ASTC);

        /* Inject the addresses in, interleaving array indices, mip levels,
         * cube faces, and strides in that order. On Bifrost and older, each
         * sample had its own surface descriptor; on Valhall, they are fused
         * into a single plane descriptor.
         */

        unsigned first_layer = iview->first_layer, last_layer = iview->last_layer;
        unsigned nr_samples = PAN_ARCH <= 7 ? layout->nr_samples : 1;
        unsigned first_face = 0, last_face = 0;

        if (iview->dim == MALI_TEXTURE_DIMENSION_CUBE) {
                panfrost_adjust_cube_dimensions(&first_face, &last_face,
                                                &first_layer, &last_layer);
        }

        struct panfrost_surface_iter iter;

        for (panfrost_surface_iter_begin(&iter, first_layer, last_layer,
                                         iview->first_level, iview->last_level,
                                         first_face, last_face, nr_samples);
             !panfrost_surface_iter_end(&iter);
             panfrost_surface_iter_next(&iter)) {
                mali_ptr pointer =
                        panfrost_get_surface_pointer(layout, iview->dim, base,
                                                     iter.level, iter.layer,
                                                     iter.face, iter.sample);

#if PAN_ARCH >= 9
                panfrost_emit_plane(layout, format, pointer, iter.level, payload);
                payload += pan_size(PLANE);
#else
                pan_pack(payload, SURFACE_WITH_STRIDE, cfg) {
                        cfg.pointer = pointer;
                        panfrost_get_surface_strides(layout, iter.level,
                                                     &cfg.row_stride,
                                                     &cfg.surface_stride);
                }
                payload += pan_size(SURFACE_WITH_STRIDE);
#endif
        }
}

#if PAN_ARCH <= 7
/* Map modifiers to mali_texture_layout for packing in a texture descriptor */

static enum mali_texture_layout
panfrost_modifier_to_layout(uint64_t modifier)
{
        if (drm_is_afbc(modifier))
                return MALI_TEXTURE_LAYOUT_AFBC;
        else if (modifier == DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED)
                return MALI_TEXTURE_LAYOUT_TILED;
        else if (modifier == DRM_FORMAT_MOD_LINEAR)
                return MALI_TEXTURE_LAYOUT_LINEAR;
        else
                unreachable("Invalid modifier");
}
#endif

/*
 * Generates a texture descriptor. Ideally, descriptors are immutable after the
 * texture is created, so we can keep these hanging around in GPU memory in a
 * dedicated BO and not have to worry. In practice there are some minor gotchas
 * with this (the driver sometimes will change the format of a texture on the
 * fly for compression), but it's fast enough to just regenerate the descriptor
 * in those cases, rather than monkeypatching at draw time. A texture
 * descriptor consists of a 32-byte header followed by pointers.
 */
void
GENX(panfrost_new_texture)(const struct panfrost_device *dev,
                           const struct pan_image_view *iview,
                           void *out, const struct panfrost_ptr *payload)
{
        const struct pan_image_layout *layout = &iview->image->layout;
        enum pipe_format format = iview->format;
        unsigned swizzle;

        if (PAN_ARCH >= 7 && util_format_is_depth_or_stencil(format)) {
                /* v7+ doesn't have an _RRRR component order, so combine the
                 * user swizzle with a .XXXX swizzle to emulate that.
                 */
                static const unsigned char replicate_x[4] = {
                        PIPE_SWIZZLE_X, PIPE_SWIZZLE_X,
                        PIPE_SWIZZLE_X, PIPE_SWIZZLE_X,
                };
                unsigned char patched_swizzle[4];

                util_format_compose_swizzles(replicate_x,
                                             iview->swizzle,
                                             patched_swizzle);
                swizzle = panfrost_translate_swizzle_4(patched_swizzle);
        } else {
                swizzle = panfrost_translate_swizzle_4(iview->swizzle);
        }

        panfrost_emit_texture_payload(iview, format, payload->cpu);

        unsigned array_size = iview->last_layer - iview->first_layer + 1;

        if (iview->dim == MALI_TEXTURE_DIMENSION_CUBE) {
                assert(iview->first_layer % 6 == 0);
                assert(iview->last_layer % 6 == 5);
                array_size /= 6;
        }

        unsigned width;

        if (iview->buf.size) {
                assert(iview->dim == MALI_TEXTURE_DIMENSION_1D);
                assert(!iview->first_level && !iview->last_level);
                assert(!iview->first_layer && !iview->last_layer);
                assert(layout->nr_samples == 1);
                assert(layout->height == 1 && layout->depth == 1);
                assert(iview->buf.offset + iview->buf.size <= layout->width);
                width = iview->buf.size;
        } else {
                width = u_minify(layout->width, iview->first_level);
        }

        pan_pack(out, TEXTURE, cfg) {
                cfg.dimension = iview->dim;
                cfg.format = dev->formats[format].hw;
                cfg.width = width;
                cfg.height = u_minify(layout->height, iview->first_level);
                if (iview->dim == MALI_TEXTURE_DIMENSION_3D)
                        cfg.depth = u_minify(layout->depth, iview->first_level);
                else
                        cfg.sample_count = layout->nr_samples;
                cfg.swizzle = swizzle;
#if PAN_ARCH >= 9
                cfg.texel_interleave =
                        (layout->modifier != DRM_FORMAT_MOD_LINEAR) ||
                        util_format_is_compressed(format);
#else
                cfg.texel_ordering =
                        panfrost_modifier_to_layout(layout->modifier);
#endif
                cfg.levels = iview->last_level - iview->first_level + 1;
                cfg.array_size = array_size;

#if PAN_ARCH >= 6
                cfg.surfaces = payload->gpu;

                /* We specify API-level LOD clamps in the sampler descriptor
                 * and use these clamps simply for bounds checking */
                cfg.minimum_lod = FIXED_16(0, false);
                cfg.maximum_lod = FIXED_16(cfg.levels - 1, false);
#else
                cfg.manual_stride = true;
#endif
        }
}
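
/*
 * Illustrative usage sketch (assumptions, not an API defined in this file):
 * a caller is expected to size the payload with the estimate helper,
 * allocate that much GPU-visible memory (alloc_payload below is a
 * hypothetical stand-in for the caller's allocator), and then emit the
 * descriptor:
 *
 *    unsigned payload_size =
 *            GENX(panfrost_estimate_texture_payload_size)(iview);
 *    struct panfrost_ptr payload = alloc_payload(pool, payload_size);
 *    uint8_t desc[pan_size(TEXTURE)];
 *
 *    GENX(panfrost_new_texture)(dev, iview, desc, &payload);
 */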