/*
 * Copyright 2024 Valve Corporation
 * Copyright 2024 Alyssa Rosenzweig
 * Copyright 2022-2023 Collabora Ltd. and Red Hat Inc.
 * SPDX-License-Identifier: MIT
 */
#include "hk_image_view.h"
#include "util/format/u_format.h"
#include "vulkan/vulkan_core.h"

#include "agx_helpers.h"
#include "agx_nir_texture.h"
#include "agx_pack.h"
#include "hk_device.h"
#include "hk_entrypoints.h"
#include "hk_image.h"
#include "hk_physical_device.h"

#include "layout.h"
#include "vk_format.h"
#include "vk_meta.h"

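/* Distinct ways a view's hardware descriptor may be consumed. Each usage can
 * require different packing (e.g. cube views are lowered to 2D arrays for
 * non-sampled usage, and background/EOT descriptors ignore the app-provided
 * view type).
 */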
enum hk_desc_usage {
   HK_DESC_USAGE_SAMPLED,
   HK_DESC_USAGE_STORAGE,
   HK_DESC_USAGE_INPUT,
   HK_DESC_USAGE_BG_EOT,
   HK_DESC_USAGE_LAYERED_BG_EOT,
   HK_DESC_USAGE_EMRT,
};

static bool
hk_image_view_type_is_array(VkImageViewType view_type)
{
   switch (view_type) {
   case VK_IMAGE_VIEW_TYPE_1D:
   case VK_IMAGE_VIEW_TYPE_2D:
   case VK_IMAGE_VIEW_TYPE_3D:
   case VK_IMAGE_VIEW_TYPE_CUBE:
      return false;

   case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
   case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
   case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
      return true;

   default:
      unreachable("Invalid image view type");
   }
}

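/* Map a Vulkan view type to the AGX texture dimension for a given usage.
 * EMRT and input attachment descriptors are always treated as (multisampled)
 * 2D arrays, and non-sampled usages flatten cubes into 2D arrays so they can
 * be addressed by layer.
 */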
static enum agx_texture_dimension
translate_image_view_type(VkImageViewType view_type, bool msaa, bool layered,
                          enum hk_desc_usage usage)
{
   if (usage == HK_DESC_USAGE_EMRT || usage == HK_DESC_USAGE_INPUT ||
       (usage == HK_DESC_USAGE_LAYERED_BG_EOT && layered)) {
      return msaa ? AGX_TEXTURE_DIMENSION_2D_ARRAY_MULTISAMPLED
                  : AGX_TEXTURE_DIMENSION_2D_ARRAY;
   }

   /* For background/EOT, we ignore the application-provided view type */
   if (usage == HK_DESC_USAGE_BG_EOT || usage == HK_DESC_USAGE_LAYERED_BG_EOT) {
      return msaa ? AGX_TEXTURE_DIMENSION_2D_MULTISAMPLED
                  : AGX_TEXTURE_DIMENSION_2D;
   }

   bool cubes_to_2d = usage != HK_DESC_USAGE_SAMPLED;

   switch (view_type) {
   case VK_IMAGE_VIEW_TYPE_1D:
   case VK_IMAGE_VIEW_TYPE_2D:
      return msaa ? AGX_TEXTURE_DIMENSION_2D_MULTISAMPLED
                  : AGX_TEXTURE_DIMENSION_2D;

   case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
   case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
      return msaa ? AGX_TEXTURE_DIMENSION_2D_ARRAY_MULTISAMPLED
                  : AGX_TEXTURE_DIMENSION_2D_ARRAY;

   case VK_IMAGE_VIEW_TYPE_3D:
      assert(!msaa);
      return AGX_TEXTURE_DIMENSION_3D;

   case VK_IMAGE_VIEW_TYPE_CUBE:
      assert(!msaa);
      return cubes_to_2d ? AGX_TEXTURE_DIMENSION_2D_ARRAY
                         : AGX_TEXTURE_DIMENSION_CUBE;

   case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
      assert(!msaa);
      return cubes_to_2d ? AGX_TEXTURE_DIMENSION_2D_ARRAY
                         : AGX_TEXTURE_DIMENSION_CUBE_ARRAY;

   default:
      unreachable("Invalid image view type");
   }
}

static enum pipe_swizzle
vk_swizzle_to_pipe(VkComponentSwizzle swizzle)
{
   switch (swizzle) {
   case VK_COMPONENT_SWIZZLE_R:
      return PIPE_SWIZZLE_X;
   case VK_COMPONENT_SWIZZLE_G:
      return PIPE_SWIZZLE_Y;
   case VK_COMPONENT_SWIZZLE_B:
      return PIPE_SWIZZLE_Z;
   case VK_COMPONENT_SWIZZLE_A:
      return PIPE_SWIZZLE_W;
   case VK_COMPONENT_SWIZZLE_ONE:
      return PIPE_SWIZZLE_1;
   case VK_COMPONENT_SWIZZLE_ZERO:
      return PIPE_SWIZZLE_0;
   default:
      unreachable("Invalid component swizzle");
   }
}

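/* Select the stencil-only variant of a depth/stencil format, so a
 * VK_IMAGE_ASPECT_STENCIL_BIT view samples the S component and ignores Z.
 */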
static enum pipe_format
get_stencil_format(enum pipe_format format)
{
   switch (format) {
   case PIPE_FORMAT_S8_UINT:
      return PIPE_FORMAT_S8_UINT;
   case PIPE_FORMAT_Z24_UNORM_S8_UINT:
      return PIPE_FORMAT_X24S8_UINT;
   case PIPE_FORMAT_S8_UINT_Z24_UNORM:
      return PIPE_FORMAT_S8X24_UINT;
   case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
      return PIPE_FORMAT_X32_S8X24_UINT;
   default:
      unreachable("Unsupported depth/stencil format");
   }
}

struct hk_3d {
   unsigned x, y, z;
};

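/* Per-axis scale factor between the image's texel grid and the view's. This
 * is 1x1x1 unless an uncompressed view aliases a block-compressed image, in
 * which case one view texel covers a whole compressed block.
 */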
static struct hk_3d
view_denominator(struct hk_image_view *view)
{
   enum pipe_format view_format = hk_format_to_pipe_format(view->vk.format);
   enum pipe_format img_format =
      hk_format_to_pipe_format(view->vk.image->format);

   if (util_format_is_compressed(view_format)) {
      /*
       * We can do an uncompressed view of a compressed image but not the other
       * way around.
       */
      assert(util_format_is_compressed(img_format));
      assert(util_format_get_blockwidth(img_format) ==
             util_format_get_blockwidth(view_format));
      assert(util_format_get_blockheight(img_format) ==
             util_format_get_blockheight(view_format));
      assert(util_format_get_blockdepth(img_format) ==
             util_format_get_blockdepth(view_format));

      return (struct hk_3d){1, 1, 1};
   }

   if (!util_format_is_compressed(img_format)) {
      /* Both formats uncompressed */
      return (struct hk_3d){1, 1, 1};
   }

   /* Else, img is compressed but view is not */
   return (struct hk_3d){
      util_format_get_blockwidth(img_format),
      util_format_get_blockheight(img_format),
      util_format_get_blockdepth(img_format),
   };
}

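/* Resolve the pipe format for one view plane: pick the per-plane format for
 * YCbCr images and switch to the stencil-only variant for stencil views.
 */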
static enum pipe_format
format_for_plane(struct hk_image_view *view, unsigned view_plane)
{
   const struct vk_format_ycbcr_info *ycbcr_info =
      vk_format_get_ycbcr_info(view->vk.format);

   assert(ycbcr_info || view_plane == 0);
   VkFormat plane_format =
      ycbcr_info ? ycbcr_info->planes[view_plane].format : view->vk.format;

   enum pipe_format p_format = hk_format_to_pipe_format(plane_format);
   if (view->vk.aspects == VK_IMAGE_ASPECT_STENCIL_BIT)
      p_format = get_stencil_format(p_format);

   return p_format;
}

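/* Pack the AGX texture descriptor for one plane of a view. The descriptor
 * encodes address, dimension, format, composed swizzle, mip range, layer
 * count, and (if the image is compressed) the acceleration buffer.
 */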
static void
pack_texture(struct hk_image_view *view, unsigned view_plane,
             enum hk_desc_usage usage, struct agx_texture_packed *out)
{
   struct hk_image *image = container_of(view->vk.image, struct hk_image, vk);
   const uint8_t image_plane = view->planes[view_plane].image_plane;
   struct ail_layout *layout = &image->planes[image_plane].layout;
   uint64_t base_addr = hk_image_base_address(image, image_plane);

   bool cubes_to_2d = usage != HK_DESC_USAGE_SAMPLED;

   unsigned level = view->vk.base_mip_level;
   unsigned layer = view->vk.base_array_layer;

   enum pipe_format p_format = format_for_plane(view, view_plane);
   const struct util_format_description *desc =
      util_format_description(p_format);

   struct hk_3d denom = view_denominator(view);

   uint8_t format_swizzle[4] = {
      desc->swizzle[0],
      desc->swizzle[1],
      desc->swizzle[2],
      desc->swizzle[3],
   };

   /* Different APIs have different depth/stencil swizzle rules. Vulkan expects
    * R001 behaviour; override here because Mesa's format table is not that.
    */
   if (util_format_is_depth_or_stencil(p_format)) {
      format_swizzle[0] = PIPE_SWIZZLE_X;
      format_swizzle[1] = PIPE_SWIZZLE_0;
      format_swizzle[2] = PIPE_SWIZZLE_0;
      format_swizzle[3] = PIPE_SWIZZLE_1;
   }

   /* We only have a single swizzle for the user swizzle and the format
    * fixup, so compose them now.
    */
   uint8_t out_swizzle[4];
   uint8_t view_swizzle[4] = {
      vk_swizzle_to_pipe(view->vk.swizzle.r),
      vk_swizzle_to_pipe(view->vk.swizzle.g),
      vk_swizzle_to_pipe(view->vk.swizzle.b),
      vk_swizzle_to_pipe(view->vk.swizzle.a),
   };

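   /* The hardware layer count is in image layers: for 3D views it comes from
    * the layout's depth (scaled for compressed aliases), and for cube views
    * Vulkan counts faces, so divide by 6 when cubes stay cubes.
    */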
   unsigned layers = view->vk.layer_count;
   if (view->vk.view_type == VK_IMAGE_VIEW_TYPE_3D) {
      layers = DIV_ROUND_UP(layout->depth_px, denom.z);
   } else if (!cubes_to_2d &&
              (view->vk.view_type == VK_IMAGE_VIEW_TYPE_CUBE ||
               view->vk.view_type == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)) {

      layers /= 6;
   }

   util_format_compose_swizzles(format_swizzle, view_swizzle, out_swizzle);

   agx_pack(out, TEXTURE, cfg) {
      cfg.dimension = translate_image_view_type(
         view->vk.view_type, view->vk.image->samples > 1, layers > 1, usage);
      cfg.layout = agx_translate_layout(layout->tiling);
      cfg.channels = ail_pixel_format[p_format].channels;
      cfg.type = ail_pixel_format[p_format].type;
      cfg.srgb = util_format_is_srgb(p_format);

      cfg.swizzle_r = agx_channel_from_pipe(out_swizzle[0]);
      cfg.swizzle_g = agx_channel_from_pipe(out_swizzle[1]);
      cfg.swizzle_b = agx_channel_from_pipe(out_swizzle[2]);
      cfg.swizzle_a = agx_channel_from_pipe(out_swizzle[3]);

      if (denom.x > 1) {
         assert(view->vk.level_count == 1);
         assert(view->vk.layer_count == 1);

         cfg.address = base_addr + ail_get_layer_level_B(layout, layer, level);
         cfg.width = DIV_ROUND_UP(u_minify(layout->width_px, level), denom.x);
         cfg.height = DIV_ROUND_UP(u_minify(layout->height_px, level), denom.y);
         cfg.first_level = 0;
         cfg.last_level = 1;
      } else {
         cfg.address = base_addr + ail_get_layer_offset_B(layout, layer);
         cfg.width = layout->width_px;
         cfg.height = layout->height_px;
         cfg.first_level = level;
         cfg.last_level = level + view->vk.level_count - 1;
      }

      cfg.srgb = (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB);
      cfg.unk_mipmapped = layout->levels > 1;
      cfg.srgb_2_channel = cfg.srgb && util_format_colormask(desc) == 0x3;

      if (ail_is_compressed(layout)) {
         cfg.compressed_1 = true;
         cfg.extended = true;

         cfg.acceleration_buffer = base_addr + layout->metadata_offset_B +
                                   (layer * layout->compression_layer_stride_B);
      }

      if (layout->tiling == AIL_TILING_LINEAR &&
          (hk_image_view_type_is_array(view->vk.view_type))) {

         cfg.depth_linear = layers;
         cfg.layer_stride_linear = layout->layer_stride_B - 0x80;
         cfg.extended = true;
      } else {
         assert((layout->tiling != AIL_TILING_LINEAR) || (layers == 1));
         cfg.depth = layers;
      }

      if (view->vk.image->samples > 1) {
         cfg.samples = agx_translate_sample_count(view->vk.image->samples);
      }

      if (layout->tiling == AIL_TILING_LINEAR) {
         cfg.stride = ail_get_linear_stride_B(layout, 0) - 16;
      } else {
         assert(layout->tiling == AIL_TILING_TWIDDLED ||
                layout->tiling == AIL_TILING_TWIDDLED_COMPRESSED);

         cfg.page_aligned_layers = layout->page_aligned_layers;
      }
   }
}

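/* Pack the AGX PBE (pixel backend) descriptor used for image writes. The
 * layout mirrors pack_texture, with extra lowering: multisampled storage
 * images are rebound as buffer-like linear 2D images, and a software sideband
 * is stashed in the unused tail of non-extended descriptors to accelerate
 * atomics.
 */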
static void
pack_pbe(struct hk_device *dev, struct hk_image_view *view, unsigned view_plane,
         enum hk_desc_usage usage, struct agx_pbe_packed *out)
{
   struct hk_image *image = container_of(view->vk.image, struct hk_image, vk);
   const uint8_t image_plane = view->planes[view_plane].image_plane;
   struct ail_layout *layout = &image->planes[image_plane].layout;
   uint64_t base_addr = hk_image_base_address(image, image_plane);

   unsigned level = view->vk.base_mip_level;
   unsigned layer = view->vk.base_array_layer;

   enum pipe_format p_format = format_for_plane(view, view_plane);
   const struct util_format_description *desc =
      util_format_description(p_format);

   bool eot =
      usage == HK_DESC_USAGE_BG_EOT || usage == HK_DESC_USAGE_LAYERED_BG_EOT;

   /* The tilebuffer is already in sRGB space if needed. Do not convert for
    * end-of-tile descriptors.
    */
   if (eot)
      p_format = util_format_linear(p_format);

   bool msaa = view->vk.image->samples > 1;
   struct hk_3d denom = view_denominator(view);

   unsigned layers = view->vk.view_type == VK_IMAGE_VIEW_TYPE_3D
                        ? image->vk.extent.depth
                        : view->vk.layer_count;

   agx_pack(out, PBE, cfg) {
      cfg.dimension =
         translate_image_view_type(view->vk.view_type, msaa, layers > 1, usage);
      cfg.layout = agx_translate_layout(layout->tiling);
      cfg.channels = ail_pixel_format[p_format].channels;
      cfg.type = ail_pixel_format[p_format].type;
      cfg.srgb = util_format_is_srgb(p_format);

      assert(desc->nr_channels >= 1 && desc->nr_channels <= 4);

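      /* The PBE takes the inverse of the format swizzle: for each hardware
       * channel (X/Y/Z/W), record which logical component maps onto it.
       */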
      for (unsigned i = 0; i < desc->nr_channels; ++i) {
         if (desc->swizzle[i] == 0)
            cfg.swizzle_r = i;
         else if (desc->swizzle[i] == 1)
            cfg.swizzle_g = i;
         else if (desc->swizzle[i] == 2)
            cfg.swizzle_b = i;
         else if (desc->swizzle[i] == 3)
            cfg.swizzle_a = i;
      }

      cfg.buffer = base_addr + ail_get_layer_offset_B(layout, layer);
      cfg.unk_mipmapped = layout->levels > 1;

      if (msaa && !eot) {
         /* Multisampled images are bound like buffer textures, with
          * addressing arithmetic to determine the texel to write.
          *
          * Note that the end-of-tile program uses real multisample images
          * with image_write_block instructions.
          */
         unsigned blocksize_B = util_format_get_blocksize(p_format);
         unsigned size_px =
            (layout->size_B - layout->layer_stride_B * layer) / blocksize_B;

         cfg.dimension = AGX_TEXTURE_DIMENSION_2D;
         cfg.layout = AGX_LAYOUT_LINEAR;
         cfg.width = AGX_TEXTURE_BUFFER_WIDTH;
         cfg.height = DIV_ROUND_UP(size_px, cfg.width);
         cfg.stride = (cfg.width * blocksize_B) - 4;
         cfg.layers = 1;
         cfg.levels = 1;

         cfg.buffer += layout->level_offsets_B[level];
         cfg.level = 0;
      } else {
         if (denom.x > 1) {
            assert(denom.z == 1 && "todo how to handle?");
            assert(view->vk.level_count == 1);
            assert(view->vk.layer_count == 1);

            cfg.buffer =
               base_addr + ail_get_layer_level_B(layout, layer, level);
            cfg.width =
               DIV_ROUND_UP(u_minify(layout->width_px, level), denom.x);
            cfg.height =
               DIV_ROUND_UP(u_minify(layout->height_px, level), denom.y);
            cfg.level = 0;
         } else {
            cfg.buffer = base_addr + ail_get_layer_offset_B(layout, layer);
            cfg.width = layout->width_px;
            cfg.height = layout->height_px;
            cfg.level = level;
         }

         if (layout->tiling == AIL_TILING_LINEAR &&
             (hk_image_view_type_is_array(view->vk.view_type))) {

            cfg.depth_linear = layers;
            cfg.layer_stride_linear = (layout->layer_stride_B - 0x80);
            cfg.extended = true;
         } else {
            assert((layout->tiling != AIL_TILING_LINEAR) || (layers == 1));
            cfg.layers = layers;
         }

         cfg.levels = image->vk.mip_levels;

         if (layout->tiling == AIL_TILING_LINEAR) {
            cfg.stride = ail_get_linear_stride_B(layout, level) - 4;
            assert(cfg.levels == 1);
         } else {
            cfg.page_aligned_layers = layout->page_aligned_layers;
         }

         if (image->vk.samples > 1)
            cfg.samples = agx_translate_sample_count(image->vk.samples);
      }

      if (ail_is_compressed(layout) && usage != HK_DESC_USAGE_EMRT) {
         cfg.compressed_1 = true;
         cfg.extended = true;

         cfg.acceleration_buffer = base_addr + layout->metadata_offset_B +
                                   (layer * layout->compression_layer_stride_B);
      }

      /* When the descriptor isn't extended architecturally, we use
       * the last 8 bytes as a sideband to accelerate image atomics.
       */
      if (!cfg.extended &&
          (layout->writeable_image || usage == HK_DESC_USAGE_EMRT)) {

         if (msaa) {
            assert(denom.x == 1 && "no MSAA of block-compressed");

            cfg.aligned_width_msaa_sw =
               align(u_minify(layout->width_px, level),
                     layout->tilesize_el[level].width_el);
         } else {
            cfg.level_offset_sw = ail_get_level_offset_B(layout, cfg.level);
         }

         cfg.sample_count_log2_sw = util_logbase2(image->vk.samples);

         if (layout->tiling != AIL_TILING_LINEAR) {
            struct ail_tile tile_size = layout->tilesize_el[level];
            cfg.tile_width_sw = tile_size.width_el;
            cfg.tile_height_sw = tile_size.height_el;

            cfg.layer_stride_sw = layout->layer_stride_B;
         }
      }
   };
}

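/* Upload a packed descriptor to the device's image table, deduplicating
 * against descriptors this view has already uploaded so that identical
 * packings share one table slot.
 */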
static VkResult
add_descriptor(struct hk_device *dev, struct hk_image_view *view,
               struct agx_texture_packed *desc,
               struct agx_texture_packed *cached, uint32_t *index)
{
   /* First, look for a descriptor we already uploaded */
   for (unsigned i = 0; i < view->descriptor_count; ++i) {
      if (memcmp(&cached[i], desc, sizeof *desc) == 0) {
         *index = view->descriptor_index[i];
         return VK_SUCCESS;
      }
   }

   /* Else, add a new descriptor */
   VkResult result =
      hk_descriptor_table_add(dev, &dev->images, desc, sizeof *desc, index);
   if (result != VK_SUCCESS)
      return result;

   uint32_t local_index = view->descriptor_count++;
   assert(local_index < HK_MAX_IMAGE_DESCS);

   cached[local_index] = *desc;
   view->descriptor_index[local_index] = *index;
   return VK_SUCCESS;
}

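/* Initialize a view: map view planes to image planes, then pack and upload a
 * descriptor for each usage the view supports (sampled, storage, input
 * attachment, background/EOT, and EMRT for color attachments).
 */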
static VkResult
hk_image_view_init(struct hk_device *dev, struct hk_image_view *view,
                   bool driver_internal,
                   const VkImageViewCreateInfo *pCreateInfo)
{
   VK_FROM_HANDLE(hk_image, image, pCreateInfo->image);
   VkResult result;

   memset(view, 0, sizeof(*view));

   vk_image_view_init(&dev->vk, &view->vk, driver_internal, pCreateInfo);

   /* First, figure out which image planes we need. For depth/stencil, we only
    * have one aspect viewed at a time.
    */
   if (image->vk.aspects &
       (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {

      view->plane_count = 1;
      view->planes[0].image_plane =
         hk_image_aspects_to_plane(image, view->vk.aspects);
   } else {
      /* For other formats, retrieve the plane count from the aspect mask
       * and then walk through the aspect mask to map each image plane
       * to its corresponding view plane
       */
      assert(util_bitcount(view->vk.aspects) ==
             vk_format_get_plane_count(view->vk.format));
      view->plane_count = 0;
      u_foreach_bit(aspect_bit, view->vk.aspects) {
         uint8_t image_plane =
            hk_image_aspects_to_plane(image, 1u << aspect_bit);
         view->planes[view->plane_count++].image_plane = image_plane;
      }
   }

   struct agx_texture_packed cached[HK_MAX_IMAGE_DESCS];

   /* Finally, fill in each view plane separately */
   for (unsigned view_plane = 0; view_plane < view->plane_count; view_plane++) {
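      /* Descriptor variants to build for each usage the view was created
       * with: a texture descriptor, a PBE descriptor, or both. For storage,
       * the texture variant backs read-only storage access.
       */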
      const struct {
         VkImageUsageFlagBits flag;
         enum hk_desc_usage usage;
         uint32_t *tex;
         uint32_t *pbe;
      } descriptors[] = {
         {VK_IMAGE_USAGE_SAMPLED_BIT, HK_DESC_USAGE_SAMPLED,
          &view->planes[view_plane].sampled_desc_index},

         {VK_IMAGE_USAGE_STORAGE_BIT, HK_DESC_USAGE_STORAGE,
          &view->planes[view_plane].ro_storage_desc_index,
          &view->planes[view_plane].storage_desc_index},

         {VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, HK_DESC_USAGE_INPUT,
          &view->planes[view_plane].ia_desc_index},

         {VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, HK_DESC_USAGE_BG_EOT,
          &view->planes[view_plane].background_desc_index,
          &view->planes[view_plane].eot_pbe_desc_index},

         {VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, HK_DESC_USAGE_LAYERED_BG_EOT,
          &view->planes[view_plane].layered_background_desc_index,
          &view->planes[view_plane].layered_eot_pbe_desc_index},
      };

      for (unsigned i = 0; i < ARRAY_SIZE(descriptors); ++i) {
         if (!(view->vk.usage & descriptors[i].flag))
            continue;

         for (unsigned is_pbe = 0; is_pbe < 2; ++is_pbe) {
            struct agx_texture_packed desc;
            uint32_t *out = is_pbe ? descriptors[i].pbe : descriptors[i].tex;

            if (!out)
               continue;

            if (is_pbe) {
               static_assert(sizeof(struct agx_pbe_packed) ==
                             sizeof(struct agx_texture_packed));

               pack_pbe(dev, view, view_plane, descriptors[i].usage,
                        (struct agx_pbe_packed *)&desc);
            } else {
               pack_texture(view, view_plane, descriptors[i].usage, &desc);
            }

            result = add_descriptor(dev, view, &desc, cached, out);
            if (result != VK_SUCCESS)
               return result;
         }
      }

      if (view->vk.usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) {
         pack_texture(view, view_plane, HK_DESC_USAGE_EMRT,
                      &view->planes[view_plane].emrt_texture);

         pack_pbe(dev, view, view_plane, HK_DESC_USAGE_EMRT,
                  &view->planes[view_plane].emrt_pbe);
      }
   }

   return VK_SUCCESS;
}

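/* Destroying a view releases every descriptor-table slot it allocated before
 * freeing the object itself.
 */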
VKAPI_ATTR void VKAPI_CALL
hk_DestroyImageView(VkDevice _device, VkImageView imageView,
                    const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(hk_device, dev, _device);
   VK_FROM_HANDLE(hk_image_view, view, imageView);

   if (!view)
      return;

   for (uint8_t d = 0; d < view->descriptor_count; ++d) {
      hk_descriptor_table_remove(dev, &dev->images, view->descriptor_index[d]);
   }

   vk_image_view_finish(&view->vk);
   vk_free2(&dev->vk.alloc, pAllocator, view);
}

VKAPI_ATTR VkResult VKAPI_CALL
hk_CreateImageView(VkDevice _device, const VkImageViewCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator, VkImageView *pView)
{
   VK_FROM_HANDLE(hk_device, dev, _device);
   struct hk_image_view *view;
   VkResult result;

   view = vk_alloc2(&dev->vk.alloc, pAllocator, sizeof(*view), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!view)
      return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);

   result = hk_image_view_init(
      dev, view,
      pCreateInfo->flags & VK_IMAGE_VIEW_CREATE_DRIVER_INTERNAL_BIT_MESA,
      pCreateInfo);
   if (result != VK_SUCCESS) {
      hk_DestroyImageView(_device, hk_image_view_to_handle(view), pAllocator);
      return result;
   }

   *pView = hk_image_view_to_handle(view);

   return VK_SUCCESS;
}