/*
 * Copyright © 2022 Collabora Ltd. and Red Hat Inc.
 * SPDX-License-Identifier: MIT
 */
#ifndef NVK_IMAGE_H
#define NVK_IMAGE_H 1

#include "nvk_private.h"
#include "nvk_device_memory.h"

#include "vk_image.h"

#include "nil.h"

/* Because small images can end up with an array_stride_B that is less than
 * the sparse block size (in bytes), we have to set SINGLE_MIPTAIL_BIT when
 * advertising sparse properties to the client.  This means that we get one
 * single memory range for the miptail of the image.  For large images with
 * mipTailStartLod > 0, we have to deal with the array stride ourselves.
 *
 * We do this by returning NVK_MIP_TAIL_START_OFFSET as the image's
 * imageMipTailOffset.  We can then detect anything with that address as
 * being part of the miptail and re-map it accordingly.  The Vulkan spec
 * explicitly allows for this.
 *
 * From the Vulkan 1.3.279 spec:
 *
 *    "When VK_SPARSE_MEMORY_BIND_METADATA_BIT is present, the resourceOffset
 *    must have been derived explicitly from the imageMipTailOffset in the
 *    sparse resource properties returned for the metadata aspect. By
 *    manipulating the value returned for imageMipTailOffset, the
 *    resourceOffset does not have to correlate directly to a device virtual
 *    address offset, and may instead be whatever value makes it easiest for
 *    the implementation to derive the correct device virtual address."
 */
#define NVK_MIP_TAIL_START_OFFSET 0x6d74000000000000UL
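
/* Illustrative sketch only, not the driver's actual bind path: a bind
 * implementation following the scheme described above could recognize
 * miptail binds by comparing the incoming resourceOffset against the
 * sentinel and recover the client's offset within the miptail by
 * subtracting it.  The helper names and the ">=" check below are
 * assumptions made for this example.
 */
static inline bool
nvk_example_offset_is_mip_tail(uint64_t resource_offset)
{
   return resource_offset >= NVK_MIP_TAIL_START_OFFSET;
}

static inline uint64_t
nvk_example_mip_tail_offset(uint64_t resource_offset)
{
   assert(resource_offset >= NVK_MIP_TAIL_START_OFFSET);
   return resource_offset - NVK_MIP_TAIL_START_OFFSET;
}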

struct nvk_device_memory;
struct nvk_physical_device;
struct nvk_queue;
struct nvkmd_mem;
struct nvkmd_va;

VkFormatFeatureFlags2
nvk_get_image_format_features(struct nvk_physical_device *pdevice,
                              VkFormat format, VkImageTiling tiling,
                              uint64_t drm_format_mod);

void
nvk_get_drm_format_modifier_properties_list(struct nvk_physical_device *pdev,
                                            VkFormat vk_format,
                                            VkBaseOutStructure *ext);

uint32_t
nvk_image_max_dimension(const struct nv_device_info *info,
                        VkImageType image_type);

struct nvk_image_plane {
   struct nil_image nil;
   uint64_t addr;

   /** Reserved VA for sparse images, NULL otherwise. */
   struct nvkmd_va *va;

   /* Needed for VK_EXT_host_image_copy.  We get GPU addresses from the API,
    * so we stash the memory object and the offset in the plane to be able
    * to retrieve CPU addresses for host copies (see the illustrative sketch
    * after this struct).
    */
   struct nvk_device_memory *host_mem;
   uint64_t host_offset;
};
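
/* Illustrative sketch only, not a function NVK defines: for host image
 * copies, the CPU pointer for a plane is derived from the stashed memory
 * object plus host_offset.  The mem_cpu_map parameter stands in for however
 * the memory object's CPU mapping is obtained; it is an assumption made for
 * this example.
 */
static inline void *
nvk_example_image_plane_host_ptr(const struct nvk_image_plane *plane,
                                 void *mem_cpu_map)
{
   return (char *)mem_cpu_map + plane->host_offset;
}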

struct nvk_image {
   struct vk_image vk;

   /** True if the planes are bound separately
    *
    * This is set based on VK_IMAGE_CREATE_DISJOINT_BIT
    */
   bool disjoint;

   uint8_t plane_count;
   struct nvk_image_plane planes[3];

   /* In order to support D32_SFLOAT_S8_UINT, a temp area is
    * needed.  The stencil plane can't be copied using the DMA
    * engine in a single pass since that would need 8-component support.
    * Instead we allocate a 16-bit temp that gets copied into, then
    * copied again down to the 8-bit result.
    */
   struct nvk_image_plane stencil_copy_temp;

   /* The hardware doesn't support rendering to linear images except
    * under certain conditions, so to support DRM_FORMAT_MOD_LINEAR
    * rendering in the general case, we keep a tiled shadow copy which is
    * used to fake support when those conditions aren't satisfied.
    */
   struct nvk_image_plane linear_tiled_shadow;
   struct nvkmd_mem *linear_tiled_shadow_mem;
};

VK_DEFINE_NONDISP_HANDLE_CASTS(nvk_image, vk.base, VkImage, VK_OBJECT_TYPE_IMAGE)

static inline uint64_t
nvk_image_plane_base_address(const struct nvk_image_plane *plane)
{
   return plane->addr;
}

static inline uint64_t
nvk_image_base_address(const struct nvk_image *image, uint8_t plane)
{
   return nvk_image_plane_base_address(&image->planes[plane]);
}

static inline uint8_t
nvk_image_aspects_to_plane(ASSERTED const struct nvk_image *image,
                           VkImageAspectFlags aspectMask)
{
   /* Memory planes are only allowed for memory operations */
   assert(!(aspectMask & (VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT |
                          VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT |
                          VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT |
                          VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT)));

   /* Verify that the aspects are actually in the image */
   assert(!(aspectMask & ~image->vk.aspects));

   /* Must only be one aspect unless it's depth/stencil */
   assert(aspectMask == (VK_IMAGE_ASPECT_DEPTH_BIT |
                         VK_IMAGE_ASPECT_STENCIL_BIT) ||
          util_bitcount(aspectMask) == 1);

   switch (aspectMask) {
   case VK_IMAGE_ASPECT_PLANE_1_BIT: return 1;
   case VK_IMAGE_ASPECT_PLANE_2_BIT: return 2;
   default: return 0;
   }
}

static inline uint8_t
nvk_image_memory_aspects_to_plane(ASSERTED const struct nvk_image *image,
                                  VkImageAspectFlags aspectMask)
{
   if (aspectMask & (VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT |
                     VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT |
                     VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT |
                     VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT)) {
      /* We don't support DRM format modifiers on anything but single-plane
       * color at the moment.
       */
      assert(aspectMask == VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT);
      return 0;
   } else {
      return nvk_image_aspects_to_plane(image, aspectMask);
   }
}

VkResult nvk_queue_image_bind(struct nvk_queue *queue,
                              const VkSparseImageMemoryBindInfo *bind_info);

VkResult nvk_queue_image_opaque_bind(struct nvk_queue *queue,
                                     const VkSparseImageOpaqueMemoryBindInfo *bind_info);

#endif