• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright © 2021 Valve Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Mike Blumenkrantz <michael.blumenkrantz@gmail.com>
25  */
26 
27 #ifndef ZINK_BO_H
28 #define ZINK_BO_H
29 #include <vulkan/vulkan.h>
30 #include "pipebuffer/pb_cache.h"
31 #include "pipebuffer/pb_slab.h"
32 #include "zink_batch.h"
33 
34 #define VK_VIS_VRAM (VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
35 #define VK_LAZY_VRAM (VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
/* Access categories used to query/wait on a BO's pending batch usage. */
enum zink_resource_access {
   ZINK_RESOURCE_ACCESS_READ = 1,
   /* NOTE(review): value is 32, not 1<<1 — presumably chosen to match an
    * external bit layout; confirm before renumbering. */
   ZINK_RESOURCE_ACCESS_WRITE = 32,
   ZINK_RESOURCE_ACCESS_RW = ZINK_RESOURCE_ACCESS_READ | ZINK_RESOURCE_ACCESS_WRITE,
};
41 
42 
/* Memory heap categories; vk_domain_from_heap() maps each entry to the
 * VkMemoryPropertyFlags requested from the Vulkan allocator. */
enum zink_heap {
   ZINK_HEAP_DEVICE_LOCAL,          /* DEVICE_LOCAL only */
   ZINK_HEAP_DEVICE_LOCAL_SPARSE,   /* DEVICE_LOCAL, allocated sparse (ZINK_ALLOC_SPARSE) */
   ZINK_HEAP_DEVICE_LOCAL_LAZY,     /* LAZILY_ALLOCATED | DEVICE_LOCAL */
   ZINK_HEAP_DEVICE_LOCAL_VISIBLE,  /* HOST_VISIBLE | DEVICE_LOCAL */
   ZINK_HEAP_HOST_VISIBLE_COHERENT, /* HOST_VISIBLE | HOST_COHERENT */
   ZINK_HEAP_HOST_VISIBLE_CACHED,   /* HOST_VISIBLE | HOST_CACHED */
   ZINK_HEAP_MAX,                   /* heap count — not a real heap */
};
52 
/* Allocation modifiers passed to zink_bo_create(). */
enum zink_alloc_flag {
   ZINK_ALLOC_SPARSE = 1<<0,      /* sparse/virtual allocation (see zink_heap_from_domain_flags) */
   ZINK_ALLOC_NO_SUBALLOC = 1<<1, /* presumably disables slab sub-allocation — TODO confirm in zink_bo.c */
};
57 
/* Records one exported handle of a real BO; kept on zink_bo.u.real.exports. */
struct bo_export {
   /** File descriptor associated with a handle export. */
   int drm_fd;

   /** GEM handle in drm_fd */
   uint32_t gem_handle;

   /* link in zink_bo.u.real.exports (guarded by export_lock) */
   struct list_head link;
};
67 
/* A zink buffer object: a pb_buffer backed by (a slice of) VkDeviceMemory.
 * Exactly one member of 'u' is meaningful depending on how the BO was
 * created: a real allocation, a slab sub-allocation, or a sparse BO. */
struct zink_bo {
   struct pb_buffer base;

   union {
      struct {
         void *cpu_ptr; /* for user_ptr and permanent maps */
         int map_count;
         /* list of struct bo_export, guarded by export_lock */
         struct list_head exports;
         simple_mtx_t export_lock;

         bool is_user_ptr;
         bool use_reusable_pool;

         /* Whether buffer_get_handle or buffer_from_handle has been called,
          * it can only transition from false to true. Protected by lock.
          */
         bool is_shared;
      } real;
      struct {
         struct pb_slab_entry entry;
         /* the real BO this slab entry is carved out of */
         struct zink_bo *real;
      } slab;
      struct {
         uint32_t num_va_pages;
         uint32_t num_backing_pages;

         struct list_head backing;

         /* Commitment information for each page of the virtual memory area. */
         struct zink_sparse_commitment *commitments;
      } sparse;
   } u;

   /* may be VK_NULL_HANDLE for slab entries — use zink_bo_get_mem() */
   VkDeviceMemory mem;
   uint64_t offset;

   uint32_t unique_id;

   simple_mtx_t lock;

   /* batch-usage tracking for read/write hazard checks and waits */
   struct zink_batch_usage *reads;
   struct zink_batch_usage *writes;

   /* flexible array member — sizing determined by allocator, TODO confirm (one per heap?) */
   struct pb_cache_entry cache_entry[];
};
113 
/* Downcast a pb_buffer to its containing zink_bo (base is the first member). */
static inline struct zink_bo *
zink_bo(struct pb_buffer *pbuf)
{
   struct zink_bo *bo = (struct zink_bo *)pbuf;
   return bo;
}
119 
120 static inline enum zink_alloc_flag
zink_alloc_flags_from_heap(enum zink_heap heap)121 zink_alloc_flags_from_heap(enum zink_heap heap)
122 {
123    enum zink_alloc_flag flags = 0;
124    switch (heap) {
125    case ZINK_HEAP_DEVICE_LOCAL_SPARSE:
126       flags |= ZINK_ALLOC_SPARSE;
127       break;
128    default:
129       break;
130    }
131    return flags;
132 }
133 
134 static inline VkMemoryPropertyFlags
vk_domain_from_heap(enum zink_heap heap)135 vk_domain_from_heap(enum zink_heap heap)
136 {
137    VkMemoryPropertyFlags domains = 0;
138 
139    switch (heap) {
140    case ZINK_HEAP_DEVICE_LOCAL:
141    case ZINK_HEAP_DEVICE_LOCAL_SPARSE:
142       domains = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
143       break;
144    case ZINK_HEAP_DEVICE_LOCAL_LAZY:
145       domains = VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
146       break;
147    case ZINK_HEAP_DEVICE_LOCAL_VISIBLE:
148       domains = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
149       break;
150    case ZINK_HEAP_HOST_VISIBLE_COHERENT:
151       domains = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
152       break;
153    case ZINK_HEAP_HOST_VISIBLE_CACHED:
154       domains = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
155       break;
156    default:
157       break;
158    }
159    return domains;
160 }
161 
162 static inline enum zink_heap
zink_heap_from_domain_flags(VkMemoryPropertyFlags domains,enum zink_alloc_flag flags)163 zink_heap_from_domain_flags(VkMemoryPropertyFlags domains, enum zink_alloc_flag flags)
164 {
165    if (flags & ZINK_ALLOC_SPARSE)
166       return ZINK_HEAP_DEVICE_LOCAL_SPARSE;
167 
168    if ((domains & VK_VIS_VRAM) == VK_VIS_VRAM)
169       return ZINK_HEAP_DEVICE_LOCAL_VISIBLE;
170 
171    if (domains & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
172       return ZINK_HEAP_DEVICE_LOCAL;
173 
174    if (domains & VK_MEMORY_PROPERTY_HOST_CACHED_BIT)
175       return ZINK_HEAP_HOST_VISIBLE_CACHED;
176 
177    return ZINK_HEAP_HOST_VISIBLE_COHERENT;
178 }
179 
/* One-time init/teardown of the screen's BO machinery — TODO confirm exact
 * responsibilities in zink_bo.c (out of view here). */
bool
zink_bo_init(struct zink_screen *screen);

void
zink_bo_deinit(struct zink_screen *screen);

/* Allocate a BO; pNext is presumably chained into VkMemoryAllocateInfo —
 * verify against the definition. */
struct pb_buffer *
zink_bo_create(struct zink_screen *screen, uint64_t size, unsigned alignment, enum zink_heap heap, enum zink_alloc_flag flags, const void *pNext);

/* Export/look up a KMS handle for bo on the given fd; result in *handle. */
bool
zink_bo_get_kms_handle(struct zink_screen *screen, struct zink_bo *bo, int fd, uint32_t *handle);
191 
192 static inline uint64_t
zink_bo_get_offset(const struct zink_bo * bo)193 zink_bo_get_offset(const struct zink_bo *bo)
194 {
195    return bo->offset;
196 }
197 
198 static inline VkDeviceMemory
zink_bo_get_mem(const struct zink_bo * bo)199 zink_bo_get_mem(const struct zink_bo *bo)
200 {
201    return bo->mem ? bo->mem : bo->u.slab.real->mem;
202 }
203 
204 static inline VkDeviceSize
zink_bo_get_size(const struct zink_bo * bo)205 zink_bo_get_size(const struct zink_bo *bo)
206 {
207    return bo->mem ? bo->base.size : bo->u.slab.real->base.size;
208 }
209 
/* Map/unmap the BO for CPU access — implementation out of view; see zink_bo.c. */
void *
zink_bo_map(struct zink_screen *screen, struct zink_bo *bo);
void
zink_bo_unmap(struct zink_screen *screen, struct zink_bo *bo);

/* (Un)commit backing pages of a sparse resource over the given box;
 * presumably signals *sem when a queue operation is needed — TODO confirm. */
bool
zink_bo_commit(struct zink_screen *screen, struct zink_resource *res, unsigned level, struct pipe_box *box, bool commit, VkSemaphore *sem);
217 
218 static inline bool
zink_bo_has_unflushed_usage(const struct zink_bo * bo)219 zink_bo_has_unflushed_usage(const struct zink_bo *bo)
220 {
221    return zink_batch_usage_is_unflushed(bo->reads) ||
222           zink_batch_usage_is_unflushed(bo->writes);
223 }
224 
225 static inline bool
zink_bo_has_usage(const struct zink_bo * bo)226 zink_bo_has_usage(const struct zink_bo *bo)
227 {
228    return zink_batch_usage_exists(bo->reads) ||
229           zink_batch_usage_exists(bo->writes);
230 }
231 
232 static inline bool
zink_bo_usage_matches(const struct zink_bo * bo,const struct zink_batch_state * bs)233 zink_bo_usage_matches(const struct zink_bo *bo, const struct zink_batch_state *bs)
234 {
235    return zink_batch_usage_matches(bo->reads, bs) ||
236           zink_batch_usage_matches(bo->writes, bs);
237 }
238 
239 static inline bool
zink_bo_usage_check_completion(struct zink_screen * screen,struct zink_bo * bo,enum zink_resource_access access)240 zink_bo_usage_check_completion(struct zink_screen *screen, struct zink_bo *bo, enum zink_resource_access access)
241 {
242    if (access & ZINK_RESOURCE_ACCESS_READ && !zink_screen_usage_check_completion(screen, bo->reads))
243       return false;
244    if (access & ZINK_RESOURCE_ACCESS_WRITE && !zink_screen_usage_check_completion(screen, bo->writes))
245       return false;
246    return true;
247 }
248 
249 static inline void
zink_bo_usage_wait(struct zink_context * ctx,struct zink_bo * bo,enum zink_resource_access access)250 zink_bo_usage_wait(struct zink_context *ctx, struct zink_bo *bo, enum zink_resource_access access)
251 {
252    if (access & ZINK_RESOURCE_ACCESS_READ)
253       zink_batch_usage_wait(ctx, bo->reads);
254    if (access & ZINK_RESOURCE_ACCESS_WRITE)
255       zink_batch_usage_wait(ctx, bo->writes);
256 }
257 
258 static inline void
zink_bo_usage_set(struct zink_bo * bo,struct zink_batch_state * bs,bool write)259 zink_bo_usage_set(struct zink_bo *bo, struct zink_batch_state *bs, bool write)
260 {
261    if (write)
262       zink_batch_usage_set(&bo->writes, bs);
263    else
264       zink_batch_usage_set(&bo->reads, bs);
265 }
266 
267 static inline bool
zink_bo_usage_unset(struct zink_bo * bo,struct zink_batch_state * bs)268 zink_bo_usage_unset(struct zink_bo *bo, struct zink_batch_state *bs)
269 {
270    zink_batch_usage_unset(&bo->reads, bs);
271    zink_batch_usage_unset(&bo->writes, bs);
272    return bo->reads || bo->writes;
273 }
274 
275 
276 static inline void
zink_bo_unref(struct zink_screen * screen,struct zink_bo * bo)277 zink_bo_unref(struct zink_screen *screen, struct zink_bo *bo)
278 {
279    struct pb_buffer *pbuf = &bo->base;
280    pb_reference_with_winsys(screen, &pbuf, NULL);
281 }
282 
283 #endif
284