1 /*
2 * Copyright © 2021 Valve Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Mike Blumenkrantz <michael.blumenkrantz@gmail.com>
25 */
26
27 #ifndef ZINK_BO_H
28 #define ZINK_BO_H
29 #include <vulkan/vulkan.h>
30 #include "pipebuffer/pb_cache.h"
31 #include "pipebuffer/pb_slab.h"
32 #include "zink_batch.h"
33
34 #define VK_VIS_VRAM (VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
35 #define VK_LAZY_VRAM (VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
/* Bitmask of how a BO is used by queued GPU work; READ and WRITE usage are
 * tracked separately (see zink_bo::reads / zink_bo::writes below).
 * NOTE(review): WRITE is 32 rather than 2 — presumably chosen to line up with
 * external flag values; confirm before renumbering.
 */
enum zink_resource_access {
   ZINK_RESOURCE_ACCESS_READ = 1,
   ZINK_RESOURCE_ACCESS_WRITE = 32,
   /* convenience union of both directions */
   ZINK_RESOURCE_ACCESS_RW = ZINK_RESOURCE_ACCESS_READ | ZINK_RESOURCE_ACCESS_WRITE,
};
41
42
/* Logical memory heaps the allocator picks from; each maps to a set of
 * VkMemoryPropertyFlags via vk_domain_from_heap() below.
 */
enum zink_heap {
   ZINK_HEAP_DEVICE_LOCAL,          /* DEVICE_LOCAL only */
   ZINK_HEAP_DEVICE_LOCAL_SPARSE,   /* DEVICE_LOCAL, for sparse allocations */
   ZINK_HEAP_DEVICE_LOCAL_LAZY,     /* LAZILY_ALLOCATED | DEVICE_LOCAL */
   ZINK_HEAP_DEVICE_LOCAL_VISIBLE,  /* HOST_VISIBLE | DEVICE_LOCAL (BAR) */
   ZINK_HEAP_HOST_VISIBLE_COHERENT, /* HOST_VISIBLE | HOST_COHERENT */
   ZINK_HEAP_HOST_VISIBLE_CACHED,   /* HOST_VISIBLE | HOST_CACHED */
   ZINK_HEAP_MAX,
};
52
/* Allocation behavior modifiers passed to zink_bo_create(). */
enum zink_alloc_flag {
   ZINK_ALLOC_SPARSE = 1<<0,      /* sparse (virtual) allocation */
   ZINK_ALLOC_NO_SUBALLOC = 1<<1, /* force a dedicated allocation, no slab */
};
57
58
/* A zink buffer object: a pb_buffer plus the Vulkan memory backing it.
 * Exactly one arm of the union `u` is valid per BO; zink_bo_get_mem()
 * shows that `mem` is NULL for slab suballocations, which instead reach
 * memory through u.slab.real.
 */
struct zink_bo {
   struct pb_buffer base;

   union {
      /* a real (dedicated) allocation */
      struct {
         void *cpu_ptr; /* for user_ptr and permanent maps */
         int map_count;

         bool is_user_ptr;
         bool use_reusable_pool;

         /* Whether buffer_get_handle or buffer_from_handle has been called,
          * it can only transition from false to true. Protected by lock.
          */
         bool is_shared;
      } real;
      /* a suballocation carved out of a real BO's slab */
      struct {
         struct pb_slab_entry entry;
         struct zink_bo *real; /* the backing real BO */
      } slab;
      /* a sparse virtual allocation with page-granular backing */
      struct {
         uint32_t num_va_pages;
         uint32_t num_backing_pages;

         struct list_head backing;

         /* Commitment information for each page of the virtual memory area. */
         struct zink_sparse_commitment *commitments;
      } sparse;
   } u;

   VkDeviceMemory mem; /* NULL for slab suballocations (see zink_bo_get_mem) */
   uint64_t offset;    /* offset within `mem` (see zink_bo_get_offset) */

   uint32_t unique_id;

   simple_mtx_t lock;

   /* batch-usage tracking, split by access direction */
   struct zink_batch_usage *reads;
   struct zink_batch_usage *writes;

   /* flexible array: cache entries for reusable-pool BOs */
   struct pb_cache_entry cache_entry[];
};
102
/** Downcast a pb_buffer to its containing zink_bo (base is the first member). */
static inline struct zink_bo *
zink_bo(struct pb_buffer *pbuf)
{
   return (struct zink_bo *)pbuf;
}
108
109 static inline enum zink_alloc_flag
zink_alloc_flags_from_heap(enum zink_heap heap)110 zink_alloc_flags_from_heap(enum zink_heap heap)
111 {
112 enum zink_alloc_flag flags = 0;
113 switch (heap) {
114 case ZINK_HEAP_DEVICE_LOCAL_SPARSE:
115 flags |= ZINK_ALLOC_SPARSE;
116 break;
117 default:
118 break;
119 }
120 return flags;
121 }
122
123 static inline VkMemoryPropertyFlags
vk_domain_from_heap(enum zink_heap heap)124 vk_domain_from_heap(enum zink_heap heap)
125 {
126 VkMemoryPropertyFlags domains = 0;
127
128 switch (heap) {
129 case ZINK_HEAP_DEVICE_LOCAL:
130 case ZINK_HEAP_DEVICE_LOCAL_SPARSE:
131 domains = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
132 break;
133 case ZINK_HEAP_DEVICE_LOCAL_LAZY:
134 domains = VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
135 break;
136 case ZINK_HEAP_DEVICE_LOCAL_VISIBLE:
137 domains = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
138 break;
139 case ZINK_HEAP_HOST_VISIBLE_COHERENT:
140 domains = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
141 break;
142 case ZINK_HEAP_HOST_VISIBLE_CACHED:
143 domains = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
144 break;
145 default:
146 break;
147 }
148 return domains;
149 }
150
151 static inline enum zink_heap
zink_heap_from_domain_flags(VkMemoryPropertyFlags domains,enum zink_alloc_flag flags)152 zink_heap_from_domain_flags(VkMemoryPropertyFlags domains, enum zink_alloc_flag flags)
153 {
154 if (flags & ZINK_ALLOC_SPARSE)
155 return ZINK_HEAP_DEVICE_LOCAL_SPARSE;
156
157 if ((domains & VK_VIS_VRAM) == VK_VIS_VRAM)
158 return ZINK_HEAP_DEVICE_LOCAL_VISIBLE;
159
160 if (domains & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
161 return ZINK_HEAP_DEVICE_LOCAL;
162
163 if (domains & VK_MEMORY_PROPERTY_HOST_CACHED_BIT)
164 return ZINK_HEAP_HOST_VISIBLE_CACHED;
165
166 return ZINK_HEAP_HOST_VISIBLE_COHERENT;
167 }
168
/** Initialize per-screen BO machinery (caches/slabs); returns false on failure. */
bool
zink_bo_init(struct zink_screen *screen);

/** Tear down what zink_bo_init() created. */
void
zink_bo_deinit(struct zink_screen *screen);

/** Allocate a BO of `size` bytes with the given alignment from `heap`.
 * `pNext` is chained onto the Vulkan allocation info; returns NULL on failure.
 */
struct pb_buffer *
zink_bo_create(struct zink_screen *screen, uint64_t size, unsigned alignment, enum zink_heap heap, enum zink_alloc_flag flags, const void *pNext);
177
/** Byte offset of this BO within its VkDeviceMemory allocation. */
static inline uint64_t
zink_bo_get_offset(const struct zink_bo *bo)
{
   return bo->offset;
}
183
184 static inline VkDeviceMemory
zink_bo_get_mem(const struct zink_bo * bo)185 zink_bo_get_mem(const struct zink_bo *bo)
186 {
187 return bo->mem ? bo->mem : bo->u.slab.real->mem;
188 }
189
190 static inline VkDeviceSize
zink_bo_get_size(const struct zink_bo * bo)191 zink_bo_get_size(const struct zink_bo *bo)
192 {
193 return bo->mem ? bo->base.size : bo->u.slab.real->base.size;
194 }
195
/** Map the BO for CPU access; returns the CPU pointer or NULL on failure. */
void *
zink_bo_map(struct zink_screen *screen, struct zink_bo *bo);
/** Release a mapping obtained from zink_bo_map(). */
void
zink_bo_unmap(struct zink_screen *screen, struct zink_bo *bo);

/** (Un)commit backing pages of a sparse resource over [offset, offset+size);
 * returns false on failure.
 */
bool
zink_bo_commit(struct zink_screen *screen, struct zink_resource *res, uint32_t offset, uint32_t size, bool commit);
203
204 static inline bool
zink_bo_has_unflushed_usage(const struct zink_bo * bo)205 zink_bo_has_unflushed_usage(const struct zink_bo *bo)
206 {
207 return zink_batch_usage_is_unflushed(bo->reads) ||
208 zink_batch_usage_is_unflushed(bo->writes);
209 }
210
211 static inline bool
zink_bo_has_usage(const struct zink_bo * bo)212 zink_bo_has_usage(const struct zink_bo *bo)
213 {
214 return zink_batch_usage_exists(bo->reads) ||
215 zink_batch_usage_exists(bo->writes);
216 }
217
218 static inline bool
zink_bo_usage_matches(const struct zink_bo * bo,const struct zink_batch_state * bs)219 zink_bo_usage_matches(const struct zink_bo *bo, const struct zink_batch_state *bs)
220 {
221 return zink_batch_usage_matches(bo->reads, bs) ||
222 zink_batch_usage_matches(bo->writes, bs);
223 }
224
225 static inline bool
zink_bo_usage_check_completion(struct zink_screen * screen,struct zink_bo * bo,enum zink_resource_access access)226 zink_bo_usage_check_completion(struct zink_screen *screen, struct zink_bo *bo, enum zink_resource_access access)
227 {
228 if (access & ZINK_RESOURCE_ACCESS_READ && !zink_screen_usage_check_completion(screen, bo->reads))
229 return false;
230 if (access & ZINK_RESOURCE_ACCESS_WRITE && !zink_screen_usage_check_completion(screen, bo->writes))
231 return false;
232 return true;
233 }
234
235 static inline void
zink_bo_usage_wait(struct zink_context * ctx,struct zink_bo * bo,enum zink_resource_access access)236 zink_bo_usage_wait(struct zink_context *ctx, struct zink_bo *bo, enum zink_resource_access access)
237 {
238 if (access & ZINK_RESOURCE_ACCESS_READ)
239 zink_batch_usage_wait(ctx, bo->reads);
240 if (access & ZINK_RESOURCE_ACCESS_WRITE)
241 zink_batch_usage_wait(ctx, bo->writes);
242 }
243
244 static inline void
zink_bo_usage_set(struct zink_bo * bo,struct zink_batch_state * bs,bool write)245 zink_bo_usage_set(struct zink_bo *bo, struct zink_batch_state *bs, bool write)
246 {
247 if (write)
248 zink_batch_usage_set(&bo->writes, bs);
249 else
250 zink_batch_usage_set(&bo->reads, bs);
251 }
252
253 static inline bool
zink_bo_usage_unset(struct zink_bo * bo,struct zink_batch_state * bs)254 zink_bo_usage_unset(struct zink_bo *bo, struct zink_batch_state *bs)
255 {
256 zink_batch_usage_unset(&bo->reads, bs);
257 zink_batch_usage_unset(&bo->writes, bs);
258 return bo->reads || bo->writes;
259 }
260
261
262 static inline void
zink_bo_unref(struct zink_screen * screen,struct zink_bo * bo)263 zink_bo_unref(struct zink_screen *screen, struct zink_bo *bo)
264 {
265 struct pb_buffer *pbuf = &bo->base;
266 pb_reference_with_winsys(screen, &pbuf, NULL);
267 }
268
269 #endif
270