/*
 * Copyright 2018 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "zink_context.h"

#include "zink_batch.h"
#include "zink_compiler.h"
#include "zink_fence.h"
#include "zink_framebuffer.h"
#include "zink_helpers.h"
#include "zink_program.h"
#include "zink_pipeline.h"
#include "zink_query.h"
#include "zink_render_pass.h"
#include "zink_resource.h"
#include "zink_screen.h"
#include "zink_state.h"
#include "zink_surface.h"

#include "indices/u_primconvert.h"
#include "util/u_blitter.h"
#include "util/u_debug.h"
#include "util/format/u_format.h"
#include "util/u_framebuffer.h"
#include "util/u_helpers.h"
#include "util/u_inlines.h"

#include "nir.h"

#include "util/u_memory.h"
#include "util/u_upload_mgr.h"

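/* Context teardown: wait for the queue to go idle first, then release the
 * per-batch Vulkan objects before destroying the pools that own them. */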
static void
zink_context_destroy(struct pipe_context *pctx)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);

   if (vkQueueWaitIdle(ctx->queue) != VK_SUCCESS)
      debug_printf("vkQueueWaitIdle failed\n");

   pipe_resource_reference(&ctx->dummy_buffer, NULL);
   for (unsigned i = 0; i < ARRAY_SIZE(ctx->null_buffers); i++)
      pipe_resource_reference(&ctx->null_buffers[i], NULL);

   for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i) {
      vkDestroyDescriptorPool(screen->dev, ctx->batches[i].descpool, NULL);
      vkFreeCommandBuffers(screen->dev, ctx->cmdpool, 1, &ctx->batches[i].cmdbuf);
   }
   vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);

   util_primconvert_destroy(ctx->primconvert);
   u_upload_destroy(pctx->stream_uploader);
   slab_destroy_child(&ctx->transfer_pool);
   util_blitter_destroy(ctx->blitter);
   FREE(ctx);
}

static enum pipe_reset_status
zink_get_device_reset_status(struct pipe_context *pctx)
{
   struct zink_context *ctx = zink_context(pctx);

   enum pipe_reset_status status = PIPE_NO_RESET;

   if (ctx->is_device_lost) {
      // Since we don't know what really happened to the hardware, just
      // assume that we are to blame.
      status = PIPE_GUILTY_CONTEXT_RESET;

      debug_printf("ZINK: device lost detected!\n");

      if (ctx->reset.reset)
         ctx->reset.reset(ctx->reset.data, status);
   }

   return status;
}

static void
zink_set_device_reset_callback(struct pipe_context *pctx,
                               const struct pipe_device_reset_callback *cb)
{
   struct zink_context *ctx = zink_context(pctx);

   if (cb)
      ctx->reset = *cb;
   else
      memset(&ctx->reset, 0, sizeof(ctx->reset));
}

static VkSamplerMipmapMode
sampler_mipmap_mode(enum pipe_tex_mipfilter filter)
{
   switch (filter) {
   case PIPE_TEX_MIPFILTER_NEAREST: return VK_SAMPLER_MIPMAP_MODE_NEAREST;
   case PIPE_TEX_MIPFILTER_LINEAR: return VK_SAMPLER_MIPMAP_MODE_LINEAR;
   case PIPE_TEX_MIPFILTER_NONE:
      unreachable("PIPE_TEX_MIPFILTER_NONE should be dealt with earlier");
   }
   unreachable("unexpected filter");
}

static VkSamplerAddressMode
sampler_address_mode(enum pipe_tex_wrap filter)
{
   switch (filter) {
   case PIPE_TEX_WRAP_REPEAT: return VK_SAMPLER_ADDRESS_MODE_REPEAT;
   case PIPE_TEX_WRAP_CLAMP: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
   case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
   case PIPE_TEX_WRAP_MIRROR_REPEAT: return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
   case PIPE_TEX_WRAP_MIRROR_CLAMP: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   }
   unreachable("unexpected wrap");
}

static VkCompareOp
compare_op(enum pipe_compare_func op)
{
   switch (op) {
   case PIPE_FUNC_NEVER: return VK_COMPARE_OP_NEVER;
   case PIPE_FUNC_LESS: return VK_COMPARE_OP_LESS;
   case PIPE_FUNC_EQUAL: return VK_COMPARE_OP_EQUAL;
   case PIPE_FUNC_LEQUAL: return VK_COMPARE_OP_LESS_OR_EQUAL;
   case PIPE_FUNC_GREATER: return VK_COMPARE_OP_GREATER;
   case PIPE_FUNC_NOTEQUAL: return VK_COMPARE_OP_NOT_EQUAL;
   case PIPE_FUNC_GEQUAL: return VK_COMPARE_OP_GREATER_OR_EQUAL;
   case PIPE_FUNC_ALWAYS: return VK_COMPARE_OP_ALWAYS;
   }
   unreachable("unexpected compare");
}

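/* Translate a gallium pipe_sampler_state into a VkSampler.  Vulkan has no
 * "no mipmapping" mode, so for PIPE_TEX_MIPFILTER_NONE the LOD range is
 * clamped to 0 to force sampling from the base level only. */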
static void *
zink_create_sampler_state(struct pipe_context *pctx,
                          const struct pipe_sampler_state *state)
{
   struct zink_screen *screen = zink_screen(pctx->screen);

   VkSamplerCreateInfo sci = {};
   sci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
   sci.magFilter = zink_filter(state->mag_img_filter);
   sci.minFilter = zink_filter(state->min_img_filter);

   if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE) {
      sci.mipmapMode = sampler_mipmap_mode(state->min_mip_filter);
      sci.minLod = state->min_lod;
      sci.maxLod = state->max_lod;
   } else {
      sci.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
      sci.minLod = 0;
      sci.maxLod = 0;
   }

   sci.addressModeU = sampler_address_mode(state->wrap_s);
   sci.addressModeV = sampler_address_mode(state->wrap_t);
   sci.addressModeW = sampler_address_mode(state->wrap_r);
   sci.mipLodBias = state->lod_bias;

   if (state->compare_mode == PIPE_TEX_COMPARE_NONE)
      sci.compareOp = VK_COMPARE_OP_NEVER;
   else {
      sci.compareOp = compare_op(state->compare_func);
      sci.compareEnable = VK_TRUE;
   }

   sci.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK; // TODO
   sci.unnormalizedCoordinates = !state->normalized_coords;

   if (state->max_anisotropy > 1) {
      sci.maxAnisotropy = state->max_anisotropy;
      sci.anisotropyEnable = VK_TRUE;
   }

   VkSampler *sampler = CALLOC(1, sizeof(VkSampler));
   if (!sampler)
      return NULL;

   if (vkCreateSampler(screen->dev, &sci, NULL, sampler) != VK_SUCCESS) {
      FREE(sampler);
      return NULL;
   }

   return sampler;
}

static void
zink_bind_sampler_states(struct pipe_context *pctx,
                         enum pipe_shader_type shader,
                         unsigned start_slot,
                         unsigned num_samplers,
                         void **samplers)
{
   struct zink_context *ctx = zink_context(pctx);
   for (unsigned i = 0; i < num_samplers; ++i) {
      VkSampler *sampler = samplers[i];
      ctx->sampler_states[shader][start_slot + i] = sampler;
      ctx->samplers[shader][start_slot + i] = sampler ? *sampler : VK_NULL_HANDLE;
   }
   ctx->num_samplers[shader] = start_slot + num_samplers;
}

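/* The sampler may still be referenced by previously recorded command
 * buffers, so don't destroy it immediately: park the handle on the current
 * batch's zombie list so it can be destroyed once the batch has finished
 * executing. */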
static void
zink_delete_sampler_state(struct pipe_context *pctx,
                          void *sampler_state)
{
   struct zink_batch *batch = zink_curr_batch(zink_context(pctx));
   util_dynarray_append(&batch->zombie_samplers, VkSampler,
                        *(VkSampler *)sampler_state);
   FREE(sampler_state);
}


static VkImageViewType
image_view_type(enum pipe_texture_target target)
{
   switch (target) {
   case PIPE_TEXTURE_1D: return VK_IMAGE_VIEW_TYPE_1D;
   case PIPE_TEXTURE_1D_ARRAY: return VK_IMAGE_VIEW_TYPE_1D_ARRAY;
   case PIPE_TEXTURE_2D: return VK_IMAGE_VIEW_TYPE_2D;
   case PIPE_TEXTURE_2D_ARRAY: return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
   case PIPE_TEXTURE_CUBE: return VK_IMAGE_VIEW_TYPE_CUBE;
   case PIPE_TEXTURE_CUBE_ARRAY: return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
   case PIPE_TEXTURE_3D: return VK_IMAGE_VIEW_TYPE_3D;
   case PIPE_TEXTURE_RECT: return VK_IMAGE_VIEW_TYPE_2D;
   default:
      unreachable("unexpected target");
   }
}

static VkComponentSwizzle
component_mapping(enum pipe_swizzle swizzle)
{
   switch (swizzle) {
   case PIPE_SWIZZLE_X: return VK_COMPONENT_SWIZZLE_R;
   case PIPE_SWIZZLE_Y: return VK_COMPONENT_SWIZZLE_G;
   case PIPE_SWIZZLE_Z: return VK_COMPONENT_SWIZZLE_B;
   case PIPE_SWIZZLE_W: return VK_COMPONENT_SWIZZLE_A;
   case PIPE_SWIZZLE_0: return VK_COMPONENT_SWIZZLE_ZERO;
   case PIPE_SWIZZLE_1: return VK_COMPONENT_SWIZZLE_ONE;
   case PIPE_SWIZZLE_NONE: return VK_COMPONENT_SWIZZLE_IDENTITY; // ???
   default:
      unreachable("unexpected swizzle");
   }
}

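/* A Vulkan image view used for sampling must select exactly one aspect, so
 * for combined depth/stencil formats we sample the depth aspect and only
 * fall back to stencil for stencil-only formats. */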
static VkImageAspectFlags
sampler_aspect_from_format(enum pipe_format fmt)
{
   if (util_format_is_depth_or_stencil(fmt)) {
      const struct util_format_description *desc = util_format_description(fmt);
      if (util_format_has_depth(desc))
         return VK_IMAGE_ASPECT_DEPTH_BIT;
      assert(util_format_has_stencil(desc));
      return VK_IMAGE_ASPECT_STENCIL_BIT;
   } else
      return VK_IMAGE_ASPECT_COLOR_BIT;
}

static struct pipe_sampler_view *
zink_create_sampler_view(struct pipe_context *pctx, struct pipe_resource *pres,
                         const struct pipe_sampler_view *state)
{
   struct zink_screen *screen = zink_screen(pctx->screen);
   struct zink_resource *res = zink_resource(pres);
   struct zink_sampler_view *sampler_view = CALLOC_STRUCT(zink_sampler_view);
   VkResult err;

   if (!sampler_view)
      return NULL;

   sampler_view->base = *state;
   sampler_view->base.texture = NULL;
   pipe_resource_reference(&sampler_view->base.texture, pres);
   sampler_view->base.reference.count = 1;
   sampler_view->base.context = pctx;

   if (state->target != PIPE_BUFFER) {
      VkImageViewCreateInfo ivci = {};
      ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
      ivci.image = res->image;
      ivci.viewType = image_view_type(state->target);
      ivci.format = zink_get_format(screen, state->format);
      assert(ivci.format);
      ivci.components.r = component_mapping(state->swizzle_r);
      ivci.components.g = component_mapping(state->swizzle_g);
      ivci.components.b = component_mapping(state->swizzle_b);
      ivci.components.a = component_mapping(state->swizzle_a);

      ivci.subresourceRange.aspectMask = sampler_aspect_from_format(state->format);
      ivci.subresourceRange.baseMipLevel = state->u.tex.first_level;
      ivci.subresourceRange.baseArrayLayer = state->u.tex.first_layer;
      ivci.subresourceRange.levelCount = state->u.tex.last_level - state->u.tex.first_level + 1;
      ivci.subresourceRange.layerCount = state->u.tex.last_layer - state->u.tex.first_layer + 1;

      err = vkCreateImageView(screen->dev, &ivci, NULL, &sampler_view->image_view);
   } else {
      VkBufferViewCreateInfo bvci = {};
      bvci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
      bvci.buffer = res->buffer;
      bvci.format = zink_get_format(screen, state->format);
      assert(bvci.format);
      bvci.offset = state->u.buf.offset;
      bvci.range = state->u.buf.size;

      err = vkCreateBufferView(screen->dev, &bvci, NULL, &sampler_view->buffer_view);
   }
   if (err != VK_SUCCESS) {
      /* drop the texture reference taken above so the resource isn't leaked */
      pipe_resource_reference(&sampler_view->base.texture, NULL);
      FREE(sampler_view);
      return NULL;
   }
   return &sampler_view->base;
}

static void
zink_sampler_view_destroy(struct pipe_context *pctx,
                          struct pipe_sampler_view *pview)
{
   struct zink_sampler_view *view = zink_sampler_view(pview);
   if (pview->texture->target == PIPE_BUFFER)
      vkDestroyBufferView(zink_screen(pctx->screen)->dev, view->buffer_view, NULL);
   else
      vkDestroyImageView(zink_screen(pctx->screen)->dev, view->image_view, NULL);
   pipe_resource_reference(&pview->texture, NULL);
   FREE(view);
}

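/* The tables below are the standard sample locations from the Vulkan
 * specification; the standardSampleLocations assert guarantees the device
 * actually uses them. */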
static void
zink_get_sample_position(struct pipe_context *ctx,
                         unsigned sample_count,
                         unsigned sample_index,
                         float *out_value)
{
   /* TODO: handle this I guess */
   assert(zink_screen(ctx->screen)->info.props.limits.standardSampleLocations);
   /* from 26.4. Multisampling */
   switch (sample_count) {
   case 0:
   case 1: {
      float pos[][2] = { {0.5,0.5}, };
      out_value[0] = pos[sample_index][0];
      out_value[1] = pos[sample_index][1];
      break;
   }
   case 2: {
      float pos[][2] = { {0.75,0.75},
                         {0.25,0.25}, };
      out_value[0] = pos[sample_index][0];
      out_value[1] = pos[sample_index][1];
      break;
   }
   case 4: {
      float pos[][2] = { {0.375, 0.125},
                         {0.875, 0.375},
                         {0.125, 0.625},
                         {0.625, 0.875}, };
      out_value[0] = pos[sample_index][0];
      out_value[1] = pos[sample_index][1];
      break;
   }
   case 8: {
      float pos[][2] = { {0.5625, 0.3125},
                         {0.4375, 0.6875},
                         {0.8125, 0.5625},
                         {0.3125, 0.1875},
                         {0.1875, 0.8125},
                         {0.0625, 0.4375},
                         {0.6875, 0.9375},
                         {0.9375, 0.0625}, };
      out_value[0] = pos[sample_index][0];
      out_value[1] = pos[sample_index][1];
      break;
   }
   case 16: {
      float pos[][2] = { {0.5625, 0.5625},
                         {0.4375, 0.3125},
                         {0.3125, 0.625},
                         {0.75, 0.4375},
                         {0.1875, 0.375},
                         {0.625, 0.8125},
                         {0.8125, 0.6875},
                         {0.6875, 0.1875},
                         {0.375, 0.875},
                         {0.5, 0.0625},
                         {0.25, 0.125},
                         {0.125, 0.75},
                         {0.0, 0.5},
                         {0.9375, 0.25},
                         {0.875, 0.9375},
                         {0.0625, 0.0}, };
      out_value[0] = pos[sample_index][0];
      out_value[1] = pos[sample_index][1];
      break;
   }
   default:
      unreachable("unhandled sample count!");
   }
}

static void
zink_set_polygon_stipple(struct pipe_context *pctx,
                         const struct pipe_poly_stipple *ps)
{
}

static void
zink_set_vertex_buffers(struct pipe_context *pctx,
                        unsigned start_slot,
                        unsigned num_buffers,
                        const struct pipe_vertex_buffer *buffers)
{
   struct zink_context *ctx = zink_context(pctx);

   if (buffers) {
      for (int i = 0; i < num_buffers; ++i) {
         const struct pipe_vertex_buffer *vb = buffers + i;
         struct zink_resource *res = zink_resource(vb->buffer.resource);

         ctx->gfx_pipeline_state.bindings[start_slot + i].stride = vb->stride;
         if (res && res->needs_xfb_barrier) {
            /* if we're binding a previously-used xfb buffer, we need cmd buffer
             * synchronization to ensure that we use the right buffer data
             */
            pctx->flush(pctx, NULL, 0);
            res->needs_xfb_barrier = false;
         }
      }
      ctx->gfx_pipeline_state.hash = 0;
   }

   util_set_vertex_buffers_mask(ctx->buffers, &ctx->buffers_enabled_mask,
                                buffers, start_slot, num_buffers);
}

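/* Gallium expresses a viewport as a scale/translate transform, while Vulkan
 * wants an origin plus extent, so recover the rectangle from the transform:
 *
 *    x = translate[0] - scale[0]    width  = scale[0] * 2
 *    y = translate[1] - scale[1]    height = scale[1] * 2
 *
 * and likewise minDepth/maxDepth from translate[2] -/+ scale[2]. */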
static void
zink_set_viewport_states(struct pipe_context *pctx,
                         unsigned start_slot,
                         unsigned num_viewports,
                         const struct pipe_viewport_state *state)
{
   struct zink_context *ctx = zink_context(pctx);

   for (unsigned i = 0; i < num_viewports; ++i) {
      VkViewport viewport = {
         state[i].translate[0] - state[i].scale[0],
         state[i].translate[1] - state[i].scale[1],
         state[i].scale[0] * 2,
         state[i].scale[1] * 2,
         state[i].translate[2] - state[i].scale[2],
         state[i].translate[2] + state[i].scale[2]
      };
      ctx->viewport_states[start_slot + i] = state[i];
      ctx->viewports[start_slot + i] = viewport;
   }
   if (ctx->gfx_pipeline_state.num_viewports != start_slot + num_viewports)
      ctx->gfx_pipeline_state.hash = 0;
   ctx->gfx_pipeline_state.num_viewports = start_slot + num_viewports;
}

static void
zink_set_scissor_states(struct pipe_context *pctx,
                        unsigned start_slot, unsigned num_scissors,
                        const struct pipe_scissor_state *states)
{
   struct zink_context *ctx = zink_context(pctx);

   for (unsigned i = 0; i < num_scissors; i++) {
      VkRect2D scissor;

      scissor.offset.x = states[i].minx;
      scissor.offset.y = states[i].miny;
      scissor.extent.width = states[i].maxx - states[i].minx;
      scissor.extent.height = states[i].maxy - states[i].miny;
      ctx->scissor_states[start_slot + i] = states[i];
      ctx->scissors[start_slot + i] = scissor;
   }
}

static void
zink_set_constant_buffer(struct pipe_context *pctx,
                         enum pipe_shader_type shader, uint index,
                         const struct pipe_constant_buffer *cb)
{
   struct zink_context *ctx = zink_context(pctx);

   if (cb) {
      struct pipe_resource *buffer = cb->buffer;
      unsigned offset = cb->buffer_offset;
      if (cb->user_buffer) {
         struct zink_screen *screen = zink_screen(pctx->screen);
         u_upload_data(ctx->base.const_uploader, 0, cb->buffer_size,
                       screen->info.props.limits.minUniformBufferOffsetAlignment,
                       cb->user_buffer, &offset, &buffer);
      }

      pipe_resource_reference(&ctx->ubos[shader][index].buffer, buffer);
      ctx->ubos[shader][index].buffer_offset = offset;
      ctx->ubos[shader][index].buffer_size = cb->buffer_size;
      ctx->ubos[shader][index].user_buffer = NULL;

      if (cb->user_buffer)
         pipe_resource_reference(&buffer, NULL);
   } else {
      pipe_resource_reference(&ctx->ubos[shader][index].buffer, NULL);
      ctx->ubos[shader][index].buffer_offset = 0;
      ctx->ubos[shader][index].buffer_size = 0;
      ctx->ubos[shader][index].user_buffer = NULL;
   }
}

static void
zink_set_sampler_views(struct pipe_context *pctx,
                       enum pipe_shader_type shader_type,
                       unsigned start_slot,
                       unsigned num_views,
                       struct pipe_sampler_view **views)
{
   struct zink_context *ctx = zink_context(pctx);
   for (unsigned i = 0; i < num_views; ++i) {
      struct pipe_sampler_view *pview = views ? views[i] : NULL;
      pipe_sampler_view_reference(
         &ctx->image_views[shader_type][start_slot + i],
         pview);
   }
   ctx->num_image_views[shader_type] = start_slot + num_views;
}

static void
zink_set_stencil_ref(struct pipe_context *pctx,
                     const struct pipe_stencil_ref *ref)
{
   struct zink_context *ctx = zink_context(pctx);
   ctx->stencil_ref = *ref;
}

static void
zink_set_clip_state(struct pipe_context *pctx,
                    const struct pipe_clip_state *pcs)
{
}

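/* Derive a render-pass state key from the current framebuffer state and look
 * it up in the per-context cache, creating and caching a new render pass on
 * a miss.  Unbound color attachments get a dummy VK_FORMAT_R8_UINT entry so
 * the key stays fully defined. */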
static struct zink_render_pass *
get_render_pass(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   const struct pipe_framebuffer_state *fb = &ctx->fb_state;
   struct zink_render_pass_state state = { 0 };

   for (int i = 0; i < fb->nr_cbufs; i++) {
      struct pipe_surface *surf = fb->cbufs[i];
      if (surf) {
         state.rts[i].format = zink_get_format(screen, surf->format);
         state.rts[i].samples = surf->texture->nr_samples > 0 ? surf->texture->nr_samples :
                                                                VK_SAMPLE_COUNT_1_BIT;
      } else {
         state.rts[i].format = VK_FORMAT_R8_UINT;
         state.rts[i].samples = MAX2(fb->samples, 1);
      }
   }
   state.num_cbufs = fb->nr_cbufs;

   if (fb->zsbuf) {
      struct zink_resource *zsbuf = zink_resource(fb->zsbuf->texture);
      state.rts[fb->nr_cbufs].format = zsbuf->format;
      state.rts[fb->nr_cbufs].samples = zsbuf->base.nr_samples > 0 ? zsbuf->base.nr_samples : VK_SAMPLE_COUNT_1_BIT;
   }
   state.have_zsbuf = fb->zsbuf != NULL;

   struct hash_entry *entry = _mesa_hash_table_search(ctx->render_pass_cache,
                                                      &state);
   if (!entry) {
      struct zink_render_pass *rp;
      rp = zink_create_render_pass(screen, &state);
      entry = _mesa_hash_table_insert(ctx->render_pass_cache, &state, rp);
      if (!entry)
         return NULL;
   }

   return entry->data;
}

static struct zink_framebuffer *
create_framebuffer(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);

   struct zink_framebuffer_state state = {};
   state.rp = get_render_pass(ctx);
   for (int i = 0; i < ctx->fb_state.nr_cbufs; i++) {
      struct pipe_surface *psurf = ctx->fb_state.cbufs[i];
      state.attachments[i] = zink_surface(psurf);
      state.has_null_attachments |= !state.attachments[i];
   }

   state.num_attachments = ctx->fb_state.nr_cbufs;
   if (ctx->fb_state.zsbuf) {
      struct pipe_surface *psurf = ctx->fb_state.zsbuf;
      state.attachments[state.num_attachments++] = zink_surface(psurf);
   }

   state.width = MAX2(ctx->fb_state.width, 1);
   state.height = MAX2(ctx->fb_state.height, 1);
   state.layers = MAX2(util_framebuffer_get_num_layers(&ctx->fb_state), 1);
   state.samples = ctx->fb_state.samples;

   return zink_create_framebuffer(ctx, screen, &state);
}

static void
framebuffer_state_buffer_barriers_setup(struct zink_context *ctx,
                                        const struct pipe_framebuffer_state *state,
                                        struct zink_batch *batch)
{
   for (int i = 0; i < state->nr_cbufs; i++) {
      struct pipe_surface *surf = state->cbufs[i];
      if (!surf)
         surf = ctx->framebuffer->null_surface;
      struct zink_resource *res = zink_resource(surf->texture);
      if (res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
   }

   if (state->zsbuf) {
      struct zink_resource *res = zink_resource(state->zsbuf->texture);
      if (res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
   }
}

void
zink_begin_render_pass(struct zink_context *ctx, struct zink_batch *batch)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   assert(batch == zink_curr_batch(ctx));
   assert(ctx->gfx_pipeline_state.render_pass);

   struct pipe_framebuffer_state *fb_state = &ctx->fb_state;

   VkRenderPassBeginInfo rpbi = {};
   rpbi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
   rpbi.renderPass = ctx->gfx_pipeline_state.render_pass->render_pass;
   rpbi.renderArea.offset.x = 0;
   rpbi.renderArea.offset.y = 0;
   rpbi.renderArea.extent.width = fb_state->width;
   rpbi.renderArea.extent.height = fb_state->height;
   rpbi.clearValueCount = 0;
   rpbi.pClearValues = NULL;
   rpbi.framebuffer = ctx->framebuffer->fb;

   assert(ctx->gfx_pipeline_state.render_pass && ctx->framebuffer);
   assert(!batch->rp || batch->rp == ctx->gfx_pipeline_state.render_pass);
   assert(!batch->fb || batch->fb == ctx->framebuffer);

   framebuffer_state_buffer_barriers_setup(ctx, fb_state, batch);

   zink_render_pass_reference(screen, &batch->rp, ctx->gfx_pipeline_state.render_pass);
   zink_framebuffer_reference(screen, &batch->fb, ctx->framebuffer);
   for (struct zink_surface **surf = (struct zink_surface **)batch->fb->surfaces; *surf; surf++)
      zink_batch_reference_resource_rw(batch, zink_resource((*surf)->base.texture), true);

   vkCmdBeginRenderPass(batch->cmdbuf, &rpbi, VK_SUBPASS_CONTENTS_INLINE);
}

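/* End any active render pass, submit the current batch and advance to the
 * next batch in the ring; starting that batch may wait for its previous
 * submission to complete before its resources can be reused. */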
static void
flush_batch(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (batch->rp)
      vkCmdEndRenderPass(batch->cmdbuf);

   zink_end_batch(ctx, batch);

   ctx->curr_batch++;
   if (ctx->curr_batch == ARRAY_SIZE(ctx->batches))
      ctx->curr_batch = 0;

   zink_start_batch(ctx, zink_curr_batch(ctx));
}

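/* Fetch the current batch in a known render-pass state: zink_batch_rp()
 * ensures a render pass has begun (needed for draws and clears), while
 * zink_batch_no_rp() ensures we are outside one (needed for copies,
 * barriers and other commands that are illegal inside a render pass). */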
struct zink_batch *
zink_batch_rp(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (!batch->rp) {
      zink_begin_render_pass(ctx, batch);
      assert(batch->rp);
   }
   return batch;
}

struct zink_batch *
zink_batch_no_rp(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (batch->rp) {
      /* flush batch and get a new one */
      flush_batch(ctx);
      batch = zink_curr_batch(ctx);
      assert(!batch->rp);
   }
   return batch;
}

static void
zink_set_framebuffer_state(struct pipe_context *pctx,
                           const struct pipe_framebuffer_state *state)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);

   util_copy_framebuffer_state(&ctx->fb_state, state);

   struct zink_framebuffer *fb = ctx->framebuffer;
   /* explicitly unref previous fb to ensure it gets destroyed */
   if (fb)
      zink_framebuffer_reference(screen, &fb, NULL);
   fb = create_framebuffer(ctx);
   zink_framebuffer_reference(screen, &ctx->framebuffer, fb);
   zink_render_pass_reference(screen, &ctx->gfx_pipeline_state.render_pass, fb->rp);

   ctx->gfx_pipeline_state.rast_samples = util_framebuffer_get_num_samples(state);
   ctx->gfx_pipeline_state.num_attachments = state->nr_cbufs;
   ctx->gfx_pipeline_state.hash = 0;

   struct zink_batch *batch = zink_batch_no_rp(ctx);

   framebuffer_state_buffer_barriers_setup(ctx, state, batch);
}

static void
zink_set_blend_color(struct pipe_context *pctx,
                     const struct pipe_blend_color *color)
{
   struct zink_context *ctx = zink_context(pctx);
   memcpy(ctx->blend_constants, color->color, sizeof(float) * 4);
}

static void
zink_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
{
   struct zink_context *ctx = zink_context(pctx);
   ctx->gfx_pipeline_state.sample_mask = sample_mask;
   ctx->gfx_pipeline_state.hash = 0;
}

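/* Helpers mapping image layouts to the access masks and pipeline stages
 * used when transitioning into or out of them.  Unhandled layouts fall back
 * to TOP/BOTTOM_OF_PIPE, which over-serializes but is always safe. */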
static VkAccessFlags
access_src_flags(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_UNDEFINED:
   case VK_IMAGE_LAYOUT_GENERAL:
      return 0;

   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;

   case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
      return VK_ACCESS_SHADER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_ACCESS_TRANSFER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_ACCESS_TRANSFER_WRITE_BIT;

   case VK_IMAGE_LAYOUT_PREINITIALIZED:
      return VK_ACCESS_HOST_WRITE_BIT;

   default:
      unreachable("unexpected layout");
   }
}

static VkAccessFlags
access_dst_flags(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_UNDEFINED:
   case VK_IMAGE_LAYOUT_GENERAL:
      return 0;

   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_ACCESS_TRANSFER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_ACCESS_TRANSFER_WRITE_BIT;

   default:
      unreachable("unexpected layout");
   }
}

static VkPipelineStageFlags
pipeline_dst_stage(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;
   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;

   default:
      return VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
   }
}

static VkPipelineStageFlags
pipeline_src_stage(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;
   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;

   default:
      return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
   }
}


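/* Record a layout transition covering all mip levels and array layers of an
 * image.  The source access mask and stage are derived from the resource's
 * currently tracked layout, which is updated to new_layout afterwards. */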
void
zink_resource_barrier(VkCommandBuffer cmdbuf, struct zink_resource *res,
                      VkImageAspectFlags aspect, VkImageLayout new_layout)
{
   VkImageSubresourceRange isr = {
      aspect,
      0, VK_REMAINING_MIP_LEVELS,
      0, VK_REMAINING_ARRAY_LAYERS
   };

   VkImageMemoryBarrier imb = {
      VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
      NULL,
      access_src_flags(res->layout),
      access_dst_flags(new_layout),
      res->layout,
      new_layout,
      VK_QUEUE_FAMILY_IGNORED,
      VK_QUEUE_FAMILY_IGNORED,
      res->image,
      isr
   };
   vkCmdPipelineBarrier(
      cmdbuf,
      pipeline_src_stage(res->layout),
      pipeline_dst_stage(new_layout),
      0,
      0, NULL,
      0, NULL,
      1, &imb
   );

   res->layout = new_layout;
}

static void
zink_clear(struct pipe_context *pctx,
           unsigned buffers,
           const struct pipe_scissor_state *scissor_state,
           const union pipe_color_union *pcolor,
           double depth, unsigned stencil)
{
   struct zink_context *ctx = zink_context(pctx);
   struct pipe_framebuffer_state *fb = &ctx->fb_state;

   /* FIXME: this is very inefficient; if no renderpass has been started yet,
    * we should record the clear if it's full-screen, and apply it as we
    * start the render-pass. Otherwise we can do a partial out-of-renderpass
    * clear.
    */
   struct zink_batch *batch = zink_batch_rp(ctx);

   VkClearAttachment attachments[1 + PIPE_MAX_COLOR_BUFS];
   int num_attachments = 0;

   if (buffers & PIPE_CLEAR_COLOR) {
      VkClearColorValue color;
      color.float32[0] = pcolor->f[0];
      color.float32[1] = pcolor->f[1];
      color.float32[2] = pcolor->f[2];
      color.float32[3] = pcolor->f[3];

      for (unsigned i = 0; i < fb->nr_cbufs; i++) {
         if (!(buffers & (PIPE_CLEAR_COLOR0 << i)) || !fb->cbufs[i])
            continue;

         attachments[num_attachments].aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
         attachments[num_attachments].colorAttachment = i;
         attachments[num_attachments].clearValue.color = color;
         ++num_attachments;
      }
   }

   if (buffers & PIPE_CLEAR_DEPTHSTENCIL && fb->zsbuf) {
      VkImageAspectFlags aspect = 0;
      if (buffers & PIPE_CLEAR_DEPTH)
         aspect |= VK_IMAGE_ASPECT_DEPTH_BIT;
      if (buffers & PIPE_CLEAR_STENCIL)
         aspect |= VK_IMAGE_ASPECT_STENCIL_BIT;

      attachments[num_attachments].aspectMask = aspect;
      attachments[num_attachments].clearValue.depthStencil.depth = depth;
      attachments[num_attachments].clearValue.depthStencil.stencil = stencil;
      ++num_attachments;
   }

   VkClearRect cr;
   cr.rect.offset.x = 0;
   cr.rect.offset.y = 0;
   cr.rect.extent.width = fb->width;
   cr.rect.extent.height = fb->height;
   cr.baseArrayLayer = 0;
   cr.layerCount = util_framebuffer_get_num_layers(fb);
   vkCmdClearAttachments(batch->cmdbuf, num_attachments, attachments, 1, &cr);
}

VkShaderStageFlagBits
zink_shader_stage(enum pipe_shader_type type)
{
   VkShaderStageFlagBits stages[] = {
      [PIPE_SHADER_VERTEX] = VK_SHADER_STAGE_VERTEX_BIT,
      [PIPE_SHADER_FRAGMENT] = VK_SHADER_STAGE_FRAGMENT_BIT,
      [PIPE_SHADER_GEOMETRY] = VK_SHADER_STAGE_GEOMETRY_BIT,
      [PIPE_SHADER_TESS_CTRL] = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
      [PIPE_SHADER_TESS_EVAL] = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
      [PIPE_SHADER_COMPUTE] = VK_SHADER_STAGE_COMPUTE_BIT,
   };
   return stages[type];
}

static uint32_t
hash_gfx_program(const void *key)
{
   return _mesa_hash_data(key, sizeof(struct zink_shader *) * (ZINK_SHADER_COUNT));
}

static bool
equals_gfx_program(const void *a, const void *b)
{
   return memcmp(a, b, sizeof(struct zink_shader *) * (ZINK_SHADER_COUNT)) == 0;
}

static uint32_t
hash_render_pass_state(const void *key)
{
   return _mesa_hash_data(key, sizeof(struct zink_render_pass_state));
}

static bool
equals_render_pass_state(const void *a, const void *b)
{
   return memcmp(a, b, sizeof(struct zink_render_pass_state)) == 0;
}

static void
zink_flush(struct pipe_context *pctx,
           struct pipe_fence_handle **pfence,
           enum pipe_flush_flags flags)
{
   struct zink_context *ctx = zink_context(pctx);

   struct zink_batch *batch = zink_curr_batch(ctx);
   flush_batch(ctx);

   if (zink_screen(pctx->screen)->info.have_EXT_transform_feedback && ctx->num_so_targets)
      ctx->dirty_so_targets = true;

   if (pfence)
      zink_fence_reference(zink_screen(pctx->screen),
                           (struct zink_fence **)pfence,
                           batch->fence);

   /* HACK:
    * For some strange reason, we need to finish before presenting, or else
    * we start rendering on top of the back-buffer for the next frame. This
    * seems like a bug in the DRI-driver to me, because we really should
    * be properly protected by fences here, and the back-buffer should
    * either be swapped with the front-buffer, or blitted from. But for
    * some strange reason, neither of these things happen.
    */
   if (flags & PIPE_FLUSH_END_OF_FRAME)
      pctx->screen->fence_finish(pctx->screen, pctx,
                                 (struct pipe_fence_handle *)batch->fence,
                                 PIPE_TIMEOUT_INFINITE);
}

void
zink_fence_wait(struct pipe_context *pctx)
{
   struct pipe_fence_handle *fence = NULL;
   pctx->flush(pctx, &fence, PIPE_FLUSH_HINT_FINISH);
   if (fence) {
      pctx->screen->fence_finish(pctx->screen, NULL, fence,
                                 PIPE_TIMEOUT_INFINITE);
      pctx->screen->fence_reference(pctx->screen, &fence, NULL);
   }
}

static void
zink_flush_resource(struct pipe_context *pipe,
                    struct pipe_resource *resource)
{
}

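/* Image-to-image and buffer-to-buffer copies map directly onto Vulkan copy
 * commands; note that for array textures the pipe_box z/depth selects array
 * layers, while for 3D textures it selects depth slices.  Mixed buffer/image
 * copies are not implemented yet. */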
static void
zink_resource_copy_region(struct pipe_context *pctx,
                          struct pipe_resource *pdst,
                          unsigned dst_level, unsigned dstx, unsigned dsty, unsigned dstz,
                          struct pipe_resource *psrc,
                          unsigned src_level, const struct pipe_box *src_box)
{
   struct zink_resource *dst = zink_resource(pdst);
   struct zink_resource *src = zink_resource(psrc);
   struct zink_context *ctx = zink_context(pctx);
   if (dst->base.target != PIPE_BUFFER && src->base.target != PIPE_BUFFER) {
      VkImageCopy region = {};
      if (util_format_get_num_planes(src->base.format) == 1 &&
          util_format_get_num_planes(dst->base.format) == 1) {
         /* If neither the calling command’s srcImage nor the calling command’s dstImage
          * has a multi-planar image format then the aspectMask member of srcSubresource
          * and dstSubresource must match
          *
          * -VkImageCopy spec
          */
         assert(src->aspect == dst->aspect);
      } else
         unreachable("planar formats not yet handled");

      region.srcSubresource.aspectMask = src->aspect;
      region.srcSubresource.mipLevel = src_level;
      region.srcSubresource.layerCount = 1;
      if (src->base.array_size > 1) {
         region.srcSubresource.baseArrayLayer = src_box->z;
         region.srcSubresource.layerCount = src_box->depth;
         region.extent.depth = 1;
      } else {
         region.srcOffset.z = src_box->z;
         region.srcSubresource.layerCount = 1;
         region.extent.depth = src_box->depth;
      }

      region.srcOffset.x = src_box->x;
      region.srcOffset.y = src_box->y;

      region.dstSubresource.aspectMask = dst->aspect;
      region.dstSubresource.mipLevel = dst_level;
      if (dst->base.array_size > 1) {
         region.dstSubresource.baseArrayLayer = dstz;
         region.dstSubresource.layerCount = src_box->depth;
      } else {
         region.dstOffset.z = dstz;
         region.dstSubresource.layerCount = 1;
      }

      region.dstOffset.x = dstx;
      region.dstOffset.y = dsty;
      region.extent.width = src_box->width;
      region.extent.height = src_box->height;

      struct zink_batch *batch = zink_batch_no_rp(ctx);
      zink_batch_reference_resource_rw(batch, src, false);
      zink_batch_reference_resource_rw(batch, dst, true);

      zink_resource_setup_transfer_layouts(batch, src, dst);
      vkCmdCopyImage(batch->cmdbuf, src->image, src->layout,
                     dst->image, dst->layout,
                     1, &region);
   } else if (dst->base.target == PIPE_BUFFER &&
              src->base.target == PIPE_BUFFER) {
      VkBufferCopy region;
      region.srcOffset = src_box->x;
      region.dstOffset = dstx;
      region.size = src_box->width;

      struct zink_batch *batch = zink_batch_no_rp(ctx);
      zink_batch_reference_resource_rw(batch, src, false);
      zink_batch_reference_resource_rw(batch, dst, true);

      vkCmdCopyBuffer(batch->cmdbuf, src->buffer, dst->buffer, 1, &region);
   } else
      debug_printf("zink: TODO resource copy\n");
}

static struct pipe_stream_output_target *
zink_create_stream_output_target(struct pipe_context *pctx,
                                 struct pipe_resource *pres,
                                 unsigned buffer_offset,
                                 unsigned buffer_size)
{
   struct zink_so_target *t;
   t = CALLOC_STRUCT(zink_so_target);
   if (!t)
      return NULL;

   /* using PIPE_BIND_CUSTOM here lets us create a custom pipe buffer resource,
    * which allows us to differentiate and use VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT
    * as we must for this case
    */
   t->counter_buffer = pipe_buffer_create(pctx->screen, PIPE_BIND_STREAM_OUTPUT | PIPE_BIND_CUSTOM, PIPE_USAGE_DEFAULT, 4);
   if (!t->counter_buffer) {
      FREE(t);
      return NULL;
   }

   t->base.reference.count = 1;
   t->base.context = pctx;
   pipe_resource_reference(&t->base.buffer, pres);
   t->base.buffer_offset = buffer_offset;
   t->base.buffer_size = buffer_size;

   return &t->base;
}

static void
zink_stream_output_target_destroy(struct pipe_context *pctx,
                                  struct pipe_stream_output_target *psot)
{
   struct zink_so_target *t = (struct zink_so_target *)psot;
   pipe_resource_reference(&t->counter_buffer, NULL);
   pipe_resource_reference(&t->base.buffer, NULL);
   FREE(t);
}

static void
zink_set_stream_output_targets(struct pipe_context *pctx,
                               unsigned num_targets,
                               struct pipe_stream_output_target **targets,
                               const unsigned *offsets)
{
   struct zink_context *ctx = zink_context(pctx);

   if (num_targets == 0) {
      for (unsigned i = 0; i < ctx->num_so_targets; i++)
         pipe_so_target_reference(&ctx->so_targets[i], NULL);
      ctx->num_so_targets = 0;
   } else {
      for (unsigned i = 0; i < num_targets; i++)
         pipe_so_target_reference(&ctx->so_targets[i], targets[i]);
      for (unsigned i = num_targets; i < ctx->num_so_targets; i++)
         pipe_so_target_reference(&ctx->so_targets[i], NULL);
      ctx->num_so_targets = num_targets;

      /* emit memory barrier on next draw for synchronization */
      if (offsets[0] == (unsigned)-1)
         ctx->xfb_barrier = true;
      /* TODO: possibly avoid rebinding on resume if resuming from same buffers? */
      ctx->dirty_so_targets = true;
   }
}

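/* Build the pipe_context: wire up the state and draw entrypoints, then
 * create the per-context Vulkan objects (a command pool plus a ring of
 * batches, each with its own command buffer and descriptor pool) and the
 * caches keyed on shader-program and render-pass state. */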
struct pipe_context *
zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
{
   struct zink_screen *screen = zink_screen(pscreen);
   struct zink_context *ctx = CALLOC_STRUCT(zink_context);
   if (!ctx)
      goto fail;

   ctx->gfx_pipeline_state.hash = 0;

   ctx->base.screen = pscreen;
   ctx->base.priv = priv;

   ctx->base.destroy = zink_context_destroy;
   ctx->base.get_device_reset_status = zink_get_device_reset_status;
   ctx->base.set_device_reset_callback = zink_set_device_reset_callback;

   zink_context_state_init(&ctx->base);

   ctx->base.create_sampler_state = zink_create_sampler_state;
   ctx->base.bind_sampler_states = zink_bind_sampler_states;
   ctx->base.delete_sampler_state = zink_delete_sampler_state;

   ctx->base.create_sampler_view = zink_create_sampler_view;
   ctx->base.set_sampler_views = zink_set_sampler_views;
   ctx->base.sampler_view_destroy = zink_sampler_view_destroy;
   ctx->base.get_sample_position = zink_get_sample_position;

   zink_program_init(ctx);

   ctx->base.set_polygon_stipple = zink_set_polygon_stipple;
   ctx->base.set_vertex_buffers = zink_set_vertex_buffers;
   ctx->base.set_viewport_states = zink_set_viewport_states;
   ctx->base.set_scissor_states = zink_set_scissor_states;
   ctx->base.set_constant_buffer = zink_set_constant_buffer;
   ctx->base.set_framebuffer_state = zink_set_framebuffer_state;
   ctx->base.set_stencil_ref = zink_set_stencil_ref;
   ctx->base.set_clip_state = zink_set_clip_state;
   ctx->base.set_blend_color = zink_set_blend_color;

   ctx->base.set_sample_mask = zink_set_sample_mask;

   ctx->base.clear = zink_clear;
   ctx->base.draw_vbo = zink_draw_vbo;
   ctx->base.flush = zink_flush;

   ctx->base.resource_copy_region = zink_resource_copy_region;
   ctx->base.blit = zink_blit;
   ctx->base.create_stream_output_target = zink_create_stream_output_target;
   ctx->base.stream_output_target_destroy = zink_stream_output_target_destroy;

   ctx->base.set_stream_output_targets = zink_set_stream_output_targets;
   ctx->base.flush_resource = zink_flush_resource;
   zink_context_surface_init(&ctx->base);
   zink_context_resource_init(&ctx->base);
   zink_context_query_init(&ctx->base);

   slab_create_child(&ctx->transfer_pool, &screen->transfer_pool);

   ctx->base.stream_uploader = u_upload_create_default(&ctx->base);
   ctx->base.const_uploader = ctx->base.stream_uploader;

   int prim_hwsupport = 1 << PIPE_PRIM_POINTS |
                        1 << PIPE_PRIM_LINES |
                        1 << PIPE_PRIM_LINE_STRIP |
                        1 << PIPE_PRIM_TRIANGLES |
                        1 << PIPE_PRIM_TRIANGLE_STRIP;
   if (screen->have_triangle_fans)
      prim_hwsupport |= 1 << PIPE_PRIM_TRIANGLE_FAN;

   ctx->primconvert = util_primconvert_create(&ctx->base, prim_hwsupport);
   if (!ctx->primconvert)
      goto fail;

   ctx->blitter = util_blitter_create(&ctx->base);
   if (!ctx->blitter)
      goto fail;

   VkCommandPoolCreateInfo cpci = {};
   cpci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
   cpci.queueFamilyIndex = screen->gfx_queue;
   cpci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
   if (vkCreateCommandPool(screen->dev, &cpci, NULL, &ctx->cmdpool) != VK_SUCCESS)
      goto fail;

   VkCommandBufferAllocateInfo cbai = {};
   cbai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
   cbai.commandPool = ctx->cmdpool;
   cbai.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
   cbai.commandBufferCount = 1;

   VkDescriptorPoolSize sizes[] = {
      {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, ZINK_BATCH_DESC_SIZE},
      {VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, ZINK_BATCH_DESC_SIZE},
      {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, ZINK_BATCH_DESC_SIZE},
      {VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, ZINK_BATCH_DESC_SIZE},
      {VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, ZINK_BATCH_DESC_SIZE},
      {VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, ZINK_BATCH_DESC_SIZE},
   };
   VkDescriptorPoolCreateInfo dpci = {};
   dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
   dpci.pPoolSizes = sizes;
   dpci.poolSizeCount = ARRAY_SIZE(sizes);
   dpci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
   dpci.maxSets = ZINK_BATCH_DESC_SIZE;

   for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i) {
      if (vkAllocateCommandBuffers(screen->dev, &cbai, &ctx->batches[i].cmdbuf) != VK_SUCCESS)
         goto fail;

      ctx->batches[i].resources = _mesa_set_create(NULL, _mesa_hash_pointer,
                                                   _mesa_key_pointer_equal);
      ctx->batches[i].sampler_views = _mesa_set_create(NULL,
                                                       _mesa_hash_pointer,
                                                       _mesa_key_pointer_equal);
      ctx->batches[i].programs = _mesa_set_create(NULL,
                                                  _mesa_hash_pointer,
                                                  _mesa_key_pointer_equal);

      if (!ctx->batches[i].resources || !ctx->batches[i].sampler_views ||
          !ctx->batches[i].programs)
         goto fail;

      util_dynarray_init(&ctx->batches[i].zombie_samplers, NULL);

      if (vkCreateDescriptorPool(screen->dev, &dpci, 0,
                                 &ctx->batches[i].descpool) != VK_SUCCESS)
         goto fail;

      ctx->batches[i].batch_id = i;
   }

   vkGetDeviceQueue(screen->dev, screen->gfx_queue, 0, &ctx->queue);

   ctx->program_cache = _mesa_hash_table_create(NULL,
                                                hash_gfx_program,
                                                equals_gfx_program);
   ctx->render_pass_cache = _mesa_hash_table_create(NULL,
                                                    hash_render_pass_state,
                                                    equals_render_pass_state);
   if (!ctx->program_cache || !ctx->render_pass_cache)
      goto fail;

   const uint8_t data[] = { 0 };
   ctx->dummy_buffer = pipe_buffer_create_with_data(&ctx->base,
      PIPE_BIND_VERTEX_BUFFER, PIPE_USAGE_IMMUTABLE, sizeof(data), data);
   if (!ctx->dummy_buffer)
      goto fail;

   /* start the first batch */
   zink_start_batch(ctx, zink_curr_batch(ctx));

   return &ctx->base;

fail:
   if (ctx) {
      vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);
      FREE(ctx);
   }
   return NULL;
}