1 /*
2 * Copyright 2018 Collabora Ltd.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "zink_state.h"
25
26 #include "zink_context.h"
27 #include "zink_format.h"
28 #include "zink_program.h"
29 #include "zink_screen.h"
30
31 #include "compiler/shader_enums.h"
32 #include "util/u_dual_blend.h"
33 #include "util/u_memory.h"
34 #include "util/u_helpers.h"
35 #include "vulkan/util/vk_format.h"
36
37 #include <math.h>
38
/* Translate a gallium vertex-elements CSO into zink's dual representation:
 * either VK_EXT_vertex_input_dynamic_state structs (dynattribs/dynbindings) or
 * baked VkVertexInputAttribute/BindingDescription arrays, depending on what
 * the screen supports. Formats unusable as vertex buffers are decomposed into
 * one attribute per channel, with the extra attributes appended after the
 * user-visible ones and recorded in the decomposed_attrs* shader-key masks.
 */
static void *
zink_create_vertex_elements_state(struct pipe_context *pctx,
                                  unsigned num_elements,
                                  const struct pipe_vertex_element *elements)
{
   struct zink_screen *screen = zink_screen(pctx->screen);
   unsigned int i;
   struct zink_vertex_elements_state *ves = CALLOC_STRUCT(zink_vertex_elements_state);
   if (!ves)
      return NULL;
   /* pointer identity is sufficient for pipeline-state hashing */
   ves->hw_state.hash = _mesa_hash_pointer(ves);

   /* compact the (possibly sparse) set of referenced vertex buffer indices
    * into a dense range of Vulkan binding slots:
    * buffer_map[vb_index] -> dense binding, binding_map records the inverse
    */
   int buffer_map[PIPE_MAX_ATTRIBS];
   for (int j = 0; j < ARRAY_SIZE(buffer_map); ++j)
      buffer_map[j] = -1;

   int num_bindings = 0;
   unsigned num_decomposed = 0;
   /* masks of decomposed attributes, keyed by per-channel blocksize in bytes */
   uint32_t size8 = 0;
   uint32_t size16 = 0;
   uint32_t size32 = 0;
   /* strides[] is only filled/consumed on the dynamic-state path below */
   uint16_t strides[PIPE_MAX_ATTRIBS];
   for (i = 0; i < num_elements; ++i) {
      const struct pipe_vertex_element *elem = elements + i;

      int binding = elem->vertex_buffer_index;
      if (buffer_map[binding] < 0) {
         ves->hw_state.binding_map[num_bindings] = binding;
         buffer_map[binding] = num_bindings++;
      }
      binding = buffer_map[binding];

      ves->bindings[binding].binding = binding;
      ves->bindings[binding].inputRate = elem->instance_divisor ? VK_VERTEX_INPUT_RATE_INSTANCE : VK_VERTEX_INPUT_RATE_VERTEX;

      assert(!elem->instance_divisor || zink_screen(pctx->screen)->info.have_EXT_vertex_attribute_divisor);
      /* the divisor is clamped to the device limit; warn because rendering may differ */
      if (elem->instance_divisor > screen->info.vdiv_props.maxVertexAttribDivisor)
         debug_printf("zink: clamping instance divisor %u to %u\n", elem->instance_divisor, screen->info.vdiv_props.maxVertexAttribDivisor);
      ves->divisor[binding] = MIN2(elem->instance_divisor, screen->info.vdiv_props.maxVertexAttribDivisor);

      VkFormat format;
      if (screen->format_props[elem->src_format].bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT)
         format = zink_get_format(screen, elem->src_format);
      else {
         /* format can't feed a vertex buffer directly: split it into a
          * single-channel format and emit one attribute per channel
          * (extra attributes are appended after this loop)
          */
         enum pipe_format new_format = zink_decompose_vertex_format(elem->src_format);
         assert(new_format);
         num_decomposed++;
         assert(screen->format_props[new_format].bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT);
         if (util_format_get_blocksize(new_format) == 4)
            size32 |= BITFIELD_BIT(i);
         else if (util_format_get_blocksize(new_format) == 2)
            size16 |= BITFIELD_BIT(i);
         else
            size8 |= BITFIELD_BIT(i);
         format = zink_get_format(screen, new_format);
         /* width in bytes of the shader-key mask needed to hold this
          * attribute index (u8/u16/u32 variants in the vs key)
          */
         unsigned size;
         if (i < 8)
            size = 1;
         else if (i < 16)
            size = 2;
         else
            size = 4;
         if (util_format_get_nr_components(elem->src_format) == 4) {
            ves->decomposed_attrs |= BITFIELD_BIT(i);
            ves->decomposed_attrs_size = size;
         } else {
            /* fewer than 4 components: shader must also synthesize .w */
            ves->decomposed_attrs_without_w |= BITFIELD_BIT(i);
            ves->decomposed_attrs_without_w_size = size;
         }
         ves->has_decomposed_attrs = true;
      }

      if (screen->info.have_EXT_vertex_input_dynamic_state) {
         ves->hw_state.dynattribs[i].sType = VK_STRUCTURE_TYPE_VERTEX_INPUT_ATTRIBUTE_DESCRIPTION_2_EXT;
         ves->hw_state.dynattribs[i].binding = binding;
         ves->hw_state.dynattribs[i].location = i;
         ves->hw_state.dynattribs[i].format = format;
         strides[binding] = elem->src_stride;
         assert(ves->hw_state.dynattribs[i].format != VK_FORMAT_UNDEFINED);
         ves->hw_state.dynattribs[i].offset = elem->src_offset;
      } else {
         ves->hw_state.attribs[i].binding = binding;
         ves->hw_state.attribs[i].location = i;
         ves->hw_state.attribs[i].format = format;
         ves->hw_state.b.strides[binding] = elem->src_stride;
         assert(ves->hw_state.attribs[i].format != VK_FORMAT_UNDEFINED);
         ves->hw_state.attribs[i].offset = elem->src_offset;
         /* smallest stride that still covers this attribute's data */
         ves->min_stride[binding] = MAX2(ves->min_stride[binding], elem->src_offset + vk_format_get_blocksize(format));
      }
   }
   assert(num_decomposed + num_elements <= PIPE_MAX_ATTRIBS);
   /* append one extra attribute per additional channel of each decomposed
    * format, cloned from the base attribute with a bumped offset/location
    */
   u_foreach_bit(attr_index, ves->decomposed_attrs | ves->decomposed_attrs_without_w) {
      const struct pipe_vertex_element *elem = elements + attr_index;
      const struct util_format_description *desc = util_format_description(elem->src_format);
      /* per-channel byte size recorded above */
      unsigned size = 1;
      if (size32 & BITFIELD_BIT(attr_index))
         size = 4;
      else if (size16 & BITFIELD_BIT(attr_index))
         size = 2;
      else
         assert(size8 & BITFIELD_BIT(attr_index));
      for (unsigned j = 1; j < desc->nr_channels; j++) {
         if (screen->info.have_EXT_vertex_input_dynamic_state) {
            memcpy(&ves->hw_state.dynattribs[num_elements], &ves->hw_state.dynattribs[attr_index], sizeof(VkVertexInputAttributeDescription2EXT));
            ves->hw_state.dynattribs[num_elements].location = num_elements;
            ves->hw_state.dynattribs[num_elements].offset += j * size;
         } else {
            memcpy(&ves->hw_state.attribs[num_elements], &ves->hw_state.attribs[attr_index], sizeof(VkVertexInputAttributeDescription));
            ves->hw_state.attribs[num_elements].location = num_elements;
            ves->hw_state.attribs[num_elements].offset += j * size;
         }
         num_elements++;
      }
   }
   ves->hw_state.num_bindings = num_bindings;
   ves->hw_state.num_attribs = num_elements;
   if (screen->info.have_EXT_vertex_input_dynamic_state) {
      for (int j = 0; j < num_bindings; ++j) {
         ves->hw_state.dynbindings[j].sType = VK_STRUCTURE_TYPE_VERTEX_INPUT_BINDING_DESCRIPTION_2_EXT;
         ves->hw_state.dynbindings[j].binding = ves->bindings[j].binding;
         ves->hw_state.dynbindings[j].inputRate = ves->bindings[j].inputRate;
         ves->hw_state.dynbindings[j].stride = strides[j];
         /* VK_EXT_vertex_input_dynamic_state requires divisor >= 1 */
         if (ves->divisor[j])
            ves->hw_state.dynbindings[j].divisor = ves->divisor[j];
         else
            ves->hw_state.dynbindings[j].divisor = 1;
      }
   } else {
      for (int j = 0; j < num_bindings; ++j) {
         ves->hw_state.b.bindings[j].binding = ves->bindings[j].binding;
         ves->hw_state.b.bindings[j].inputRate = ves->bindings[j].inputRate;
         /* non-dynamic path: divisors go into the packed divisor list */
         if (ves->divisor[j]) {
            ves->hw_state.b.divisors[ves->hw_state.b.divisors_present].divisor = ves->divisor[j];
            ves->hw_state.b.divisors[ves->hw_state.b.divisors_present].binding = ves->bindings[j].binding;
            ves->hw_state.b.divisors_present++;
         }
      }
   }
   return ves;
}
179
180 static void
zink_bind_vertex_elements_state(struct pipe_context * pctx,void * cso)181 zink_bind_vertex_elements_state(struct pipe_context *pctx,
182 void *cso)
183 {
184 struct zink_context *ctx = zink_context(pctx);
185 struct zink_gfx_pipeline_state *state = &ctx->gfx_pipeline_state;
186 zink_flush_dgc_if_enabled(ctx);
187 ctx->element_state = cso;
188 if (cso) {
189 if (state->element_state != &ctx->element_state->hw_state) {
190 ctx->vertex_state_changed = !zink_screen(pctx->screen)->info.have_EXT_vertex_input_dynamic_state;
191 ctx->vertex_buffers_dirty = ctx->element_state->hw_state.num_bindings > 0;
192 }
193 state->element_state = &ctx->element_state->hw_state;
194 if (zink_screen(pctx->screen)->optimal_keys)
195 return;
196 const struct zink_vs_key *vs = zink_get_vs_key(ctx);
197 uint32_t decomposed_attrs = 0, decomposed_attrs_without_w = 0;
198 switch (vs->size) {
199 case 1:
200 decomposed_attrs = vs->u8.decomposed_attrs;
201 decomposed_attrs_without_w = vs->u8.decomposed_attrs_without_w;
202 break;
203 case 2:
204 decomposed_attrs = vs->u16.decomposed_attrs;
205 decomposed_attrs_without_w = vs->u16.decomposed_attrs_without_w;
206 break;
207 case 4:
208 decomposed_attrs = vs->u16.decomposed_attrs;
209 decomposed_attrs_without_w = vs->u16.decomposed_attrs_without_w;
210 break;
211 }
212 if (ctx->element_state->decomposed_attrs != decomposed_attrs ||
213 ctx->element_state->decomposed_attrs_without_w != decomposed_attrs_without_w) {
214 unsigned size = MAX2(ctx->element_state->decomposed_attrs_size, ctx->element_state->decomposed_attrs_without_w_size);
215 struct zink_shader_key *key = (struct zink_shader_key *)zink_set_vs_key(ctx);
216 key->size -= 2 * key->key.vs.size;
217 switch (size) {
218 case 1:
219 key->key.vs.u8.decomposed_attrs = ctx->element_state->decomposed_attrs;
220 key->key.vs.u8.decomposed_attrs_without_w = ctx->element_state->decomposed_attrs_without_w;
221 break;
222 case 2:
223 key->key.vs.u16.decomposed_attrs = ctx->element_state->decomposed_attrs;
224 key->key.vs.u16.decomposed_attrs_without_w = ctx->element_state->decomposed_attrs_without_w;
225 break;
226 case 4:
227 key->key.vs.u32.decomposed_attrs = ctx->element_state->decomposed_attrs;
228 key->key.vs.u32.decomposed_attrs_without_w = ctx->element_state->decomposed_attrs_without_w;
229 break;
230 default: break;
231 }
232 key->key.vs.size = size;
233 key->size += 2 * size;
234 }
235 } else {
236 state->element_state = NULL;
237 ctx->vertex_buffers_dirty = false;
238 }
239 }
240
/* CSO destructor: the state owns no Vulkan objects, so a plain free suffices. */
static void
zink_delete_vertex_elements_state(struct pipe_context *pctx,
                                  void *ves)
{
   FREE(ves);
}
247
248 static VkBlendFactor
blend_factor(enum pipe_blendfactor factor)249 blend_factor(enum pipe_blendfactor factor)
250 {
251 switch (factor) {
252 case PIPE_BLENDFACTOR_ONE: return VK_BLEND_FACTOR_ONE;
253 case PIPE_BLENDFACTOR_SRC_COLOR: return VK_BLEND_FACTOR_SRC_COLOR;
254 case PIPE_BLENDFACTOR_SRC_ALPHA: return VK_BLEND_FACTOR_SRC_ALPHA;
255 case PIPE_BLENDFACTOR_DST_ALPHA: return VK_BLEND_FACTOR_DST_ALPHA;
256 case PIPE_BLENDFACTOR_DST_COLOR: return VK_BLEND_FACTOR_DST_COLOR;
257 case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
258 return VK_BLEND_FACTOR_SRC_ALPHA_SATURATE;
259 case PIPE_BLENDFACTOR_CONST_COLOR: return VK_BLEND_FACTOR_CONSTANT_COLOR;
260 case PIPE_BLENDFACTOR_CONST_ALPHA: return VK_BLEND_FACTOR_CONSTANT_ALPHA;
261 case PIPE_BLENDFACTOR_SRC1_COLOR: return VK_BLEND_FACTOR_SRC1_COLOR;
262 case PIPE_BLENDFACTOR_SRC1_ALPHA: return VK_BLEND_FACTOR_SRC1_ALPHA;
263
264 case PIPE_BLENDFACTOR_ZERO: return VK_BLEND_FACTOR_ZERO;
265
266 case PIPE_BLENDFACTOR_INV_SRC_COLOR:
267 return VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
268 case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
269 return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
270 case PIPE_BLENDFACTOR_INV_DST_ALPHA:
271 return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
272 case PIPE_BLENDFACTOR_INV_DST_COLOR:
273 return VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
274
275 case PIPE_BLENDFACTOR_INV_CONST_COLOR:
276 return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR;
277 case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
278 return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA;
279 case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
280 return VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR;
281 case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
282 return VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA;
283 }
284 unreachable("unexpected blend factor");
285 }
286
287
288 static VkBlendOp
blend_op(enum pipe_blend_func func)289 blend_op(enum pipe_blend_func func)
290 {
291 switch (func) {
292 case PIPE_BLEND_ADD: return VK_BLEND_OP_ADD;
293 case PIPE_BLEND_SUBTRACT: return VK_BLEND_OP_SUBTRACT;
294 case PIPE_BLEND_REVERSE_SUBTRACT: return VK_BLEND_OP_REVERSE_SUBTRACT;
295 case PIPE_BLEND_MIN: return VK_BLEND_OP_MIN;
296 case PIPE_BLEND_MAX: return VK_BLEND_OP_MAX;
297 }
298 unreachable("unexpected blend function");
299 }
300
301 static VkLogicOp
logic_op(enum pipe_logicop func)302 logic_op(enum pipe_logicop func)
303 {
304 switch (func) {
305 case PIPE_LOGICOP_CLEAR: return VK_LOGIC_OP_CLEAR;
306 case PIPE_LOGICOP_NOR: return VK_LOGIC_OP_NOR;
307 case PIPE_LOGICOP_AND_INVERTED: return VK_LOGIC_OP_AND_INVERTED;
308 case PIPE_LOGICOP_COPY_INVERTED: return VK_LOGIC_OP_COPY_INVERTED;
309 case PIPE_LOGICOP_AND_REVERSE: return VK_LOGIC_OP_AND_REVERSE;
310 case PIPE_LOGICOP_INVERT: return VK_LOGIC_OP_INVERT;
311 case PIPE_LOGICOP_XOR: return VK_LOGIC_OP_XOR;
312 case PIPE_LOGICOP_NAND: return VK_LOGIC_OP_NAND;
313 case PIPE_LOGICOP_AND: return VK_LOGIC_OP_AND;
314 case PIPE_LOGICOP_EQUIV: return VK_LOGIC_OP_EQUIVALENT;
315 case PIPE_LOGICOP_NOOP: return VK_LOGIC_OP_NO_OP;
316 case PIPE_LOGICOP_OR_INVERTED: return VK_LOGIC_OP_OR_INVERTED;
317 case PIPE_LOGICOP_COPY: return VK_LOGIC_OP_COPY;
318 case PIPE_LOGICOP_OR_REVERSE: return VK_LOGIC_OP_OR_REVERSE;
319 case PIPE_LOGICOP_OR: return VK_LOGIC_OP_OR;
320 case PIPE_LOGICOP_SET: return VK_LOGIC_OP_SET;
321 }
322 unreachable("unexpected logicop function");
323 }
324
325 /* from iris */
326 static enum pipe_blendfactor
fix_blendfactor(enum pipe_blendfactor f,bool alpha_to_one)327 fix_blendfactor(enum pipe_blendfactor f, bool alpha_to_one)
328 {
329 if (alpha_to_one) {
330 if (f == PIPE_BLENDFACTOR_SRC1_ALPHA)
331 return PIPE_BLENDFACTOR_ONE;
332
333 if (f == PIPE_BLENDFACTOR_INV_SRC1_ALPHA)
334 return PIPE_BLENDFACTOR_ZERO;
335 }
336
337 return f;
338 }
339
/* Translate gallium blend state into both the baked
 * VkPipelineColorBlendAttachmentState array (monolithic pipeline path) and the
 * per-field ds3 arrays (extended-dynamic-state-3 path), so either consumer can
 * use the same CSO.
 */
static void *
zink_create_blend_state(struct pipe_context *pctx,
                        const struct pipe_blend_state *blend_state)
{
   struct zink_blend_state *cso = CALLOC_STRUCT(zink_blend_state);
   if (!cso)
      return NULL;
   /* pointer identity is sufficient for pipeline-state hashing */
   cso->hash = _mesa_hash_pointer(cso);

   if (blend_state->logicop_enable) {
      cso->logicop_enable = VK_TRUE;
      cso->logicop_func = logic_op(blend_state->logicop_func);
   }

   /* TODO: figure out what to do with dither (nothing is probably "OK" for now,
    * as dithering is undefined in GL
    */

   /* TODO: these are multisampling-state, and should be set there instead of
    * here, as that's closer tied to the update-frequency
    */
   cso->alpha_to_coverage = blend_state->alpha_to_coverage;
   cso->alpha_to_one = blend_state->alpha_to_one;
   cso->num_rts = blend_state->max_rt + 1;

   for (int i = 0; i < blend_state->max_rt + 1; ++i) {
      /* without independent blend, rt[0] applies to every attachment */
      const struct pipe_rt_blend_state *rt = blend_state->rt;
      if (blend_state->independent_blend_enable)
         rt = blend_state->rt + i;

      VkPipelineColorBlendAttachmentState att = {0};

      if (rt->blend_enable) {
         att.blendEnable = VK_TRUE;
         /* fix_blendfactor() folds SRC1_ALPHA factors when alpha_to_one is set */
         att.srcColorBlendFactor = blend_factor(fix_blendfactor(rt->rgb_src_factor, cso->alpha_to_one));
         att.dstColorBlendFactor = blend_factor(fix_blendfactor(rt->rgb_dst_factor, cso->alpha_to_one));
         att.colorBlendOp = blend_op(rt->rgb_func);
         att.srcAlphaBlendFactor = blend_factor(fix_blendfactor(rt->alpha_src_factor, cso->alpha_to_one));
         att.dstAlphaBlendFactor = blend_factor(fix_blendfactor(rt->alpha_dst_factor, cso->alpha_to_one));
         att.alphaBlendOp = blend_op(rt->alpha_func);
      }

      if (rt->colormask & PIPE_MASK_R)
         att.colorWriteMask |= VK_COLOR_COMPONENT_R_BIT;
      if (rt->colormask & PIPE_MASK_G)
         att.colorWriteMask |= VK_COLOR_COMPONENT_G_BIT;
      if (rt->colormask & PIPE_MASK_B)
         att.colorWriteMask |= VK_COLOR_COMPONENT_B_BIT;
      if (rt->colormask & PIPE_MASK_A)
         att.colorWriteMask |= VK_COLOR_COMPONENT_A_BIT;

      /* aggregate per-rt masks: 4 colormask bits per rt, 1 enable bit per rt */
      cso->wrmask |= (rt->colormask << i);
      if (rt->blend_enable)
         cso->enables |= BITFIELD_BIT(i);

      cso->attachments[i] = att;

      /* mirror into the dynamic-state-3 representation */
      cso->ds3.enables[i] = att.blendEnable;
      cso->ds3.eq[i].alphaBlendOp = att.alphaBlendOp;
      cso->ds3.eq[i].dstAlphaBlendFactor = att.dstAlphaBlendFactor;
      cso->ds3.eq[i].srcAlphaBlendFactor = att.srcAlphaBlendFactor;
      cso->ds3.eq[i].colorBlendOp = att.colorBlendOp;
      cso->ds3.eq[i].dstColorBlendFactor = att.dstColorBlendFactor;
      cso->ds3.eq[i].srcColorBlendFactor = att.srcColorBlendFactor;
      cso->ds3.wrmask[i] = att.colorWriteMask;
   }
   /* whether rt0 uses dual-source blending (SRC1 factors) */
   cso->dual_src_blend = util_blend_state_is_dual(blend_state, 0);

   return cso;
}
410
/* Bind a blend CSO: marks the pipeline dirty on the non-ds3 path, updates the
 * dual-color-blend fs key workaround, and on the full-ds3 path diffs each
 * blend field against the previous CSO to flag only the dynamic states that
 * actually need re-emission.
 */
static void
zink_bind_blend_state(struct pipe_context *pctx, void *cso)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);
   struct zink_gfx_pipeline_state* state = &zink_context(pctx)->gfx_pipeline_state;
   zink_flush_dgc_if_enabled(ctx);
   struct zink_blend_state *blend = cso;
   struct zink_blend_state *old_blend = state->blend_state;

   if (state->blend_state != cso) {
      state->blend_state = cso;
      /* without full dynamic-state-3, blend state is baked into the pipeline */
      if (!screen->have_full_ds3) {
         state->blend_id = blend ? blend->hash : 0;
         state->dirty = true;
      }
      /* driconf workaround: force dual-source blending in the fs key */
      bool force_dual_color_blend = screen->driconf.dual_color_blend_by_location &&
                                    blend && blend->dual_src_blend && state->blend_state->attachments[0].blendEnable;
      if (force_dual_color_blend != zink_get_fs_base_key(ctx)->force_dual_color_blend)
         zink_set_fs_base_key(ctx)->force_dual_color_blend = force_dual_color_blend;
      ctx->blend_state_changed = true;

      if (cso && screen->have_full_ds3) {
/* flag the corresponding ds3 dirty bit when the field differs from the
 * previously-bound CSO (or there was none)
 */
#define STATE_CHECK(NAME, FLAG) \
   if ((!old_blend || old_blend->NAME != blend->NAME)) \
      ctx->ds3_states |= BITFIELD_BIT(ZINK_DS3_BLEND_##FLAG)

         STATE_CHECK(alpha_to_coverage, A2C);
         if (screen->info.dynamic_state3_feats.extendedDynamicState3AlphaToOneEnable) {
            STATE_CHECK(alpha_to_one, A21);
         }
         STATE_CHECK(enables, ON);
         STATE_CHECK(wrmask, WRITE);
         /* blend equations must be memcmp'd per-rt; differing rt counts
          * always count as changed
          */
         if (old_blend && blend->num_rts == old_blend->num_rts) {
            if (memcmp(blend->ds3.eq, old_blend->ds3.eq, blend->num_rts * sizeof(blend->ds3.eq[0])))
               ctx->ds3_states |= BITFIELD_BIT(ZINK_DS3_BLEND_EQ);
         } else {
            ctx->ds3_states |= BITFIELD_BIT(ZINK_DS3_BLEND_EQ);
         }
         STATE_CHECK(logicop_enable, LOGIC_ON);
         STATE_CHECK(logicop_func, LOGIC);

#undef STATE_CHECK
      }

   }
}
458
/* Blend CSO destructor: no Vulkan objects owned, free the allocation. */
static void
zink_delete_blend_state(struct pipe_context *pctx, void *blend_state)
{
   FREE(blend_state);
}
464
465 static VkCompareOp
compare_op(enum pipe_compare_func func)466 compare_op(enum pipe_compare_func func)
467 {
468 switch (func) {
469 case PIPE_FUNC_NEVER: return VK_COMPARE_OP_NEVER;
470 case PIPE_FUNC_LESS: return VK_COMPARE_OP_LESS;
471 case PIPE_FUNC_EQUAL: return VK_COMPARE_OP_EQUAL;
472 case PIPE_FUNC_LEQUAL: return VK_COMPARE_OP_LESS_OR_EQUAL;
473 case PIPE_FUNC_GREATER: return VK_COMPARE_OP_GREATER;
474 case PIPE_FUNC_NOTEQUAL: return VK_COMPARE_OP_NOT_EQUAL;
475 case PIPE_FUNC_GEQUAL: return VK_COMPARE_OP_GREATER_OR_EQUAL;
476 case PIPE_FUNC_ALWAYS: return VK_COMPARE_OP_ALWAYS;
477 }
478 unreachable("unexpected func");
479 }
480
481 static VkStencilOp
stencil_op(enum pipe_stencil_op op)482 stencil_op(enum pipe_stencil_op op)
483 {
484 switch (op) {
485 case PIPE_STENCIL_OP_KEEP: return VK_STENCIL_OP_KEEP;
486 case PIPE_STENCIL_OP_ZERO: return VK_STENCIL_OP_ZERO;
487 case PIPE_STENCIL_OP_REPLACE: return VK_STENCIL_OP_REPLACE;
488 case PIPE_STENCIL_OP_INCR: return VK_STENCIL_OP_INCREMENT_AND_CLAMP;
489 case PIPE_STENCIL_OP_DECR: return VK_STENCIL_OP_DECREMENT_AND_CLAMP;
490 case PIPE_STENCIL_OP_INCR_WRAP: return VK_STENCIL_OP_INCREMENT_AND_WRAP;
491 case PIPE_STENCIL_OP_DECR_WRAP: return VK_STENCIL_OP_DECREMENT_AND_WRAP;
492 case PIPE_STENCIL_OP_INVERT: return VK_STENCIL_OP_INVERT;
493 }
494 unreachable("unexpected op");
495 }
496
497 static VkStencilOpState
stencil_op_state(const struct pipe_stencil_state * src)498 stencil_op_state(const struct pipe_stencil_state *src)
499 {
500 VkStencilOpState ret;
501 ret.failOp = stencil_op(src->fail_op);
502 ret.passOp = stencil_op(src->zpass_op);
503 ret.depthFailOp = stencil_op(src->zfail_op);
504 ret.compareOp = compare_op(src->func);
505 ret.compareMask = src->valuemask;
506 ret.writeMask = src->writemask;
507 ret.reference = 0; // not used: we'll use a dynamic state for this
508 return ret;
509 }
510
511 static void *
zink_create_depth_stencil_alpha_state(struct pipe_context * pctx,const struct pipe_depth_stencil_alpha_state * depth_stencil_alpha)512 zink_create_depth_stencil_alpha_state(struct pipe_context *pctx,
513 const struct pipe_depth_stencil_alpha_state *depth_stencil_alpha)
514 {
515 struct zink_depth_stencil_alpha_state *cso = CALLOC_STRUCT(zink_depth_stencil_alpha_state);
516 if (!cso)
517 return NULL;
518
519 cso->base = *depth_stencil_alpha;
520
521 if (depth_stencil_alpha->depth_enabled) {
522 cso->hw_state.depth_test = VK_TRUE;
523 cso->hw_state.depth_compare_op = compare_op(depth_stencil_alpha->depth_func);
524 }
525
526 if (depth_stencil_alpha->depth_bounds_test) {
527 cso->hw_state.depth_bounds_test = VK_TRUE;
528 cso->hw_state.min_depth_bounds = depth_stencil_alpha->depth_bounds_min;
529 cso->hw_state.max_depth_bounds = depth_stencil_alpha->depth_bounds_max;
530 }
531
532 if (depth_stencil_alpha->stencil[0].enabled) {
533 cso->hw_state.stencil_test = VK_TRUE;
534 cso->hw_state.stencil_front = stencil_op_state(depth_stencil_alpha->stencil);
535 }
536
537 if (depth_stencil_alpha->stencil[1].enabled)
538 cso->hw_state.stencil_back = stencil_op_state(depth_stencil_alpha->stencil + 1);
539 else
540 cso->hw_state.stencil_back = cso->hw_state.stencil_front;
541
542 cso->hw_state.depth_write = depth_stencil_alpha->depth_writemask;
543
544 return cso;
545 }
546
547 static void
zink_bind_depth_stencil_alpha_state(struct pipe_context * pctx,void * cso)548 zink_bind_depth_stencil_alpha_state(struct pipe_context *pctx, void *cso)
549 {
550 struct zink_context *ctx = zink_context(pctx);
551
552 zink_flush_dgc_if_enabled(ctx);
553 ctx->dsa_state = cso;
554
555 if (cso) {
556 struct zink_gfx_pipeline_state *state = &ctx->gfx_pipeline_state;
557 if (state->dyn_state1.depth_stencil_alpha_state != &ctx->dsa_state->hw_state) {
558 state->dyn_state1.depth_stencil_alpha_state = &ctx->dsa_state->hw_state;
559 state->dirty |= !zink_screen(pctx->screen)->info.have_EXT_extended_dynamic_state;
560 ctx->dsa_state_changed = true;
561 }
562 }
563 if (!ctx->track_renderpasses && !ctx->blitting)
564 ctx->rp_tc_info_updated = true;
565 }
566
/* DSA CSO destructor: no Vulkan objects owned, free the allocation. */
static void
zink_delete_depth_stencil_alpha_state(struct pipe_context *pctx,
                                      void *depth_stencil_alpha)
{
   FREE(depth_stencil_alpha);
}
573
574 static float
round_to_granularity(float value,float granularity)575 round_to_granularity(float value, float granularity)
576 {
577 return roundf(value / granularity) * granularity;
578 }
579
/* Snap a requested line width to the device's granularity (when nonzero),
 * then clamp it into the device's supported [min, max] range.
 */
static float
line_width(float width, float granularity, const float range[2])
{
   assert(granularity >= 0);
   assert(range[0] <= range[1]);

   const float snapped = (granularity > 0) ?
                         round_to_granularity(width, granularity) : width;

   return CLAMP(snapped, range[0], range[1]);
}
591
/* Translate gallium rasterizer state into zink's hw_state plus the assorted
 * derived fields (cull/front-face, depth bias, line mode/width), applying
 * driver workarounds for missing line-stipple/line-smooth/point support.
 */
static void *
zink_create_rasterizer_state(struct pipe_context *pctx,
                             const struct pipe_rasterizer_state *rs_state)
{
   struct zink_screen *screen = zink_screen(pctx->screen);

   struct zink_rasterizer_state *state = CALLOC_STRUCT(zink_rasterizer_state);
   if (!state)
      return NULL;

   state->base = *rs_state;
   /* NOTE(review): gallium appears to store the stipple factor biased by one
    * relative to what the rest of the driver consumes — confirm against users
    * of base.line_stipple_factor
    */
   state->base.line_stipple_factor++;

   /* stipple is dropped entirely on hardware that can't do it */
   state->hw_state.line_stipple_enable =
      rs_state->line_stipple_enable &&
      !screen->driver_workarounds.no_linestipple;

   /* Vulkan has a single depth-clip toggle, so near/far must match */
   assert(rs_state->depth_clip_far == rs_state->depth_clip_near);
   state->hw_state.depth_clip = rs_state->depth_clip_near;
   state->hw_state.depth_clamp = rs_state->depth_clamp;
   state->hw_state.pv_last = !rs_state->flatshade_first;
   state->hw_state.clip_halfz = rs_state->clip_halfz;

   assert(rs_state->fill_front <= PIPE_POLYGON_MODE_POINT);
   if (rs_state->fill_back != rs_state->fill_front)
      debug_printf("BUG: vulkan doesn't support different front and back fill modes\n");

   /* no_hw_gl_point workaround: emulate GL points via a fill-mode quad,
    * so culling must be disabled
    */
   if (rs_state->fill_front == PIPE_POLYGON_MODE_POINT &&
       screen->driver_workarounds.no_hw_gl_point) {
      state->hw_state.polygon_mode = VK_POLYGON_MODE_FILL;
      state->cull_mode = VK_CULL_MODE_NONE;
   } else {
      state->hw_state.polygon_mode = rs_state->fill_front; // same values
      state->cull_mode = rs_state->cull_face; // same bits
   }

   state->front_face = rs_state->front_ccw ?
                       VK_FRONT_FACE_COUNTER_CLOCKWISE :
                       VK_FRONT_FACE_CLOCKWISE;

   /* pick the line rasterization mode requested by the state... */
   state->hw_state.line_mode = VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT;
   if (rs_state->line_rectangular) {
      if (rs_state->line_smooth &&
          !screen->driver_workarounds.no_linesmooth)
         state->hw_state.line_mode = VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT;
      else
         state->hw_state.line_mode = VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT;
   } else {
      state->hw_state.line_mode = VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT;
   }
   /* ...then fall back to DEFAULT for the dynamic-state copy when the device
    * doesn't actually support that mode
    */
   state->dynamic_line_mode = state->hw_state.line_mode;
   switch (state->hw_state.line_mode) {
   case VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT:
      if (!screen->info.line_rast_feats.rectangularLines)
         state->dynamic_line_mode = VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT;
      break;
   case VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT:
      if (!screen->info.line_rast_feats.smoothLines)
         state->dynamic_line_mode = VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT;
      break;
   case VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT:
      if (!screen->info.line_rast_feats.bresenhamLines)
         state->dynamic_line_mode = VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT;
      break;
   default: break;
   }

   /* normalize to a no-op pattern when stipple is off, so state comparisons
    * don't see stale factor/pattern values
    */
   if (!rs_state->line_stipple_enable) {
      state->base.line_stipple_factor = 1;
      state->base.line_stipple_pattern = UINT16_MAX;
   }

   state->offset_fill = util_get_offset(rs_state, rs_state->fill_front);
   state->offset_units = rs_state->offset_units;
   /* NOTE(review): the 2x scale for scaled offset units compensates for a
    * units-interpretation difference vs Vulkan depth bias — confirm against
    * driver history
    */
   if (!rs_state->offset_units_unscaled)
      state->offset_units *= 2;
   state->offset_clamp = rs_state->offset_clamp;
   state->offset_scale = rs_state->offset_scale;

   /* snap and clamp to device line-width limits */
   state->line_width = line_width(rs_state->line_width,
                                  screen->info.props.limits.lineWidthGranularity,
                                  screen->info.props.limits.lineWidthRange);

   return state;
}
677
/* Bind a rasterizer CSO. Snapshots the previously-bound values first, then
 * diffs each derived piece of state (provoking vertex, clip mode, scissor,
 * persample interp, line width, ...) to flag only the work that actually
 * needs redoing.
 */
static void
zink_bind_rasterizer_state(struct pipe_context *pctx, void *cso)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);
   struct zink_rasterizer_state *prev_state = ctx->rast_state;
   /* snapshot previous values before the bind overwrites ctx->rast_state */
   bool point_quad_rasterization = ctx->rast_state ? ctx->rast_state->base.point_quad_rasterization : false;
   bool scissor = ctx->rast_state ? ctx->rast_state->base.scissor : false;
   bool pv_last = ctx->rast_state ? ctx->rast_state->hw_state.pv_last : false;
   bool force_persample_interp = ctx->gfx_pipeline_state.force_persample_interp;
   bool clip_halfz = ctx->rast_state ? ctx->rast_state->hw_state.clip_halfz : false;
   bool rasterizer_discard = ctx->rast_state ? ctx->rast_state->base.rasterizer_discard : false;
   bool half_pixel_center = ctx->rast_state ? ctx->rast_state->base.half_pixel_center : true;
   float line_width = ctx->rast_state ? ctx->rast_state->base.line_width : 1.0;
   zink_flush_dgc_if_enabled(ctx);
   ctx->rast_state = cso;

   if (ctx->rast_state) {
      if (screen->info.have_EXT_provoking_vertex &&
          pv_last != ctx->rast_state->hw_state.pv_last &&
          /* without this prop, change in pv mode requires new rp */
          !screen->info.pv_props.provokingVertexModePerPipeline)
         zink_batch_no_rp(ctx);
      memcpy(&ctx->gfx_pipeline_state.dyn_state3, &ctx->rast_state->hw_state, sizeof(struct zink_rasterizer_hw_state));

      /* without dynamic-state-3, raster state is baked into the pipeline */
      ctx->gfx_pipeline_state.dirty |= !zink_screen(pctx->screen)->info.have_EXT_extended_dynamic_state3;
      ctx->rast_state_changed = true;

      if (clip_halfz != ctx->rast_state->base.clip_halfz) {
         /* either the pipeline encodes clip control, or the shader key
          * must compensate
          */
         if (screen->info.have_EXT_depth_clip_control)
            ctx->gfx_pipeline_state.dirty = true;
         else
            zink_set_last_vertex_key(ctx)->clip_halfz = ctx->rast_state->base.clip_halfz;
         ctx->vp_state_changed = true;
      }

      if (screen->info.have_EXT_extended_dynamic_state3) {
/* flag the corresponding ds3 dirty bit when the field differs from the
 * previously-bound CSO (or there was none)
 */
#define STATE_CHECK(NAME, FLAG) \
   if (cso && (!prev_state || prev_state->NAME != ctx->rast_state->NAME)) \
      ctx->ds3_states |= BITFIELD_BIT(ZINK_DS3_RAST_##FLAG)

         if (!screen->driver_workarounds.no_linestipple) {
            if (ctx->rast_state->base.line_stipple_enable) {
               STATE_CHECK(base.line_stipple_factor, STIPPLE);
               STATE_CHECK(base.line_stipple_pattern, STIPPLE);
            } else {
               ctx->ds3_states &= ~BITFIELD_BIT(ZINK_DS3_RAST_STIPPLE);
            }
            if (screen->info.dynamic_state3_feats.extendedDynamicState3LineStippleEnable) {
               STATE_CHECK(hw_state.line_stipple_enable, STIPPLE_ON);
            }
         }
         STATE_CHECK(hw_state.depth_clip, CLIP);
         STATE_CHECK(hw_state.depth_clamp, CLAMP);
         STATE_CHECK(hw_state.polygon_mode, POLYGON);
         STATE_CHECK(hw_state.clip_halfz, HALFZ);
         STATE_CHECK(hw_state.pv_last, PV);
         STATE_CHECK(dynamic_line_mode, LINE);

#undef STATE_CHECK
      }

      if (fabs(ctx->rast_state->base.line_width - line_width) > FLT_EPSILON)
         ctx->line_width_changed = true;

      /* no_hw_gl_point workaround: GL points are lowered in the GS */
      bool lower_gl_point = screen->driver_workarounds.no_hw_gl_point;
      lower_gl_point &= ctx->rast_state->base.fill_front == PIPE_POLYGON_MODE_POINT;
      if (zink_get_gs_key(ctx)->lower_gl_point != lower_gl_point)
         zink_set_gs_key(ctx)->lower_gl_point = lower_gl_point;

      if (ctx->gfx_pipeline_state.dyn_state1.front_face != ctx->rast_state->front_face) {
         ctx->gfx_pipeline_state.dyn_state1.front_face = ctx->rast_state->front_face;
         ctx->gfx_pipeline_state.dirty |= !zink_screen(pctx->screen)->info.have_EXT_extended_dynamic_state;
      }
      if (ctx->gfx_pipeline_state.dyn_state1.cull_mode != ctx->rast_state->cull_mode) {
         ctx->gfx_pipeline_state.dyn_state1.cull_mode = ctx->rast_state->cull_mode;
         ctx->gfx_pipeline_state.dirty |= !zink_screen(pctx->screen)->info.have_EXT_extended_dynamic_state;
      }
      /* primitives-generated queries may require rasterization to stay on */
      if (!ctx->primitives_generated_active)
         zink_set_rasterizer_discard(ctx, false);
      else if (rasterizer_discard != ctx->rast_state->base.rasterizer_discard)
         zink_set_null_fs(ctx);

      if (ctx->rast_state->base.point_quad_rasterization ||
          ctx->rast_state->base.point_quad_rasterization != point_quad_rasterization)
         zink_set_fs_point_coord_key(ctx);
      if (ctx->rast_state->base.scissor != scissor)
         ctx->scissor_changed = true;

      if (ctx->rast_state->base.force_persample_interp != force_persample_interp) {
         zink_set_fs_base_key(ctx)->force_persample_interp = ctx->rast_state->base.force_persample_interp;
         ctx->gfx_pipeline_state.dirty = true;
      }
      ctx->gfx_pipeline_state.force_persample_interp = ctx->rast_state->base.force_persample_interp;

      if (ctx->rast_state->base.half_pixel_center != half_pixel_center)
         ctx->vp_state_changed = true;

      if (!screen->optimal_keys)
         zink_update_gs_key_rectangular_line(ctx);
   }
}
780
/* Rasterizer CSO destructor: no Vulkan objects owned, free the allocation. */
static void
zink_delete_rasterizer_state(struct pipe_context *pctx, void *rs_state)
{
   FREE(rs_state);
}
786
787 struct pipe_vertex_state *
zink_create_vertex_state(struct pipe_screen * pscreen,struct pipe_vertex_buffer * buffer,const struct pipe_vertex_element * elements,unsigned num_elements,struct pipe_resource * indexbuf,uint32_t full_velem_mask)788 zink_create_vertex_state(struct pipe_screen *pscreen,
789 struct pipe_vertex_buffer *buffer,
790 const struct pipe_vertex_element *elements,
791 unsigned num_elements,
792 struct pipe_resource *indexbuf,
793 uint32_t full_velem_mask)
794 {
795 struct zink_vertex_state *zstate = CALLOC_STRUCT(zink_vertex_state);
796 if (!zstate) {
797 mesa_loge("ZINK: failed to allocate zstate!");
798 return NULL;
799 }
800
801 util_init_pipe_vertex_state(pscreen, buffer, elements, num_elements, indexbuf, full_velem_mask,
802 &zstate->b);
803
804 /* Initialize the vertex element state in state->element.
805 * Do it by creating a vertex element state object and copying it there.
806 */
807 struct zink_context ctx;
808 ctx.base.screen = pscreen;
809 struct zink_vertex_elements_state *elems = zink_create_vertex_elements_state(&ctx.base, num_elements, elements);
810 zstate->velems = *elems;
811 zink_delete_vertex_elements_state(&ctx.base, elems);
812
813 return &zstate->b;
814 }
815
816 void
zink_vertex_state_destroy(struct pipe_screen * pscreen,struct pipe_vertex_state * vstate)817 zink_vertex_state_destroy(struct pipe_screen *pscreen, struct pipe_vertex_state *vstate)
818 {
819 pipe_vertex_buffer_unreference(&vstate->input.vbuffer);
820 pipe_resource_reference(&vstate->input.indexbuf, NULL);
821 FREE(vstate);
822 }
823
824 struct pipe_vertex_state *
zink_cache_create_vertex_state(struct pipe_screen * pscreen,struct pipe_vertex_buffer * buffer,const struct pipe_vertex_element * elements,unsigned num_elements,struct pipe_resource * indexbuf,uint32_t full_velem_mask)825 zink_cache_create_vertex_state(struct pipe_screen *pscreen,
826 struct pipe_vertex_buffer *buffer,
827 const struct pipe_vertex_element *elements,
828 unsigned num_elements,
829 struct pipe_resource *indexbuf,
830 uint32_t full_velem_mask)
831 {
832 struct zink_screen *screen = zink_screen(pscreen);
833
834 return util_vertex_state_cache_get(pscreen, buffer, elements, num_elements, indexbuf,
835 full_velem_mask, &screen->vertex_state_cache);
836 }
837
838 void
zink_cache_vertex_state_destroy(struct pipe_screen * pscreen,struct pipe_vertex_state * vstate)839 zink_cache_vertex_state_destroy(struct pipe_screen *pscreen, struct pipe_vertex_state *vstate)
840 {
841 struct zink_screen *screen = zink_screen(pscreen);
842
843 util_vertex_state_destroy(pscreen, &screen->vertex_state_cache, vstate);
844 }
845
846 void
zink_context_state_init(struct pipe_context * pctx)847 zink_context_state_init(struct pipe_context *pctx)
848 {
849 pctx->create_vertex_elements_state = zink_create_vertex_elements_state;
850 pctx->bind_vertex_elements_state = zink_bind_vertex_elements_state;
851 pctx->delete_vertex_elements_state = zink_delete_vertex_elements_state;
852
853 pctx->create_blend_state = zink_create_blend_state;
854 pctx->bind_blend_state = zink_bind_blend_state;
855 pctx->delete_blend_state = zink_delete_blend_state;
856
857 pctx->create_depth_stencil_alpha_state = zink_create_depth_stencil_alpha_state;
858 pctx->bind_depth_stencil_alpha_state = zink_bind_depth_stencil_alpha_state;
859 pctx->delete_depth_stencil_alpha_state = zink_delete_depth_stencil_alpha_state;
860
861 pctx->create_rasterizer_state = zink_create_rasterizer_state;
862 pctx->bind_rasterizer_state = zink_bind_rasterizer_state;
863 pctx->delete_rasterizer_state = zink_delete_rasterizer_state;
864 }
865