/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

#include "common/gen_l3_config.h"
#include "common/gen_sample_positions.h"
#include "vk_util.h"
#include "vk_format_info.h"
static uint32_t
vertex_element_comp_control(enum isl_format format, unsigned comp)
{
   uint8_t bits;
   switch (comp) {
   case 0: bits = isl_format_layouts[format].channels.r.bits; break;
   case 1: bits = isl_format_layouts[format].channels.g.bits; break;
   case 2: bits = isl_format_layouts[format].channels.b.bits; break;
   case 3: bits = isl_format_layouts[format].channels.a.bits; break;
   default: unreachable("Invalid component");
   }

   /*
    * Take into account hardware restrictions when dealing with 64-bit floats.
    *
    * From the Broadwell spec, command reference structures, page 586:
    *  "When SourceElementFormat is set to one of the *64*_PASSTHRU formats,
    *   64-bit components are stored in the URB without any conversion. In
    *   this case, vertex elements must be written as 128 or 256 bits, with
    *   VFCOMP_STORE_0 being used to pad the output as required. E.g., if
    *   R64_PASSTHRU is used to copy a 64-bit Red component into the URB,
    *   Component 1 must be specified as VFCOMP_STORE_0 (with Components 2,3
    *   set to VFCOMP_NOSTORE) in order to output a 128-bit vertex element, or
    *   Components 1-3 must be specified as VFCOMP_STORE_0 in order to output
    *   a 256-bit vertex element. Likewise, use of R64G64B64_PASSTHRU requires
    *   Component 3 to be specified as VFCOMP_STORE_0 in order to output a
    *   256-bit vertex element."
    */
   if (bits) {
      return VFCOMP_STORE_SRC;
   } else if (comp >= 2 &&
              !isl_format_layouts[format].channels.b.bits &&
              isl_format_layouts[format].channels.r.type == ISL_RAW) {
      /* When emitting 64-bit attributes, we need to write either 128 or 256
       * bit chunks, using VFCOMP_NOSTORE when not writing the chunk, and
       * VFCOMP_STORE_0 to pad the written chunk.
       */
      return VFCOMP_NOSTORE;
   } else if (comp < 3 ||
              isl_format_layouts[format].channels.r.type == ISL_RAW) {
      /* Note that we need to pad with value 0, not 1, due to hardware
       * restrictions (see the comment above).
       */
      return VFCOMP_STORE_0;
   } else if (isl_format_layouts[format].channels.r.type == ISL_UINT ||
              isl_format_layouts[format].channels.r.type == ISL_SINT) {
      assert(comp == 3);
      return VFCOMP_STORE_1_INT;
   } else {
      assert(comp == 3);
      return VFCOMP_STORE_1_FP;
   }
}
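
/* Worked example of the rules above (for illustration): for
 * ISL_FORMAT_R64G64_PASSTHRU, components 0 and 1 have nonzero bits and
 * return VFCOMP_STORE_SRC while components 2 and 3 hit the ISL_RAW case and
 * return VFCOMP_NOSTORE, producing the 128-bit element the Broadwell text
 * requires.  For ISL_FORMAT_R32G32B32_FLOAT, component 3 falls through to
 * VFCOMP_STORE_1_FP, giving the implicit w = 1.0.
 */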

static void
emit_vertex_input(struct anv_pipeline *pipeline,
                  const VkPipelineVertexInputStateCreateInfo *info)
{
   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);

   /* Pull inputs_read out of the VS prog data */
   const uint64_t inputs_read = vs_prog_data->inputs_read;
   const uint64_t double_inputs_read = vs_prog_data->double_inputs_read;
   assert((inputs_read & ((1 << VERT_ATTRIB_GENERIC0) - 1)) == 0);
   const uint32_t elements = inputs_read >> VERT_ATTRIB_GENERIC0;
   const uint32_t elements_double = double_inputs_read >> VERT_ATTRIB_GENERIC0;
   const bool needs_svgs_elem = vs_prog_data->uses_vertexid ||
                                vs_prog_data->uses_instanceid ||
                                vs_prog_data->uses_basevertex ||
                                vs_prog_data->uses_baseinstance;

   uint32_t elem_count = __builtin_popcount(elements) -
      __builtin_popcount(elements_double) / 2;
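   /* A dvec3/dvec4 input occupies two attribute slots (and sets two bits in
    * elements_double) but is programmed as a single vertex element, hence
    * the halved popcount above.  Worked example: locations 0 and 3 hold
    * vec4s and locations 1-2 hold one dvec4, so elements = 0b1111,
    * elements_double = 0b0110, and elem_count = 4 - 1 = 3.
    */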

   const uint32_t total_elems =
      elem_count + needs_svgs_elem + vs_prog_data->uses_drawid;
   if (total_elems == 0)
      return;

   uint32_t *p;

   const uint32_t num_dwords = 1 + total_elems * 2;
   p = anv_batch_emitn(&pipeline->batch, num_dwords,
                       GENX(3DSTATE_VERTEX_ELEMENTS));
   if (!p)
      return;
   memset(p + 1, 0, (num_dwords - 1) * 4);
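   /* The memset above zeroes the payload; anv_batch_emitn has already
    * written the packet header into p[0], so any VERTEX_ELEMENT_STATE slots
    * we never fill below simply stay invalid (Valid = false).
    */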

   for (uint32_t i = 0; i < info->vertexAttributeDescriptionCount; i++) {
      const VkVertexInputAttributeDescription *desc =
         &info->pVertexAttributeDescriptions[i];
      enum isl_format format = anv_get_isl_format(&pipeline->device->info,
                                                  desc->format,
                                                  VK_IMAGE_ASPECT_COLOR_BIT,
                                                  VK_IMAGE_TILING_LINEAR);

      assert(desc->binding < MAX_VBS);

      if ((elements & (1 << desc->location)) == 0)
         continue; /* Binding unused */

      uint32_t slot =
         __builtin_popcount(elements & ((1 << desc->location) - 1)) -
         DIV_ROUND_UP(__builtin_popcount(elements_double &
                                         ((1 << desc->location) - 1)), 2);
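      /* The slot is the element index: the number of lower-numbered active
       * slots, again counting each two-slot double attribute only once.
       * Continuing the example above, the vec4 at location 3 masks off
       * 0b0111, giving slot = 3 - DIV_ROUND_UP(2, 2) = 2.
       */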

      struct GENX(VERTEX_ELEMENT_STATE) element = {
         .VertexBufferIndex = desc->binding,
         .Valid = true,
         .SourceElementFormat = (enum GENX(SURFACE_FORMAT)) format,
         .EdgeFlagEnable = false,
         .SourceElementOffset = desc->offset,
         .Component0Control = vertex_element_comp_control(format, 0),
         .Component1Control = vertex_element_comp_control(format, 1),
         .Component2Control = vertex_element_comp_control(format, 2),
         .Component3Control = vertex_element_comp_control(format, 3),
      };
      GENX(VERTEX_ELEMENT_STATE_pack)(NULL, &p[1 + slot * 2], &element);

#if GEN_GEN >= 8
      /* On Broadwell and later, we have a separate VF_INSTANCING packet
       * that controls instancing.  On Haswell and prior, that's part of
       * VERTEX_BUFFER_STATE which we emit later.
       */
      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_INSTANCING), vfi) {
         vfi.InstancingEnable = pipeline->instancing_enable[desc->binding];
         vfi.VertexElementIndex = slot;
         /* Our implementation of VK_KHX_multiview uses instancing to draw
          * the different views.  If the client asks for instancing, we
          * need to use the Instance Data Step Rate to ensure that we
          * repeat the client's per-instance data once for each view.
          */
         vfi.InstanceDataStepRate = anv_subpass_view_count(pipeline->subpass);
      }
#endif
   }

   const uint32_t id_slot = elem_count;
   if (needs_svgs_elem) {
      /* From the Broadwell PRM for the 3D_Vertex_Component_Control enum:
       *    "Within a VERTEX_ELEMENT_STATE structure, if a Component
       *    Control field is set to something other than VFCOMP_STORE_SRC,
       *    no higher-numbered Component Control fields may be set to
       *    VFCOMP_STORE_SRC"
       *
       * This means that if we have BaseInstance, we need BaseVertex as
       * well.  Just do all or nothing.
       */
      uint32_t base_ctrl = (vs_prog_data->uses_basevertex ||
                            vs_prog_data->uses_baseinstance) ?
                           VFCOMP_STORE_SRC : VFCOMP_STORE_0;

      struct GENX(VERTEX_ELEMENT_STATE) element = {
         .VertexBufferIndex = ANV_SVGS_VB_INDEX,
         .Valid = true,
         .SourceElementFormat = (enum GENX(SURFACE_FORMAT)) ISL_FORMAT_R32G32_UINT,
         .Component0Control = base_ctrl,
         .Component1Control = base_ctrl,
#if GEN_GEN >= 8
         .Component2Control = VFCOMP_STORE_0,
         .Component3Control = VFCOMP_STORE_0,
#else
         .Component2Control = VFCOMP_STORE_VID,
         .Component3Control = VFCOMP_STORE_IID,
#endif
      };
      GENX(VERTEX_ELEMENT_STATE_pack)(NULL, &p[1 + id_slot * 2], &element);
   }

#if GEN_GEN >= 8
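   /* On gen8+, vertex and instance ID are delivered by 3DSTATE_VF_SGVS
    * below, which patches them into components 2 and 3 of the SVGS element;
    * that is why those components were left as VFCOMP_STORE_0 above.
    */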
   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_SGVS), sgvs) {
      sgvs.VertexIDEnable              = vs_prog_data->uses_vertexid;
      sgvs.VertexIDComponentNumber     = 2;
      sgvs.VertexIDElementOffset       = id_slot;
      sgvs.InstanceIDEnable            = vs_prog_data->uses_instanceid;
      sgvs.InstanceIDComponentNumber   = 3;
      sgvs.InstanceIDElementOffset     = id_slot;
   }
#endif

   const uint32_t drawid_slot = elem_count + needs_svgs_elem;
   if (vs_prog_data->uses_drawid) {
      struct GENX(VERTEX_ELEMENT_STATE) element = {
         .VertexBufferIndex = ANV_DRAWID_VB_INDEX,
         .Valid = true,
         .SourceElementFormat = (enum GENX(SURFACE_FORMAT)) ISL_FORMAT_R32_UINT,
         .Component0Control = VFCOMP_STORE_SRC,
         .Component1Control = VFCOMP_STORE_0,
         .Component2Control = VFCOMP_STORE_0,
         .Component3Control = VFCOMP_STORE_0,
      };
      GENX(VERTEX_ELEMENT_STATE_pack)(NULL,
                                      &p[1 + drawid_slot * 2],
                                      &element);

#if GEN_GEN >= 8
      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_INSTANCING), vfi) {
         vfi.VertexElementIndex = drawid_slot;
      }
#endif
   }
}

void
genX(emit_urb_setup)(struct anv_device *device, struct anv_batch *batch,
                     const struct gen_l3_config *l3_config,
                     VkShaderStageFlags active_stages,
                     const unsigned entry_size[4])
{
   const struct gen_device_info *devinfo = &device->info;
#if GEN_IS_HASWELL
   const unsigned push_constant_kb = devinfo->gt == 3 ? 32 : 16;
#else
   const unsigned push_constant_kb = GEN_GEN >= 8 ? 32 : 16;
#endif

   const unsigned urb_size_kb = gen_get_l3_config_urb_size(devinfo, l3_config);

   unsigned entries[4];
   unsigned start[4];
   gen_get_urb_config(devinfo,
                      1024 * push_constant_kb, 1024 * urb_size_kb,
                      active_stages &
                         VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
                      active_stages & VK_SHADER_STAGE_GEOMETRY_BIT,
                      entry_size, entries, start);

#if GEN_GEN == 7 && !GEN_IS_HASWELL
   /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
    *
    *    "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth stall
    *    needs to be sent just prior to any 3DSTATE_VS, 3DSTATE_URB_VS,
    *    3DSTATE_CONSTANT_VS, 3DSTATE_BINDING_TABLE_POINTER_VS,
    *    3DSTATE_SAMPLER_STATE_POINTER_VS command.  Only one PIPE_CONTROL
    *    needs to be sent before any combination of VS associated 3DSTATE."
    */
   anv_batch_emit(batch, GEN7_PIPE_CONTROL, pc) {
      pc.DepthStallEnable  = true;
      pc.PostSyncOperation = WriteImmediateData;
      pc.Address           = (struct anv_address) { &device->workaround_bo, 0 };
   }
#endif

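   /* 3DSTATE_URB_VS, _HS, _DS and _GS have consecutive 3D command
    * sub-opcodes, so we can emit all four packets from the VS template by
    * bumping the sub-opcode by the stage index.
    */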
   for (int i = 0; i <= MESA_SHADER_GEOMETRY; i++) {
      anv_batch_emit(batch, GENX(3DSTATE_URB_VS), urb) {
         urb._3DCommandSubOpcode      += i;
         urb.VSURBStartingAddress      = start[i];
         urb.VSURBEntryAllocationSize  = entry_size[i] - 1;
         urb.VSNumberofURBEntries      = entries[i];
      }
   }
}

static void
emit_urb_setup(struct anv_pipeline *pipeline)
{
   unsigned entry_size[4];
   for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
      const struct brw_vue_prog_data *prog_data =
         !anv_pipeline_has_stage(pipeline, i) ? NULL :
         (const struct brw_vue_prog_data *) pipeline->shaders[i]->prog_data;

      entry_size[i] = prog_data ? prog_data->urb_entry_size : 1;
   }

   genX(emit_urb_setup)(pipeline->device, &pipeline->batch,
                        pipeline->urb.l3_config,
                        pipeline->active_stages, entry_size);
}

static void
emit_3dstate_sbe(struct anv_pipeline *pipeline)
{
   const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);

   if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SBE), sbe);
#if GEN_GEN >= 8
      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SBE_SWIZ), sbe);
#endif
      return;
   }

   const struct brw_vue_map *fs_input_map =
      &anv_pipeline_get_last_vue_prog_data(pipeline)->vue_map;

   struct GENX(3DSTATE_SBE) sbe = {
      GENX(3DSTATE_SBE_header),
      .AttributeSwizzleEnable = true,
      .PointSpriteTextureCoordinateOrigin = UPPERLEFT,
      .NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs,
      .ConstantInterpolationEnable = wm_prog_data->flat_inputs,
   };

#if GEN_GEN >= 9
   for (unsigned i = 0; i < 32; i++)
      sbe.AttributeActiveComponentFormat[i] = ACF_XYZW;
#endif

#if GEN_GEN >= 8
   /* On Broadwell, they broke 3DSTATE_SBE into two packets */
   struct GENX(3DSTATE_SBE_SWIZ) swiz = {
      GENX(3DSTATE_SBE_SWIZ_header),
   };
#else
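   /* On gen7 the swizzle fields live in 3DSTATE_SBE itself, so alias swiz
    * to sbe and let the assignments below land in the right packet.
    */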
#  define swiz sbe
#endif

   /* Skip the VUE header and position slots by default */
   unsigned urb_entry_read_offset = 1;
   int max_source_attr = 0;
   for (int attr = 0; attr < VARYING_SLOT_MAX; attr++) {
      int input_index = wm_prog_data->urb_setup[attr];

      if (input_index < 0)
         continue;

      /* gl_Layer is stored in the VUE header */
      if (attr == VARYING_SLOT_LAYER) {
         urb_entry_read_offset = 0;
         continue;
      }

      if (attr == VARYING_SLOT_PNTC) {
         sbe.PointSpriteTextureCoordinateEnable = 1 << input_index;
         continue;
      }

      const int slot = fs_input_map->varying_to_slot[attr];

      if (input_index >= 16)
         continue;

      if (slot == -1) {
         /* This attribute does not exist in the VUE, which means that the
          * vertex shader did not write to it.  It could be that it's a
          * regular varying read by the fragment shader but not written by
          * the vertex shader, or it's gl_PrimitiveID.  In the first case the
          * value is undefined, in the second it needs to be
          * gl_PrimitiveID.
          */
         swiz.Attribute[input_index].ConstantSource = PRIM_ID;
         swiz.Attribute[input_index].ComponentOverrideX = true;
         swiz.Attribute[input_index].ComponentOverrideY = true;
         swiz.Attribute[input_index].ComponentOverrideZ = true;
         swiz.Attribute[input_index].ComponentOverrideW = true;
      } else {
         /* We have to subtract two slots to account for the URB entry output
          * read offset in the VS and GS stages.
          */
         const int source_attr = slot - 2 * urb_entry_read_offset;
         assert(source_attr >= 0 && source_attr < 32);
         max_source_attr = MAX2(max_source_attr, source_attr);
         swiz.Attribute[input_index].SourceAttribute = source_attr;
      }
   }

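   /* VertexURBEntryReadOffset/ReadLength are in 256-bit URB rows, each
    * holding two 128-bit varying slots; that is why source_attr above
    * subtracts two slots per row of read offset, and why the read length
    * divides the attribute count by two, rounding up.
    */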
   sbe.VertexURBEntryReadOffset = urb_entry_read_offset;
   sbe.VertexURBEntryReadLength = DIV_ROUND_UP(max_source_attr + 1, 2);
#if GEN_GEN >= 8
   sbe.ForceVertexURBEntryReadOffset = true;
   sbe.ForceVertexURBEntryReadLength = true;
#endif

   uint32_t *dw = anv_batch_emit_dwords(&pipeline->batch,
                                        GENX(3DSTATE_SBE_length));
   if (!dw)
      return;
   GENX(3DSTATE_SBE_pack)(&pipeline->batch, dw, &sbe);

#if GEN_GEN >= 8
   dw = anv_batch_emit_dwords(&pipeline->batch, GENX(3DSTATE_SBE_SWIZ_length));
   if (!dw)
      return;
   GENX(3DSTATE_SBE_SWIZ_pack)(&pipeline->batch, dw, &swiz);
#endif
}

static const uint32_t vk_to_gen_cullmode[] = {
   [VK_CULL_MODE_NONE]                       = CULLMODE_NONE,
   [VK_CULL_MODE_FRONT_BIT]                  = CULLMODE_FRONT,
   [VK_CULL_MODE_BACK_BIT]                   = CULLMODE_BACK,
   [VK_CULL_MODE_FRONT_AND_BACK]             = CULLMODE_BOTH
};

static const uint32_t vk_to_gen_fillmode[] = {
   [VK_POLYGON_MODE_FILL]                    = FILL_MODE_SOLID,
   [VK_POLYGON_MODE_LINE]                    = FILL_MODE_WIREFRAME,
   [VK_POLYGON_MODE_POINT]                   = FILL_MODE_POINT,
};

static const uint32_t vk_to_gen_front_face[] = {
   [VK_FRONT_FACE_COUNTER_CLOCKWISE]         = 1,
   [VK_FRONT_FACE_CLOCKWISE]                 = 0
};

static void
emit_rs_state(struct anv_pipeline *pipeline,
              const VkPipelineRasterizationStateCreateInfo *rs_info,
              const VkPipelineMultisampleStateCreateInfo *ms_info,
              const struct anv_render_pass *pass,
              const struct anv_subpass *subpass)
{
   struct GENX(3DSTATE_SF) sf = {
      GENX(3DSTATE_SF_header),
   };

   sf.ViewportTransformEnable = true;
   sf.StatisticsEnable = true;
   sf.TriangleStripListProvokingVertexSelect = 0;
   sf.LineStripListProvokingVertexSelect = 0;
   sf.TriangleFanProvokingVertexSelect = 1;

   const struct brw_vue_prog_data *last_vue_prog_data =
      anv_pipeline_get_last_vue_prog_data(pipeline);

   if (last_vue_prog_data->vue_map.slots_valid & VARYING_BIT_PSIZ) {
      sf.PointWidthSource = Vertex;
   } else {
      sf.PointWidthSource = State;
      sf.PointWidth = 1.0;
   }

#if GEN_GEN >= 8
   struct GENX(3DSTATE_RASTER) raster = {
      GENX(3DSTATE_RASTER_header),
   };
#else
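   /* On gen7 the raster state below is part of 3DSTATE_SF, so alias raster
    * to sf.
    */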
#  define raster sf
#endif

   /* For details on 3DSTATE_RASTER multisample state, see the BSpec table
    * "Multisample Modes State".
    */
#if GEN_GEN >= 8
   raster.DXMultisampleRasterizationEnable = true;
   /* NOTE: 3DSTATE_RASTER::ForcedSampleCount affects the BDW and SKL PMA fix
    * computations.  If we ever set this bit to a different value, they will
    * need to be updated accordingly.
    */
   raster.ForcedSampleCount = FSC_NUMRASTSAMPLES_0;
   raster.ForceMultisampling = false;
#else
   raster.MultisampleRasterizationMode =
      (ms_info && ms_info->rasterizationSamples > 1) ?
      MSRASTMODE_ON_PATTERN : MSRASTMODE_OFF_PIXEL;
#endif

   raster.FrontWinding = vk_to_gen_front_face[rs_info->frontFace];
   raster.CullMode = vk_to_gen_cullmode[rs_info->cullMode];
   raster.FrontFaceFillMode = vk_to_gen_fillmode[rs_info->polygonMode];
   raster.BackFaceFillMode = vk_to_gen_fillmode[rs_info->polygonMode];
   raster.ScissorRectangleEnable = true;

#if GEN_GEN >= 9
   /* GEN9+ splits ViewportZClipTestEnable into near and far enable bits */
   raster.ViewportZFarClipTestEnable = !pipeline->depth_clamp_enable;
   raster.ViewportZNearClipTestEnable = !pipeline->depth_clamp_enable;
#elif GEN_GEN >= 8
   raster.ViewportZClipTestEnable = !pipeline->depth_clamp_enable;
#endif

   raster.GlobalDepthOffsetEnableSolid = rs_info->depthBiasEnable;
   raster.GlobalDepthOffsetEnableWireframe = rs_info->depthBiasEnable;
   raster.GlobalDepthOffsetEnablePoint = rs_info->depthBiasEnable;

#if GEN_GEN == 7
   /* Gen7 requires that we provide the depth format in 3DSTATE_SF so that it
    * can get the depth offsets correct.
    */
   if (subpass->depth_stencil_attachment.attachment < pass->attachment_count) {
      VkFormat vk_format =
         pass->attachments[subpass->depth_stencil_attachment.attachment].format;
      assert(vk_format_is_depth_or_stencil(vk_format));
      if (vk_format_aspects(vk_format) & VK_IMAGE_ASPECT_DEPTH_BIT) {
         enum isl_format isl_format =
            anv_get_isl_format(&pipeline->device->info, vk_format,
                               VK_IMAGE_ASPECT_DEPTH_BIT,
                               VK_IMAGE_TILING_OPTIMAL);
         sf.DepthBufferSurfaceFormat =
            isl_format_get_depth_format(isl_format, false);
      }
   }
#endif

#if GEN_GEN >= 8
   GENX(3DSTATE_SF_pack)(NULL, pipeline->gen8.sf, &sf);
   GENX(3DSTATE_RASTER_pack)(NULL, pipeline->gen8.raster, &raster);
#else
#  undef raster
   GENX(3DSTATE_SF_pack)(NULL, &pipeline->gen7.sf, &sf);
#endif
}

static void
emit_ms_state(struct anv_pipeline *pipeline,
              const VkPipelineMultisampleStateCreateInfo *info)
{
   uint32_t samples = 1;
   uint32_t log2_samples = 0;

   /* From the Vulkan 1.0 spec:
    *    If pSampleMask is NULL, it is treated as if the mask has all bits
    *    enabled, i.e. no coverage is removed from fragments.
    *
    * 3DSTATE_SAMPLE_MASK.SampleMask is 16 bits.
    */
#if GEN_GEN >= 8
   uint32_t sample_mask = 0xffff;
#else
   uint32_t sample_mask = 0xff;
#endif

   if (info) {
      samples = info->rasterizationSamples;
      log2_samples = __builtin_ffs(samples) - 1;
   }

   if (info && info->pSampleMask)
      sample_mask &= info->pSampleMask[0];

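   /* NumberofMultisamples is log2-encoded.  rasterizationSamples is a
    * single power of two, so __builtin_ffs(samples) - 1 above is exactly
    * log2(samples), e.g. 4 samples -> 2.
    */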
   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_MULTISAMPLE), ms) {
      ms.NumberofMultisamples       = log2_samples;

      ms.PixelLocation              = CENTER;
#if GEN_GEN >= 8
      /* The PRM says that this bit is valid only for DX9:
       *
       *    SW can choose to set this bit only for DX9 API. DX10/OGL API's
       *    should not have any effect by setting or not setting this bit.
       */
      ms.PixelPositionOffsetEnable  = false;
#else

      switch (samples) {
      case 1:
         GEN_SAMPLE_POS_1X(ms.Sample);
         break;
      case 2:
         GEN_SAMPLE_POS_2X(ms.Sample);
         break;
      case 4:
         GEN_SAMPLE_POS_4X(ms.Sample);
         break;
      case 8:
         GEN_SAMPLE_POS_8X(ms.Sample);
         break;
      default:
         break;
      }
#endif
   }

   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SAMPLE_MASK), sm) {
      sm.SampleMask = sample_mask;
   }
}

static const uint32_t vk_to_gen_logic_op[] = {
   [VK_LOGIC_OP_COPY]                        = LOGICOP_COPY,
   [VK_LOGIC_OP_CLEAR]                       = LOGICOP_CLEAR,
   [VK_LOGIC_OP_AND]                         = LOGICOP_AND,
   [VK_LOGIC_OP_AND_REVERSE]                 = LOGICOP_AND_REVERSE,
   [VK_LOGIC_OP_AND_INVERTED]                = LOGICOP_AND_INVERTED,
   [VK_LOGIC_OP_NO_OP]                       = LOGICOP_NOOP,
   [VK_LOGIC_OP_XOR]                         = LOGICOP_XOR,
   [VK_LOGIC_OP_OR]                          = LOGICOP_OR,
   [VK_LOGIC_OP_NOR]                         = LOGICOP_NOR,
   [VK_LOGIC_OP_EQUIVALENT]                  = LOGICOP_EQUIV,
   [VK_LOGIC_OP_INVERT]                      = LOGICOP_INVERT,
   [VK_LOGIC_OP_OR_REVERSE]                  = LOGICOP_OR_REVERSE,
   [VK_LOGIC_OP_COPY_INVERTED]               = LOGICOP_COPY_INVERTED,
   [VK_LOGIC_OP_OR_INVERTED]                 = LOGICOP_OR_INVERTED,
   [VK_LOGIC_OP_NAND]                        = LOGICOP_NAND,
   [VK_LOGIC_OP_SET]                         = LOGICOP_SET,
};

static const uint32_t vk_to_gen_blend[] = {
   [VK_BLEND_FACTOR_ZERO]                    = BLENDFACTOR_ZERO,
   [VK_BLEND_FACTOR_ONE]                     = BLENDFACTOR_ONE,
   [VK_BLEND_FACTOR_SRC_COLOR]               = BLENDFACTOR_SRC_COLOR,
   [VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR]     = BLENDFACTOR_INV_SRC_COLOR,
   [VK_BLEND_FACTOR_DST_COLOR]               = BLENDFACTOR_DST_COLOR,
   [VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR]     = BLENDFACTOR_INV_DST_COLOR,
   [VK_BLEND_FACTOR_SRC_ALPHA]               = BLENDFACTOR_SRC_ALPHA,
   [VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA]     = BLENDFACTOR_INV_SRC_ALPHA,
   [VK_BLEND_FACTOR_DST_ALPHA]               = BLENDFACTOR_DST_ALPHA,
   [VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA]     = BLENDFACTOR_INV_DST_ALPHA,
   [VK_BLEND_FACTOR_CONSTANT_COLOR]          = BLENDFACTOR_CONST_COLOR,
   [VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR]= BLENDFACTOR_INV_CONST_COLOR,
   [VK_BLEND_FACTOR_CONSTANT_ALPHA]          = BLENDFACTOR_CONST_ALPHA,
   [VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA]= BLENDFACTOR_INV_CONST_ALPHA,
   [VK_BLEND_FACTOR_SRC_ALPHA_SATURATE]      = BLENDFACTOR_SRC_ALPHA_SATURATE,
   [VK_BLEND_FACTOR_SRC1_COLOR]              = BLENDFACTOR_SRC1_COLOR,
   [VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR]    = BLENDFACTOR_INV_SRC1_COLOR,
   [VK_BLEND_FACTOR_SRC1_ALPHA]              = BLENDFACTOR_SRC1_ALPHA,
   [VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA]    = BLENDFACTOR_INV_SRC1_ALPHA,
};

static const uint32_t vk_to_gen_blend_op[] = {
   [VK_BLEND_OP_ADD]                         = BLENDFUNCTION_ADD,
   [VK_BLEND_OP_SUBTRACT]                    = BLENDFUNCTION_SUBTRACT,
   [VK_BLEND_OP_REVERSE_SUBTRACT]            = BLENDFUNCTION_REVERSE_SUBTRACT,
   [VK_BLEND_OP_MIN]                         = BLENDFUNCTION_MIN,
   [VK_BLEND_OP_MAX]                         = BLENDFUNCTION_MAX,
};

static const uint32_t vk_to_gen_compare_op[] = {
   [VK_COMPARE_OP_NEVER]                        = PREFILTEROPNEVER,
   [VK_COMPARE_OP_LESS]                         = PREFILTEROPLESS,
   [VK_COMPARE_OP_EQUAL]                        = PREFILTEROPEQUAL,
   [VK_COMPARE_OP_LESS_OR_EQUAL]                = PREFILTEROPLEQUAL,
   [VK_COMPARE_OP_GREATER]                      = PREFILTEROPGREATER,
   [VK_COMPARE_OP_NOT_EQUAL]                    = PREFILTEROPNOTEQUAL,
   [VK_COMPARE_OP_GREATER_OR_EQUAL]             = PREFILTEROPGEQUAL,
   [VK_COMPARE_OP_ALWAYS]                       = PREFILTEROPALWAYS,
};

static const uint32_t vk_to_gen_stencil_op[] = {
   [VK_STENCIL_OP_KEEP]                         = STENCILOP_KEEP,
   [VK_STENCIL_OP_ZERO]                         = STENCILOP_ZERO,
   [VK_STENCIL_OP_REPLACE]                      = STENCILOP_REPLACE,
   [VK_STENCIL_OP_INCREMENT_AND_CLAMP]          = STENCILOP_INCRSAT,
   [VK_STENCIL_OP_DECREMENT_AND_CLAMP]          = STENCILOP_DECRSAT,
   [VK_STENCIL_OP_INVERT]                       = STENCILOP_INVERT,
   [VK_STENCIL_OP_INCREMENT_AND_WRAP]           = STENCILOP_INCR,
   [VK_STENCIL_OP_DECREMENT_AND_WRAP]           = STENCILOP_DECR,
};

/* This function sanitizes the VkStencilOpState by looking at the compare ops
 * and trying to determine whether or not a given stencil op can ever actually
 * occur.  Stencil ops which can never occur are set to VK_STENCIL_OP_KEEP.
 * This function returns true if, after sanitization, any of the stencil ops
 * are set to something other than VK_STENCIL_OP_KEEP.
 */
static bool
sanitize_stencil_face(VkStencilOpState *face,
                      VkCompareOp depthCompareOp)
{
   /* If compareOp is ALWAYS then the stencil test will never fail and failOp
    * will never happen.  Set failOp to KEEP in this case.
    */
   if (face->compareOp == VK_COMPARE_OP_ALWAYS)
      face->failOp = VK_STENCIL_OP_KEEP;

   /* If compareOp is NEVER or depthCompareOp is NEVER then one of the depth
    * or stencil tests will fail and passOp will never happen.
    */
   if (face->compareOp == VK_COMPARE_OP_NEVER ||
       depthCompareOp == VK_COMPARE_OP_NEVER)
      face->passOp = VK_STENCIL_OP_KEEP;

   /* If compareOp is NEVER or depthCompareOp is ALWAYS then either the
    * stencil test will fail or the depth test will pass.  In either case,
    * depthFailOp will never happen.
    */
   if (face->compareOp == VK_COMPARE_OP_NEVER ||
       depthCompareOp == VK_COMPARE_OP_ALWAYS)
      face->depthFailOp = VK_STENCIL_OP_KEEP;

   return face->failOp != VK_STENCIL_OP_KEEP ||
          face->depthFailOp != VK_STENCIL_OP_KEEP ||
          face->passOp != VK_STENCIL_OP_KEEP;
}

/* Intel hardware is fairly sensitive to whether or not depth/stencil writes
 * are enabled.  In the presence of discards, it's fairly easy to get into the
 * non-promoted case which means a fairly big performance hit.  From the Iron
 * Lake PRM, Vol 2, pt. 1, section 8.4.3.2, "Early Depth Test Cases":
 *
 *    "Non-promoted depth (N) is active whenever the depth test can be done
 *    early but it cannot determine whether or not to write source depth to
 *    the depth buffer, therefore the depth write must be performed post pixel
 *    shader. This includes cases where the pixel shader can kill pixels,
 *    including via sampler chroma key, as well as cases where the alpha test
 *    function is enabled, which kills pixels based on a programmable alpha
 *    test. In this case, even if the depth test fails, the pixel cannot be
 *    killed if a stencil write is indicated. Whether or not the stencil write
 *    happens depends on whether or not the pixel is killed later. In these
 *    cases if stencil test fails and stencil writes are off, the pixels can
 *    also be killed early. If stencil writes are enabled, the pixels must be
 *    treated as Computed depth (described above)."
 *
 * The same thing as mentioned in the stencil case can happen in the depth
 * case as well if it thinks it writes depth but, thanks to the depth test
 * being GL_EQUAL, the write doesn't actually matter.  A little extra work
 * up-front to try and disable depth and stencil writes can make a big
 * difference.
 *
 * Unfortunately, the way depth and stencil testing is specified, there are
 * many cases where, regardless of depth/stencil writes being enabled, nothing
 * actually gets written due to some other bit of state being set.  This
 * function attempts to "sanitize" the depth stencil state and disable writes
 * and sometimes even testing whenever possible.
 */
static void
sanitize_ds_state(VkPipelineDepthStencilStateCreateInfo *state,
                  bool *stencilWriteEnable,
                  VkImageAspectFlags ds_aspects)
{
   *stencilWriteEnable = state->stencilTestEnable;

   /* If the depth test is disabled, we won't be writing anything. */
   if (!state->depthTestEnable)
      state->depthWriteEnable = false;

   /* The Vulkan spec requires that if either depth or stencil is not present,
    * the pipeline is to act as if the test silently passes.
    */
   if (!(ds_aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) {
      state->depthWriteEnable = false;
      state->depthCompareOp = VK_COMPARE_OP_ALWAYS;
   }

   if (!(ds_aspects & VK_IMAGE_ASPECT_STENCIL_BIT)) {
      *stencilWriteEnable = false;
      state->front.compareOp = VK_COMPARE_OP_ALWAYS;
      state->back.compareOp = VK_COMPARE_OP_ALWAYS;
   }

   /* If the stencil test is enabled and always fails, then we will never get
    * to the depth test so we can just disable the depth test entirely.
    */
   if (state->stencilTestEnable &&
       state->front.compareOp == VK_COMPARE_OP_NEVER &&
       state->back.compareOp == VK_COMPARE_OP_NEVER) {
      state->depthTestEnable = false;
      state->depthWriteEnable = false;
   }

   /* If depthCompareOp is EQUAL then the value we would be writing to the
    * depth buffer is the same as the value that's already there so there's no
    * point in writing it.
    */
   if (state->depthCompareOp == VK_COMPARE_OP_EQUAL)
      state->depthWriteEnable = false;

   /* If the stencil ops are such that we don't actually ever modify the
    * stencil buffer, we should disable writes.
    */
   if (!sanitize_stencil_face(&state->front, state->depthCompareOp) &&
       !sanitize_stencil_face(&state->back, state->depthCompareOp))
      *stencilWriteEnable = false;

   /* If the depth test always passes and we never write out depth, that's the
    * same as if the depth test is disabled entirely.
    */
   if (state->depthCompareOp == VK_COMPARE_OP_ALWAYS &&
       !state->depthWriteEnable)
      state->depthTestEnable = false;

   /* If the stencil test always passes and we never write out stencil, that's
    * the same as if the stencil test is disabled entirely.
    */
   if (state->front.compareOp == VK_COMPARE_OP_ALWAYS &&
       state->back.compareOp == VK_COMPARE_OP_ALWAYS &&
       !*stencilWriteEnable)
      state->stencilTestEnable = false;
}

static void
emit_ds_state(struct anv_pipeline *pipeline,
              const VkPipelineDepthStencilStateCreateInfo *pCreateInfo,
              const struct anv_render_pass *pass,
              const struct anv_subpass *subpass)
{
#if GEN_GEN == 7
#  define depth_stencil_dw pipeline->gen7.depth_stencil_state
#elif GEN_GEN == 8
#  define depth_stencil_dw pipeline->gen8.wm_depth_stencil
#else
#  define depth_stencil_dw pipeline->gen9.wm_depth_stencil
#endif

   if (pCreateInfo == NULL) {
      /* We're going to OR this together with the dynamic state.  We need
       * to make sure it's initialized to something useful.
       */
      pipeline->writes_stencil = false;
      pipeline->stencil_test_enable = false;
      pipeline->writes_depth = false;
      pipeline->depth_test_enable = false;
      memset(depth_stencil_dw, 0, sizeof(depth_stencil_dw));
      return;
   }

   VkImageAspectFlags ds_aspects = 0;
   if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
      VkFormat depth_stencil_format =
         pass->attachments[subpass->depth_stencil_attachment.attachment].format;
      ds_aspects = vk_format_aspects(depth_stencil_format);
   }

   VkPipelineDepthStencilStateCreateInfo info = *pCreateInfo;
   sanitize_ds_state(&info, &pipeline->writes_stencil, ds_aspects);
   pipeline->stencil_test_enable = info.stencilTestEnable;
   pipeline->writes_depth = info.depthWriteEnable;
   pipeline->depth_test_enable = info.depthTestEnable;

   /* VkBool32 depthBoundsTestEnable; // optional (depth_bounds_test) */

#if GEN_GEN <= 7
   struct GENX(DEPTH_STENCIL_STATE) depth_stencil = {
#else
   struct GENX(3DSTATE_WM_DEPTH_STENCIL) depth_stencil = {
#endif
      .DepthTestEnable = info.depthTestEnable,
      .DepthBufferWriteEnable = info.depthWriteEnable,
      .DepthTestFunction = vk_to_gen_compare_op[info.depthCompareOp],
      .DoubleSidedStencilEnable = true,

      .StencilTestEnable = info.stencilTestEnable,
      .StencilFailOp = vk_to_gen_stencil_op[info.front.failOp],
      .StencilPassDepthPassOp = vk_to_gen_stencil_op[info.front.passOp],
      .StencilPassDepthFailOp = vk_to_gen_stencil_op[info.front.depthFailOp],
      .StencilTestFunction = vk_to_gen_compare_op[info.front.compareOp],
      .BackfaceStencilFailOp = vk_to_gen_stencil_op[info.back.failOp],
      .BackfaceStencilPassDepthPassOp = vk_to_gen_stencil_op[info.back.passOp],
      .BackfaceStencilPassDepthFailOp = vk_to_gen_stencil_op[info.back.depthFailOp],
      .BackfaceStencilTestFunction = vk_to_gen_compare_op[info.back.compareOp],
   };

#if GEN_GEN <= 7
   GENX(DEPTH_STENCIL_STATE_pack)(NULL, depth_stencil_dw, &depth_stencil);
#else
   GENX(3DSTATE_WM_DEPTH_STENCIL_pack)(NULL, depth_stencil_dw, &depth_stencil);
#endif
}

static void
emit_cb_state(struct anv_pipeline *pipeline,
              const VkPipelineColorBlendStateCreateInfo *info,
              const VkPipelineMultisampleStateCreateInfo *ms_info)
{
   struct anv_device *device = pipeline->device;

   struct GENX(BLEND_STATE) blend_state = {
#if GEN_GEN >= 8
      .AlphaToCoverageEnable = ms_info && ms_info->alphaToCoverageEnable,
      .AlphaToOneEnable = ms_info && ms_info->alphaToOneEnable,
#endif
   };

   uint32_t surface_count = 0;
   struct anv_pipeline_bind_map *map;
   if (anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
      map = &pipeline->shaders[MESA_SHADER_FRAGMENT]->bind_map;
      surface_count = map->surface_count;
   }

   const uint32_t num_dwords = GENX(BLEND_STATE_length) +
      GENX(BLEND_STATE_ENTRY_length) * surface_count;
   pipeline->blend_state =
      anv_state_pool_alloc(&device->dynamic_state_pool, num_dwords * 4, 64);

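   /* The state buffer holds a BLEND_STATE header followed by one
    * BLEND_STATE_ENTRY per binding-table surface; state_pos walks the
    * entries just past the header, and the header itself is packed at the
    * very end once IndependentAlphaBlendEnable is known.
    */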
   bool has_writeable_rt = false;
   uint32_t *state_pos = pipeline->blend_state.map;
   state_pos += GENX(BLEND_STATE_length);
#if GEN_GEN >= 8
   struct GENX(BLEND_STATE_ENTRY) bs0 = { 0 };
#endif
   for (unsigned i = 0; i < surface_count; i++) {
      struct anv_pipeline_binding *binding = &map->surface_to_descriptor[i];

      /* All color attachments are at the beginning of the binding table */
      if (binding->set != ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS)
         break;

      /* We can have at most 8 attachments */
      assert(i < 8);

      if (info == NULL || binding->index >= info->attachmentCount) {
         /* Default everything to disabled */
         struct GENX(BLEND_STATE_ENTRY) entry = {
            .WriteDisableAlpha = true,
            .WriteDisableRed = true,
            .WriteDisableGreen = true,
            .WriteDisableBlue = true,
         };
         GENX(BLEND_STATE_ENTRY_pack)(NULL, state_pos, &entry);
         state_pos += GENX(BLEND_STATE_ENTRY_length);
         continue;
      }

      assert(binding->binding == 0);
      const VkPipelineColorBlendAttachmentState *a =
         &info->pAttachments[binding->index];

      struct GENX(BLEND_STATE_ENTRY) entry = {
#if GEN_GEN < 8
         .AlphaToCoverageEnable = ms_info && ms_info->alphaToCoverageEnable,
         .AlphaToOneEnable = ms_info && ms_info->alphaToOneEnable,
#endif
         .LogicOpEnable = info->logicOpEnable,
         .LogicOpFunction = vk_to_gen_logic_op[info->logicOp],
         .ColorBufferBlendEnable = a->blendEnable,
         .ColorClampRange = COLORCLAMP_RTFORMAT,
         .PreBlendColorClampEnable = true,
         .PostBlendColorClampEnable = true,
         .SourceBlendFactor = vk_to_gen_blend[a->srcColorBlendFactor],
         .DestinationBlendFactor = vk_to_gen_blend[a->dstColorBlendFactor],
         .ColorBlendFunction = vk_to_gen_blend_op[a->colorBlendOp],
         .SourceAlphaBlendFactor = vk_to_gen_blend[a->srcAlphaBlendFactor],
         .DestinationAlphaBlendFactor = vk_to_gen_blend[a->dstAlphaBlendFactor],
         .AlphaBlendFunction = vk_to_gen_blend_op[a->alphaBlendOp],
         .WriteDisableAlpha = !(a->colorWriteMask & VK_COLOR_COMPONENT_A_BIT),
         .WriteDisableRed = !(a->colorWriteMask & VK_COLOR_COMPONENT_R_BIT),
         .WriteDisableGreen = !(a->colorWriteMask & VK_COLOR_COMPONENT_G_BIT),
         .WriteDisableBlue = !(a->colorWriteMask & VK_COLOR_COMPONENT_B_BIT),
      };

      if (a->srcColorBlendFactor != a->srcAlphaBlendFactor ||
          a->dstColorBlendFactor != a->dstAlphaBlendFactor ||
          a->colorBlendOp != a->alphaBlendOp) {
#if GEN_GEN >= 8
         blend_state.IndependentAlphaBlendEnable = true;
#else
         entry.IndependentAlphaBlendEnable = true;
#endif
      }

      if (a->colorWriteMask != 0)
         has_writeable_rt = true;

      /* Our hardware applies the blend factor prior to the blend function
       * regardless of what function is used.  Technically, this means the
       * hardware can do MORE than GL or Vulkan specify.  However, it also
       * means that, for MIN and MAX, we have to stomp the blend factor to
       * ONE to make it a no-op.
       */
      if (a->colorBlendOp == VK_BLEND_OP_MIN ||
          a->colorBlendOp == VK_BLEND_OP_MAX) {
         entry.SourceBlendFactor = BLENDFACTOR_ONE;
         entry.DestinationBlendFactor = BLENDFACTOR_ONE;
      }
      if (a->alphaBlendOp == VK_BLEND_OP_MIN ||
          a->alphaBlendOp == VK_BLEND_OP_MAX) {
         entry.SourceAlphaBlendFactor = BLENDFACTOR_ONE;
         entry.DestinationAlphaBlendFactor = BLENDFACTOR_ONE;
      }
      GENX(BLEND_STATE_ENTRY_pack)(NULL, state_pos, &entry);
      state_pos += GENX(BLEND_STATE_ENTRY_length);
#if GEN_GEN >= 8
      if (i == 0)
         bs0 = entry;
#endif
   }

#if GEN_GEN >= 8
   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_BLEND), blend) {
      blend.AlphaToCoverageEnable         = blend_state.AlphaToCoverageEnable;
      blend.HasWriteableRT                = has_writeable_rt;
      blend.ColorBufferBlendEnable        = bs0.ColorBufferBlendEnable;
      blend.SourceAlphaBlendFactor        = bs0.SourceAlphaBlendFactor;
      blend.DestinationAlphaBlendFactor   = bs0.DestinationAlphaBlendFactor;
      blend.SourceBlendFactor             = bs0.SourceBlendFactor;
      blend.DestinationBlendFactor        = bs0.DestinationBlendFactor;
      blend.AlphaTestEnable               = false;
      blend.IndependentAlphaBlendEnable   =
         blend_state.IndependentAlphaBlendEnable;
   }
#else
   (void)has_writeable_rt;
#endif

   GENX(BLEND_STATE_pack)(NULL, pipeline->blend_state.map, &blend_state);
   anv_state_flush(device, pipeline->blend_state);

   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_BLEND_STATE_POINTERS), bsp) {
      bsp.BlendStatePointer      = pipeline->blend_state.offset;
#if GEN_GEN >= 8
      bsp.BlendStatePointerValid = true;
#endif
   }
}

static void
emit_3dstate_clip(struct anv_pipeline *pipeline,
                  const VkPipelineViewportStateCreateInfo *vp_info,
                  const VkPipelineRasterizationStateCreateInfo *rs_info)
{
   const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
   (void) wm_prog_data;
   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_CLIP), clip) {
      clip.ClipEnable               = true;
      clip.StatisticsEnable         = true;
      clip.EarlyCullEnable          = true;
      clip.APIMode                  = APIMODE_D3D;
      clip.ViewportXYClipTestEnable = true;

      clip.ClipMode = CLIPMODE_NORMAL;

      clip.TriangleStripListProvokingVertexSelect = 0;
      clip.LineStripListProvokingVertexSelect     = 0;
      clip.TriangleFanProvokingVertexSelect       = 1;

      clip.MinimumPointWidth = 0.125;
      clip.MaximumPointWidth = 255.875;

      const struct brw_vue_prog_data *last =
         anv_pipeline_get_last_vue_prog_data(pipeline);

      /* From the Vulkan 1.0.45 spec:
       *
       *    "If the last active vertex processing stage shader entry point's
       *    interface does not include a variable decorated with
       *    ViewportIndex, then the first viewport is used."
       */
      if (vp_info && (last->vue_map.slots_valid & VARYING_BIT_VIEWPORT)) {
         clip.MaximumVPIndex = vp_info->viewportCount - 1;
      } else {
         clip.MaximumVPIndex = 0;
      }

      /* From the Vulkan 1.0.45 spec:
       *
       *    "If the last active vertex processing stage shader entry point's
       *    interface does not include a variable decorated with Layer, then
       *    the first layer is used."
       */
      clip.ForceZeroRTAIndexEnable =
         !(last->vue_map.slots_valid & VARYING_BIT_LAYER);

#if GEN_GEN == 7
      clip.FrontWinding            = vk_to_gen_front_face[rs_info->frontFace];
      clip.CullMode                = vk_to_gen_cullmode[rs_info->cullMode];
      clip.ViewportZClipTestEnable = !pipeline->depth_clamp_enable;
      if (last) {
         clip.UserClipDistanceClipTestEnableBitmask = last->clip_distance_mask;
         clip.UserClipDistanceCullTestEnableBitmask = last->cull_distance_mask;
      }
#else
      clip.NonPerspectiveBarycentricEnable = wm_prog_data ?
         (wm_prog_data->barycentric_interp_modes &
          BRW_BARYCENTRIC_NONPERSPECTIVE_BITS) != 0 : 0;
#endif
   }
}

static void
emit_3dstate_streamout(struct anv_pipeline *pipeline,
                       const VkPipelineRasterizationStateCreateInfo *rs_info)
{
   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_STREAMOUT), so) {
      so.RenderingDisable = rs_info->rasterizerDiscardEnable;
   }
}

static uint32_t
get_sampler_count(const struct anv_shader_bin *bin)
{
   uint32_t count_by_4 = DIV_ROUND_UP(bin->bind_map.sampler_count, 4);

   /* We can potentially have way more than 32 samplers and that's ok.
    * However, the 3DSTATE_XS packets only have 3 bits to specify how
    * many to pre-fetch and all values above 4 are marked reserved.
    */
   return MIN2(count_by_4, 4);
}

static uint32_t
get_binding_table_entry_count(const struct anv_shader_bin *bin)
{
   return DIV_ROUND_UP(bin->bind_map.surface_count, 32);
}

static struct anv_address
get_scratch_address(struct anv_pipeline *pipeline,
                    gl_shader_stage stage,
                    const struct anv_shader_bin *bin)
{
   return (struct anv_address) {
      .bo = anv_scratch_pool_alloc(pipeline->device,
                                   &pipeline->device->scratch_pool,
                                   stage, bin->prog_data->total_scratch),
      .offset = 0,
   };
}

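/* PerThreadScratchSpace is a power-of-two encoding.  Assuming total_scratch
 * is a power of two (the compiler rounds it up), ffs() of the size in 2 KB
 * units yields log2(total_scratch / 1024); e.g. 4 KB of per-thread scratch
 * encodes as ffs(2) = 2.
 */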
1112 static uint32_t
1113 get_scratch_space(const struct anv_shader_bin *bin)
1114 {
1115    return ffs(bin->prog_data->total_scratch / 2048);
1116 }
1117 
1118 static void
1119 emit_3dstate_vs(struct anv_pipeline *pipeline)
1120 {
1121    const struct gen_device_info *devinfo = &pipeline->device->info;
1122    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
1123    const struct anv_shader_bin *vs_bin =
1124       pipeline->shaders[MESA_SHADER_VERTEX];
1125 
1126    assert(anv_pipeline_has_stage(pipeline, MESA_SHADER_VERTEX));
1127 
1128    anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS), vs) {
1129       vs.Enable               = true;
1130       vs.StatisticsEnable     = true;
1131       vs.KernelStartPointer   = vs_bin->kernel.offset;
1132 #if GEN_GEN >= 8
1133       vs.SIMD8DispatchEnable  =
1134          vs_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8;
1135 #endif
1136 
1137       assert(!vs_prog_data->base.base.use_alt_mode);
1138       vs.SingleVertexDispatch       = false;
1139       vs.VectorMaskEnable           = false;
1140       vs.SamplerCount               = get_sampler_count(vs_bin);
1141       vs.BindingTableEntryCount     = get_binding_table_entry_count(vs_bin);
1142       vs.FloatingPointMode          = IEEE754;
1143       vs.IllegalOpcodeExceptionEnable = false;
1144       vs.SoftwareExceptionEnable    = false;
1145       vs.MaximumNumberofThreads     = devinfo->max_vs_threads - 1;
1146       vs.VertexCacheDisable         = false;
1147 
1148       vs.VertexURBEntryReadLength      = vs_prog_data->base.urb_read_length;
1149       vs.VertexURBEntryReadOffset      = 0;
1150       vs.DispatchGRFStartRegisterForURBData =
1151          vs_prog_data->base.base.dispatch_grf_start_reg;
1152 
1153 #if GEN_GEN >= 8
1154       vs.UserClipDistanceClipTestEnableBitmask =
1155          vs_prog_data->base.clip_distance_mask;
1156       vs.UserClipDistanceCullTestEnableBitmask =
1157          vs_prog_data->base.cull_distance_mask;
1158 #endif
1159 
1160       vs.PerThreadScratchSpace   = get_scratch_space(vs_bin);
1161       vs.ScratchSpaceBasePointer =
1162          get_scratch_address(pipeline, MESA_SHADER_VERTEX, vs_bin);
1163    }
1164 }
1165 
1166 static void
1167 emit_3dstate_hs_te_ds(struct anv_pipeline *pipeline,
1168                       const VkPipelineTessellationStateCreateInfo *tess_info)
1169 {
1170    if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL)) {
1171       anv_batch_emit(&pipeline->batch, GENX(3DSTATE_HS), hs);
1172       anv_batch_emit(&pipeline->batch, GENX(3DSTATE_TE), te);
1173       anv_batch_emit(&pipeline->batch, GENX(3DSTATE_DS), ds);
1174       return;
1175    }
1176 
1177    const struct gen_device_info *devinfo = &pipeline->device->info;
1178    const struct anv_shader_bin *tcs_bin =
1179       pipeline->shaders[MESA_SHADER_TESS_CTRL];
1180    const struct anv_shader_bin *tes_bin =
1181       pipeline->shaders[MESA_SHADER_TESS_EVAL];
1182 
1183    const struct brw_tcs_prog_data *tcs_prog_data = get_tcs_prog_data(pipeline);
1184    const struct brw_tes_prog_data *tes_prog_data = get_tes_prog_data(pipeline);
1185 
1186    anv_batch_emit(&pipeline->batch, GENX(3DSTATE_HS), hs) {
1187       hs.Enable = true;
1188       hs.StatisticsEnable = true;
1189       hs.KernelStartPointer = tcs_bin->kernel.offset;
1190 
1191       hs.SamplerCount = get_sampler_count(tcs_bin);
1192       hs.BindingTableEntryCount = get_binding_table_entry_count(tcs_bin);
1193       hs.MaximumNumberofThreads = devinfo->max_tcs_threads - 1;
1194       hs.IncludeVertexHandles = true;
1195       hs.InstanceCount = tcs_prog_data->instances - 1;
1196 
1197       hs.VertexURBEntryReadLength = 0;
1198       hs.VertexURBEntryReadOffset = 0;
1199       hs.DispatchGRFStartRegisterForURBData =
1200          tcs_prog_data->base.base.dispatch_grf_start_reg;
1201 
1202       hs.PerThreadScratchSpace = get_scratch_space(tcs_bin);
1203       hs.ScratchSpaceBasePointer =
1204          get_scratch_address(pipeline, MESA_SHADER_TESS_CTRL, tcs_bin);
1205    }
1206 
1207    const VkPipelineTessellationDomainOriginStateCreateInfoKHR *domain_origin_state =
1208       tess_info ? vk_find_struct_const(tess_info, PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO_KHR) : NULL;
1209 
1210    VkTessellationDomainOriginKHR uv_origin =
1211       domain_origin_state ? domain_origin_state->domainOrigin :
1212                             VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT_KHR;
1213 
1214    anv_batch_emit(&pipeline->batch, GENX(3DSTATE_TE), te) {
1215       te.Partitioning = tes_prog_data->partitioning;
1216 
1217       if (uv_origin == VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT_KHR) {
1218          te.OutputTopology = tes_prog_data->output_topology;
1219       } else {
1220          /* When the origin is upper-left, we have to flip the winding order */
1221          if (tes_prog_data->output_topology == OUTPUT_TRI_CCW) {
1222             te.OutputTopology = OUTPUT_TRI_CW;
1223          } else if (tes_prog_data->output_topology == OUTPUT_TRI_CW) {
1224             te.OutputTopology = OUTPUT_TRI_CCW;
1225          } else {
1226             te.OutputTopology = tes_prog_data->output_topology;
1227          }
1228       }
1229 
1230       te.TEDomain = tes_prog_data->domain;
1231       te.TEEnable = true;
      te.MaximumTessellationFactorOdd = 63.0;
      te.MaximumTessellationFactorNotOdd = 64.0;
   }

   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_DS), ds) {
      ds.Enable = true;
      ds.StatisticsEnable = true;
      ds.KernelStartPointer = tes_bin->kernel.offset;

      ds.SamplerCount = get_sampler_count(tes_bin);
      ds.BindingTableEntryCount = get_binding_table_entry_count(tes_bin);
      ds.MaximumNumberofThreads = devinfo->max_tes_threads - 1;

      ds.ComputeWCoordinateEnable =
         tes_prog_data->domain == BRW_TESS_DOMAIN_TRI;

      ds.PatchURBEntryReadLength = tes_prog_data->base.urb_read_length;
      ds.PatchURBEntryReadOffset = 0;
      ds.DispatchGRFStartRegisterForURBData =
         tes_prog_data->base.base.dispatch_grf_start_reg;

#if GEN_GEN >= 8
      ds.DispatchMode =
         tes_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8 ?
            DISPATCH_MODE_SIMD8_SINGLE_PATCH :
            DISPATCH_MODE_SIMD4X2;

      ds.UserClipDistanceClipTestEnableBitmask =
         tes_prog_data->base.clip_distance_mask;
      ds.UserClipDistanceCullTestEnableBitmask =
         tes_prog_data->base.cull_distance_mask;
#endif

      ds.PerThreadScratchSpace = get_scratch_space(tes_bin);
      ds.ScratchSpaceBasePointer =
         get_scratch_address(pipeline, MESA_SHADER_TESS_EVAL, tes_bin);
   }
}

static void
emit_3dstate_gs(struct anv_pipeline *pipeline)
{
   const struct gen_device_info *devinfo = &pipeline->device->info;
   const struct anv_shader_bin *gs_bin =
      pipeline->shaders[MESA_SHADER_GEOMETRY];

   if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY)) {
      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), gs);
      return;
   }

   const struct brw_gs_prog_data *gs_prog_data = get_gs_prog_data(pipeline);

   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), gs) {
      gs.Enable                  = true;
      gs.StatisticsEnable        = true;
      gs.KernelStartPointer      = gs_bin->kernel.offset;
      gs.DispatchMode            = gs_prog_data->base.dispatch_mode;

      gs.SingleProgramFlow       = false;
      gs.VectorMaskEnable        = false;
      gs.SamplerCount            = get_sampler_count(gs_bin);
      gs.BindingTableEntryCount  = get_binding_table_entry_count(gs_bin);
      gs.IncludeVertexHandles    = gs_prog_data->base.include_vue_handles;
      gs.IncludePrimitiveID      = gs_prog_data->include_primitive_id;

      if (GEN_GEN == 8) {
         /* Broadwell is weird.  It needs us to divide by 2. */
         gs.MaximumNumberofThreads = devinfo->max_gs_threads / 2 - 1;
      } else {
         gs.MaximumNumberofThreads = devinfo->max_gs_threads - 1;
      }

      gs.OutputVertexSize        = gs_prog_data->output_vertex_size_hwords * 2 - 1;
      gs.OutputTopology          = gs_prog_data->output_topology;
      gs.VertexURBEntryReadLength = gs_prog_data->base.urb_read_length;
      gs.ControlDataFormat       = gs_prog_data->control_data_format;
      gs.ControlDataHeaderSize   = gs_prog_data->control_data_header_size_hwords;
      gs.InstanceControl         = MAX2(gs_prog_data->invocations, 1) - 1;
      gs.ReorderMode             = TRAILING;

#if GEN_GEN >= 8
      gs.ExpectedVertexCount     = gs_prog_data->vertices_in;
      gs.StaticOutput            = gs_prog_data->static_vertex_count >= 0;
      gs.StaticOutputVertexCount = gs_prog_data->static_vertex_count >= 0 ?
                                   gs_prog_data->static_vertex_count : 0;
#endif

      gs.VertexURBEntryReadOffset = 0;
      gs.VertexURBEntryReadLength = gs_prog_data->base.urb_read_length;
      gs.DispatchGRFStartRegisterForURBData =
         gs_prog_data->base.base.dispatch_grf_start_reg;

#if GEN_GEN >= 8
      gs.UserClipDistanceClipTestEnableBitmask =
         gs_prog_data->base.clip_distance_mask;
      gs.UserClipDistanceCullTestEnableBitmask =
         gs_prog_data->base.cull_distance_mask;
#endif

      gs.PerThreadScratchSpace   = get_scratch_space(gs_bin);
      gs.ScratchSpaceBasePointer =
         get_scratch_address(pipeline, MESA_SHADER_GEOMETRY, gs_bin);
   }
}

static bool
has_color_buffer_write_enabled(const struct anv_pipeline *pipeline,
                               const VkPipelineColorBlendStateCreateInfo *blend)
{
   const struct anv_shader_bin *shader_bin =
      pipeline->shaders[MESA_SHADER_FRAGMENT];
   if (!shader_bin)
      return false;

   const struct anv_pipeline_bind_map *bind_map = &shader_bin->bind_map;
   for (int i = 0; i < bind_map->surface_count; i++) {
      struct anv_pipeline_binding *binding = &bind_map->surface_to_descriptor[i];

      if (binding->set != ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS)
         continue;

      if (binding->index == UINT32_MAX)
         continue;

      if (blend->pAttachments[binding->index].colorWriteMask != 0)
         return true;
   }

   return false;
}

static void
emit_3dstate_wm(struct anv_pipeline *pipeline, struct anv_subpass *subpass,
                const VkPipelineColorBlendStateCreateInfo *blend,
                const VkPipelineMultisampleStateCreateInfo *multisample)
{
   const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);

   MAYBE_UNUSED uint32_t samples =
      multisample ? multisample->rasterizationSamples : 1;

   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_WM), wm) {
      wm.StatisticsEnable                    = true;
      wm.LineEndCapAntialiasingRegionWidth   = _05pixels;
      wm.LineAntialiasingRegionWidth         = _10pixels;
      wm.PointRasterizationRule              = RASTRULE_UPPER_RIGHT;

      if (anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
         if (wm_prog_data->early_fragment_tests) {
            wm.EarlyDepthStencilControl         = EDSC_PREPS;
         } else if (wm_prog_data->has_side_effects) {
            wm.EarlyDepthStencilControl         = EDSC_PSEXEC;
         } else {
            wm.EarlyDepthStencilControl         = EDSC_NORMAL;
         }

         wm.BarycentricInterpolationMode =
            wm_prog_data->barycentric_interp_modes;

#if GEN_GEN < 8
         wm.PixelShaderComputedDepthMode  = wm_prog_data->computed_depth_mode;
         wm.PixelShaderUsesSourceDepth    = wm_prog_data->uses_src_depth;
         wm.PixelShaderUsesSourceW        = wm_prog_data->uses_src_w;
         wm.PixelShaderUsesInputCoverageMask = wm_prog_data->uses_sample_mask;

         /* If the subpass has a depth or stencil self-dependency, then we
          * need to force the hardware to do the depth/stencil write *after*
          * fragment shader execution.  Otherwise, the writes may hit memory
          * before we get around to fetching from the input attachment and we
          * may get the depth or stencil value from the current draw rather
          * than the previous one.
          */
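         /* (A self-dependency here means the subpass reads its own depth or
          * stencil attachment as an input attachment; that is what
          * subpass->has_ds_self_dep records.)
          */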
         wm.PixelShaderKillsPixel         = subpass->has_ds_self_dep ||
                                            wm_prog_data->uses_kill;

         if (wm.PixelShaderComputedDepthMode != PSCDEPTH_OFF ||
             wm_prog_data->has_side_effects ||
             wm.PixelShaderKillsPixel ||
             has_color_buffer_write_enabled(pipeline, blend))
            wm.ThreadDispatchEnable = true;

         if (samples > 1) {
            wm.MultisampleRasterizationMode = MSRASTMODE_ON_PATTERN;
            if (wm_prog_data->persample_dispatch) {
               wm.MultisampleDispatchMode = MSDISPMODE_PERSAMPLE;
            } else {
               wm.MultisampleDispatchMode = MSDISPMODE_PERPIXEL;
            }
         } else {
            wm.MultisampleRasterizationMode = MSRASTMODE_OFF_PIXEL;
            wm.MultisampleDispatchMode = MSDISPMODE_PERSAMPLE;
         }
#endif
      }
   }
}

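/* A blend factor references the fragment shader's second color output
 * (SRC1) exactly when it is one of the four factors below; such factors
 * require dual-source blending support in the hardware.
 */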
UNUSED static bool
is_dual_src_blend_factor(VkBlendFactor factor)
{
   return factor == VK_BLEND_FACTOR_SRC1_COLOR ||
          factor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR ||
          factor == VK_BLEND_FACTOR_SRC1_ALPHA ||
          factor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA;
}

static void
emit_3dstate_ps(struct anv_pipeline *pipeline,
                const VkPipelineColorBlendStateCreateInfo *blend)
{
   MAYBE_UNUSED const struct gen_device_info *devinfo = &pipeline->device->info;
   const struct anv_shader_bin *fs_bin =
      pipeline->shaders[MESA_SHADER_FRAGMENT];

   if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS), ps) {
#if GEN_GEN == 7
         /* Even if no fragments are ever dispatched, gen7 hardware hangs if
          * we don't at least set the maximum number of threads.
          */
         ps.MaximumNumberofThreads = devinfo->max_wm_threads - 1;
#endif
      }
      return;
   }

   const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);

#if GEN_GEN < 8
   /* The hardware wedges if you have this bit set but don't turn on any dual
    * source blend factors.
    */
   bool dual_src_blend = false;
   if (wm_prog_data->dual_src_blend && blend) {
      for (uint32_t i = 0; i < blend->attachmentCount; i++) {
         const VkPipelineColorBlendAttachmentState *bstate =
            &blend->pAttachments[i];

         if (bstate->blendEnable &&
             (is_dual_src_blend_factor(bstate->srcColorBlendFactor) ||
              is_dual_src_blend_factor(bstate->dstColorBlendFactor) ||
              is_dual_src_blend_factor(bstate->srcAlphaBlendFactor) ||
              is_dual_src_blend_factor(bstate->dstAlphaBlendFactor))) {
            dual_src_blend = true;
            break;
         }
      }
   }
#endif

   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS), ps) {
      ps.KernelStartPointer0        = fs_bin->kernel.offset;
      ps.KernelStartPointer1        = 0;
      ps.KernelStartPointer2        = fs_bin->kernel.offset +
                                      wm_prog_data->prog_offset_2;
      ps._8PixelDispatchEnable      = wm_prog_data->dispatch_8;
      ps._16PixelDispatchEnable     = wm_prog_data->dispatch_16;
      ps._32PixelDispatchEnable     = false;

      ps.SingleProgramFlow          = false;
      ps.VectorMaskEnable           = true;
      ps.SamplerCount               = get_sampler_count(fs_bin);
      ps.BindingTableEntryCount     = get_binding_table_entry_count(fs_bin);
      ps.PushConstantEnable         = wm_prog_data->base.nr_params > 0 ||
                                      wm_prog_data->base.ubo_ranges[0].length;
      ps.PositionXYOffsetSelect     = wm_prog_data->uses_pos_offset ?
                                      POSOFFSET_SAMPLE : POSOFFSET_NONE;
#if GEN_GEN < 8
      ps.AttributeEnable            = wm_prog_data->num_varying_inputs > 0;
      ps.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;
      ps.DualSourceBlendEnable      = dual_src_blend;
#endif

#if GEN_IS_HASWELL
      /* Haswell requires the sample mask to be set in this packet as well
       * as in 3DSTATE_SAMPLE_MASK; the values should match.
       */
      ps.SampleMask                 = 0xff;
#endif

#if GEN_GEN >= 9
      ps.MaximumNumberofThreadsPerPSD  = 64 - 1;
#elif GEN_GEN >= 8
      ps.MaximumNumberofThreadsPerPSD  = 64 - 2;
#else
      ps.MaximumNumberofThreads        = devinfo->max_wm_threads - 1;
#endif

      ps.DispatchGRFStartRegisterForConstantSetupData0 =
         wm_prog_data->base.dispatch_grf_start_reg;
      ps.DispatchGRFStartRegisterForConstantSetupData1 = 0;
      ps.DispatchGRFStartRegisterForConstantSetupData2 =
         wm_prog_data->dispatch_grf_start_reg_2;

      ps.PerThreadScratchSpace   = get_scratch_space(fs_bin);
      ps.ScratchSpaceBasePointer =
         get_scratch_address(pipeline, MESA_SHADER_FRAGMENT, fs_bin);
   }
}

#if GEN_GEN >= 8
static void
emit_3dstate_ps_extra(struct anv_pipeline *pipeline,
                      struct anv_subpass *subpass,
                      const VkPipelineColorBlendStateCreateInfo *blend)
{
   const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);

   if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_EXTRA), ps);
      return;
   }

   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_EXTRA), ps) {
      ps.PixelShaderValid              = true;
      ps.AttributeEnable               = wm_prog_data->num_varying_inputs > 0;
      ps.oMaskPresenttoRenderTarget    = wm_prog_data->uses_omask;
      ps.PixelShaderIsPerSample        = wm_prog_data->persample_dispatch;
      ps.PixelShaderComputedDepthMode  = wm_prog_data->computed_depth_mode;
      ps.PixelShaderUsesSourceDepth    = wm_prog_data->uses_src_depth;
      ps.PixelShaderUsesSourceW        = wm_prog_data->uses_src_w;

      /* If the subpass has a depth or stencil self-dependency, then we need
       * to force the hardware to do the depth/stencil write *after* fragment
       * shader execution.  Otherwise, the writes may hit memory before we get
       * around to fetching from the input attachment and we may get the depth
       * or stencil value from the current draw rather than the previous one.
       */
      ps.PixelShaderKillsPixel         = subpass->has_ds_self_dep ||
                                         wm_prog_data->uses_kill;

      /* The stricter cross-primitive coherency guarantees that the hardware
       * gives us with the "Accesses UAV" bit set for at least one shader stage
       * and the "UAV coherency required" bit set on the 3DPRIMITIVE command are
       * redundant within the current image, atomic counter and SSBO GL APIs,
       * which all have very loose ordering and coherency requirements and
       * generally rely on the application to insert explicit barriers when a
       * shader invocation is expected to see the memory writes performed by the
       * invocations of some previous primitive.  Regardless of the value of
       * "UAV coherency required", the "Accesses UAV" bits will implicitly cause
       * a DC flush (useless in most cases) when the lowermost stage with the
       * bit set finishes execution.
       *
       * It would be nice to disable it, but in some cases we can't because on
       * Gen8+ it also has an influence on rasterization via the PS UAV-only
       * signal (which could be set independently of the coherency mechanism
       * in the 3DSTATE_WM command on Gen7), and because in some cases it will
       * determine whether the hardware skips execution of the fragment shader
       * or not via the ThreadDispatchEnable signal.  However, if we know that
       * GEN8_PS_BLEND_HAS_WRITEABLE_RT is going to be set and
       * GEN8_PSX_PIXEL_SHADER_NO_RT_WRITE is not, it shouldn't make any
       * difference, so we may just disable it here.
       *
       * Gen8 hardware tries to compute ThreadDispatchEnable for us, but it
       * doesn't take KillPixels into account when no depth or stencil writes
       * are enabled.  In order for occlusion queries to work correctly with
       * no attachments, we need to force-enable it here.
       */
      if ((wm_prog_data->has_side_effects || wm_prog_data->uses_kill) &&
          !has_color_buffer_write_enabled(pipeline, blend))
         ps.PixelShaderHasUAV = true;

#if GEN_GEN >= 9
      ps.PixelShaderPullsBary    = wm_prog_data->pulls_bary;
      ps.InputCoverageMaskState  = wm_prog_data->uses_sample_mask ?
                                   ICMS_INNER_CONSERVATIVE : ICMS_NONE;
#else
      ps.PixelShaderUsesInputCoverageMask = wm_prog_data->uses_sample_mask;
#endif
   }
}

static void
emit_3dstate_vf_topology(struct anv_pipeline *pipeline)
{
   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_TOPOLOGY), vft) {
      vft.PrimitiveTopologyType = pipeline->topology;
   }
}
#endif

static void
emit_3dstate_vf_statistics(struct anv_pipeline *pipeline)
{
   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_STATISTICS), vfs) {
      vfs.StatisticsEnable = true;
   }
}

static void
compute_kill_pixel(struct anv_pipeline *pipeline,
                   const VkPipelineMultisampleStateCreateInfo *ms_info,
                   const struct anv_subpass *subpass)
{
   if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
      pipeline->kill_pixel = false;
      return;
   }

   const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);

   /* This computes the KillPixel portion of the computation for whether or
    * not we want to enable the PMA fix on gen8 or gen9.  It's given by this
    * chunk of the giant formula:
    *
    *    (3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
    *     3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
    *     3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
    *     3DSTATE_PS_BLEND::AlphaTestEnable ||
    *     3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable)
    *
    * 3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable is always false and so is
    * 3DSTATE_PS_BLEND::AlphaTestEnable since Vulkan doesn't have a concept
    * of an alpha test.
    */
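   /* (The kill_pixel flag computed below is consumed at draw time as one
    * input to the gen8/gen9 PMA-fix decision in the command-buffer code.)
    */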
   pipeline->kill_pixel =
      subpass->has_ds_self_dep || wm_prog_data->uses_kill ||
      wm_prog_data->uses_omask ||
      (ms_info && ms_info->alphaToCoverageEnable);
}

static VkResult
genX(graphics_pipeline_create)(
    VkDevice                                    _device,
    struct anv_pipeline_cache *                 cache,
    const VkGraphicsPipelineCreateInfo*         pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipeline*                                 pPipeline)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_render_pass, pass, pCreateInfo->renderPass);
   struct anv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];
   struct anv_pipeline *pipeline;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);

   pipeline = vk_alloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pipeline == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_pipeline_init(pipeline, device, cache,
                              pCreateInfo, pAllocator);
   if (result != VK_SUCCESS) {
      vk_free2(&device->alloc, pAllocator, pipeline);
      return result;
   }

   assert(pCreateInfo->pVertexInputState);
   emit_vertex_input(pipeline, pCreateInfo->pVertexInputState);
   assert(pCreateInfo->pRasterizationState);
   emit_rs_state(pipeline, pCreateInfo->pRasterizationState,
                 pCreateInfo->pMultisampleState, pass, subpass);
   emit_ms_state(pipeline, pCreateInfo->pMultisampleState);
   emit_ds_state(pipeline, pCreateInfo->pDepthStencilState, pass, subpass);
   emit_cb_state(pipeline, pCreateInfo->pColorBlendState,
                           pCreateInfo->pMultisampleState);
   compute_kill_pixel(pipeline, pCreateInfo->pMultisampleState, subpass);

   emit_urb_setup(pipeline);

   emit_3dstate_clip(pipeline, pCreateInfo->pViewportState,
                     pCreateInfo->pRasterizationState);
   emit_3dstate_streamout(pipeline, pCreateInfo->pRasterizationState);

#if 0
   /* From gen7_vs_state.c */

   /**
    * From Graphics BSpec: 3D-Media-GPGPU Engine > 3D Pipeline Stages >
    * Geometry > Geometry Shader > State:
    *
    *     "Note: Because of corruption in IVB:GT2, software needs to flush the
    *     whole fixed function pipeline when the GS enable changes value in
    *     the 3DSTATE_GS."
    *
    * The hardware architects have clarified that in this context "flush the
    * whole fixed function pipeline" means to emit a PIPE_CONTROL with the "CS
    * Stall" bit set.
    */
   if (!device->info.is_haswell && !device->info.is_baytrail)
      gen7_emit_vs_workaround_flush(brw);
#endif

   emit_3dstate_vs(pipeline);
   emit_3dstate_hs_te_ds(pipeline, pCreateInfo->pTessellationState);
   emit_3dstate_gs(pipeline);
   emit_3dstate_sbe(pipeline);
   emit_3dstate_wm(pipeline, subpass, pCreateInfo->pColorBlendState,
                   pCreateInfo->pMultisampleState);
   emit_3dstate_ps(pipeline, pCreateInfo->pColorBlendState);
#if GEN_GEN >= 8
   emit_3dstate_ps_extra(pipeline, subpass, pCreateInfo->pColorBlendState);
   emit_3dstate_vf_topology(pipeline);
#endif
   emit_3dstate_vf_statistics(pipeline);

   *pPipeline = anv_pipeline_to_handle(pipeline);

   return pipeline->batch.status;
}

static VkResult
compute_pipeline_create(
    VkDevice                                    _device,
    struct anv_pipeline_cache *                 cache,
    const VkComputePipelineCreateInfo*          pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipeline*                                 pPipeline)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   const struct anv_physical_device *physical_device =
      &device->instance->physicalDevice;
   const struct gen_device_info *devinfo = &physical_device->info;
   struct anv_pipeline *pipeline;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO);

   pipeline = vk_alloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pipeline == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   pipeline->device = device;
   pipeline->layout = anv_pipeline_layout_from_handle(pCreateInfo->layout);

   pipeline->blend_state.map = NULL;

   result = anv_reloc_list_init(&pipeline->batch_relocs,
                                pAllocator ? pAllocator : &device->alloc);
   if (result != VK_SUCCESS) {
      vk_free2(&device->alloc, pAllocator, pipeline);
      return result;
   }
   pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
   pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
   pipeline->batch.relocs = &pipeline->batch_relocs;
   pipeline->batch.status = VK_SUCCESS;

   /* When we free the pipeline, we detect stages based on the NULL status
    * of various prog_data pointers.  Make them NULL by default.
    */
   memset(pipeline->shaders, 0, sizeof(pipeline->shaders));

   pipeline->active_stages = 0;

   pipeline->needs_data_cache = false;

   assert(pCreateInfo->stage.stage == VK_SHADER_STAGE_COMPUTE_BIT);
   ANV_FROM_HANDLE(anv_shader_module, module, pCreateInfo->stage.module);
   result = anv_pipeline_compile_cs(pipeline, cache, pCreateInfo, module,
                                    pCreateInfo->stage.pName,
                                    pCreateInfo->stage.pSpecializationInfo);
   if (result != VK_SUCCESS) {
      vk_free2(&device->alloc, pAllocator, pipeline);
      return result;
   }

   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);

   anv_pipeline_setup_l3_config(pipeline, cs_prog_data->base.total_shared > 0);

   uint32_t group_size = cs_prog_data->local_size[0] *
      cs_prog_data->local_size[1] * cs_prog_data->local_size[2];
   uint32_t remainder = group_size & (cs_prog_data->simd_size - 1);

   if (remainder > 0)
      pipeline->cs_right_mask = ~0u >> (32 - remainder);
   else
      pipeline->cs_right_mask = ~0u >> (32 - cs_prog_data->simd_size);
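
   /* Example (illustrative values): local_size = (10, 1, 1) compiled at
    * simd_size = 8 gives group_size = 10 and remainder = 2, so the mask is
    * 0x3 and only the two live channels of the final SIMD8 dispatch execute.
    * When group_size is a multiple of simd_size, the full mask (e.g. 0xff
    * for SIMD8) is used instead.
    */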

   const uint32_t vfe_curbe_allocation =
      ALIGN(cs_prog_data->push.per_thread.regs * cs_prog_data->threads +
            cs_prog_data->push.cross_thread.regs, 2);
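
   /* Example (illustrative values): 3 per-thread push registers across
    * 8 threads plus 2 cross-thread registers is 26 registers, which
    * ALIGN(..., 2) leaves at 26; an odd total such as 25 would be rounded
    * up to 26.
    */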

   const uint32_t subslices = MAX2(physical_device->subslice_total, 1);

   const struct anv_shader_bin *cs_bin =
      pipeline->shaders[MESA_SHADER_COMPUTE];

   anv_batch_emit(&pipeline->batch, GENX(MEDIA_VFE_STATE), vfe) {
#if GEN_GEN > 7
      vfe.StackSize              = 0;
#else
      vfe.GPGPUMode              = true;
#endif
      vfe.MaximumNumberofThreads =
         devinfo->max_cs_threads * subslices - 1;
      vfe.NumberofURBEntries     = GEN_GEN <= 7 ? 0 : 2;
      vfe.ResetGatewayTimer      = true;
#if GEN_GEN <= 8
      vfe.BypassGatewayControl   = true;
#endif
      vfe.URBEntryAllocationSize = GEN_GEN <= 7 ? 0 : 2;
      vfe.CURBEAllocationSize    = vfe_curbe_allocation;

      vfe.PerThreadScratchSpace = get_scratch_space(cs_bin);
      vfe.ScratchSpaceBasePointer =
         get_scratch_address(pipeline, MESA_SHADER_COMPUTE, cs_bin);
   }

   struct GENX(INTERFACE_DESCRIPTOR_DATA) desc = {
      .KernelStartPointer     = cs_bin->kernel.offset,

      .SamplerCount           = get_sampler_count(cs_bin),
      .BindingTableEntryCount = get_binding_table_entry_count(cs_bin),
      .BarrierEnable          = cs_prog_data->uses_barrier,
      .SharedLocalMemorySize  =
         encode_slm_size(GEN_GEN, cs_prog_data->base.total_shared),

#if !GEN_IS_HASWELL
      .ConstantURBEntryReadOffset = 0,
#endif
      .ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs,
#if GEN_GEN >= 8 || GEN_IS_HASWELL
      .CrossThreadConstantDataReadLength =
         cs_prog_data->push.cross_thread.regs,
#endif

      .NumberofThreadsinGPGPUThreadGroup = cs_prog_data->threads,
   };
   GENX(INTERFACE_DESCRIPTOR_DATA_pack)(NULL,
                                        pipeline->interface_descriptor_data,
                                        &desc);

   *pPipeline = anv_pipeline_to_handle(pipeline);

   return pipeline->batch.status;
}

VkResult genX(CreateGraphicsPipelines)(
    VkDevice                                    _device,
    VkPipelineCache                             pipelineCache,
    uint32_t                                    count,
    const VkGraphicsPipelineCreateInfo*         pCreateInfos,
    const VkAllocationCallbacks*                pAllocator,
    VkPipeline*                                 pPipelines)
{
   ANV_FROM_HANDLE(anv_pipeline_cache, pipeline_cache, pipelineCache);

   VkResult result = VK_SUCCESS;

   unsigned i;
   for (i = 0; i < count; i++) {
      result = genX(graphics_pipeline_create)(_device,
                                              pipeline_cache,
                                              &pCreateInfos[i],
                                              pAllocator, &pPipelines[i]);

      /* Bail out on the first error, as it is not obvious which error should
       * be reported when two different creations fail. */
      if (result != VK_SUCCESS)
         break;
   }

   for (; i < count; i++)
      pPipelines[i] = VK_NULL_HANDLE;

   return result;
}

VkResult genX(CreateComputePipelines)(
    VkDevice                                    _device,
    VkPipelineCache                             pipelineCache,
    uint32_t                                    count,
    const VkComputePipelineCreateInfo*          pCreateInfos,
    const VkAllocationCallbacks*                pAllocator,
    VkPipeline*                                 pPipelines)
{
   ANV_FROM_HANDLE(anv_pipeline_cache, pipeline_cache, pipelineCache);

   VkResult result = VK_SUCCESS;

   unsigned i;
   for (i = 0; i < count; i++) {
      result = compute_pipeline_create(_device, pipeline_cache,
                                       &pCreateInfos[i],
                                       pAllocator, &pPipelines[i]);

      /* Bail out on the first error, as it is not obvious which error should
       * be reported when two different creations fail. */
      if (result != VK_SUCCESS)
         break;
   }

   for (; i < count; i++)
      pPipelines[i] = VK_NULL_HANDLE;

   return result;
}