/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "main/bufferobj.h"
#include "main/context.h"
#include "main/enums.h"
#include "main/macros.h"

#include "brw_draw.h"
#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"

#ifndef NDEBUG
static bool
is_passthru_format(uint32_t format)
{
   switch (format) {
   case BRW_SURFACEFORMAT_R64_PASSTHRU:
   case BRW_SURFACEFORMAT_R64G64_PASSTHRU:
   case BRW_SURFACEFORMAT_R64G64B64_PASSTHRU:
   case BRW_SURFACEFORMAT_R64G64B64A64_PASSTHRU:
      return true;
   default:
      return false;
   }
}
#endif

static void
gen8_emit_vertices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   bool uses_edge_flag;

   brw_prepare_vertices(brw);
   brw_prepare_shader_draw_parameters(brw);

   uses_edge_flag = (ctx->Polygon.FrontMode != GL_FILL ||
                     ctx->Polygon.BackMode != GL_FILL);

   const struct brw_vs_prog_data *vs_prog_data =
      brw_vs_prog_data(brw->vs.base.prog_data);

   if (vs_prog_data->uses_vertexid || vs_prog_data->uses_instanceid) {
      unsigned vue = brw->vb.nr_enabled;

      /* The element for the edge flags must always be last, so we have to
       * insert the SGVS before it in that case.
       */
      if (uses_edge_flag) {
         assert(vue > 0);
         vue--;
      }

      WARN_ONCE(vue >= 33,
                "Trying to insert VID/IID past 33rd vertex element, "
                "need to reorder the vertex attributes.");

      unsigned dw1 = 0;
      if (vs_prog_data->uses_vertexid) {
         dw1 |= GEN8_SGVS_ENABLE_VERTEX_ID |
                (2 << GEN8_SGVS_VERTEX_ID_COMPONENT_SHIFT) |  /* .z channel */
                (vue << GEN8_SGVS_VERTEX_ID_ELEMENT_OFFSET_SHIFT);
      }

      if (vs_prog_data->uses_instanceid) {
         dw1 |= GEN8_SGVS_ENABLE_INSTANCE_ID |
                (3 << GEN8_SGVS_INSTANCE_ID_COMPONENT_SHIFT) |  /* .w channel */
                (vue << GEN8_SGVS_INSTANCE_ID_ELEMENT_OFFSET_SHIFT);
      }

      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_VF_SGVS << 16 | (2 - 2));
      OUT_BATCH(dw1);
      ADVANCE_BATCH();

      BEGIN_BATCH(3);
      OUT_BATCH(_3DSTATE_VF_INSTANCING << 16 | (3 - 2));
      OUT_BATCH(vue | GEN8_VF_INSTANCING_ENABLE);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
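      /* The VS doesn't use gl_VertexID or gl_InstanceID, so emit
       * 3DSTATE_VF_SGVS with everything disabled.
       */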
      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_VF_SGVS << 16 | (2 - 2));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* Normally we don't need an element for the SGVS attribute because the
    * 3DSTATE_VF_SGVS instruction lets you store the generated attribute in
    * an element that is past the list in 3DSTATE_VERTEX_ELEMENTS. However,
    * if we're using draw parameters then we need an element for those
    * values. Additionally, if there is an edge flag element then the SGVS
    * can't be inserted past that, so we need a dummy element to ensure that
    * the edge flag is the last one.
    */
   const bool needs_sgvs_element = (vs_prog_data->uses_basevertex ||
                                    vs_prog_data->uses_baseinstance ||
                                    ((vs_prog_data->uses_instanceid ||
                                      vs_prog_data->uses_vertexid) &&
                                     uses_edge_flag));
   const unsigned nr_elements =
      brw->vb.nr_enabled + needs_sgvs_element + vs_prog_data->uses_drawid;

   /* If the VS doesn't read any inputs (calculating vertex position from
    * a state variable for some reason, for example), emit a single pad
    * VERTEX_ELEMENT struct and bail.
    *
    * The stale VB state stays in place, but it doesn't do anything unless
    * a VE loads from it.
    */
   if (nr_elements == 0) {
      BEGIN_BATCH(3);
      OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | (3 - 2));
      OUT_BATCH((0 << GEN6_VE0_INDEX_SHIFT) |
                GEN6_VE0_VALID |
                (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
                (0 << BRW_VE0_SRC_OFFSET_SHIFT));
      OUT_BATCH((BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_0_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_1_FLT << BRW_VE1_COMPONENT_3_SHIFT));
      ADVANCE_BATCH();
      return;
   }

   /* Now emit 3DSTATE_VERTEX_BUFFERS and 3DSTATE_VERTEX_ELEMENTS packets. */
   const bool uses_draw_params =
      vs_prog_data->uses_basevertex ||
      vs_prog_data->uses_baseinstance;
   const unsigned nr_buffers = brw->vb.nr_buffers +
      uses_draw_params + vs_prog_data->uses_drawid;

   if (nr_buffers) {
      assert(nr_buffers <= 33);

      BEGIN_BATCH(1 + 4 * nr_buffers);
      OUT_BATCH((_3DSTATE_VERTEX_BUFFERS << 16) | (4 * nr_buffers - 1));
      for (unsigned i = 0; i < brw->vb.nr_buffers; i++) {
         const struct brw_vertex_buffer *buffer = &brw->vb.buffers[i];
         EMIT_VERTEX_BUFFER_STATE(brw, i, buffer->bo,
                                  buffer->offset,
                                  buffer->offset + buffer->size,
                                  buffer->stride, 0 /* unused */);
      }

      if (uses_draw_params) {
         EMIT_VERTEX_BUFFER_STATE(brw, brw->vb.nr_buffers,
                                  brw->draw.draw_params_bo,
                                  brw->draw.draw_params_offset,
                                  brw->draw.draw_params_bo->size,
                                  0 /* stride */,
                                  0 /* unused */);
      }

      if (vs_prog_data->uses_drawid) {
         EMIT_VERTEX_BUFFER_STATE(brw, brw->vb.nr_buffers + 1,
                                  brw->draw.draw_id_bo,
                                  brw->draw.draw_id_offset,
                                  brw->draw.draw_id_bo->size,
                                  0 /* stride */,
                                  0 /* unused */);
      }
      ADVANCE_BATCH();
   }

   /* The hardware allows one more VERTEX_ELEMENTS than VERTEX_BUFFERS,
    * presumably for VertexID/InstanceID.
    */
   assert(nr_elements <= 34);

   struct brw_vertex_element *gen6_edgeflag_input = NULL;

   BEGIN_BATCH(1 + nr_elements * 2);
   OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | (2 * nr_elements - 1));
   for (unsigned i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      uint32_t format = brw_get_vertex_surface_type(brw, input->glarray);
      uint32_t comp0 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp1 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp2 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp3 = BRW_VE1_COMPONENT_STORE_SRC;

      /* From the BDW PRM, Volume 2d, page 588 (VERTEX_ELEMENT_STATE):
       * "Any SourceElementFormat of *64*_PASSTHRU cannot be used with an
       * element which has edge flag enabled."
       */
      assert(!(is_passthru_format(format) && uses_edge_flag));

      /* The gen4 driver expects edgeflag to come in as a float, and passes
       * that float on to the tests in the clipper. Mesa's current vertex
       * attribute value for EdgeFlag is stored as a float, which works out.
       * glEdgeFlagPointer, on the other hand, gives us an unnormalized
       * integer ubyte. Just rewrite that to convert to a float.
       */
      if (input == &brw->vb.inputs[VERT_ATTRIB_EDGEFLAG]) {
         /* Gen6+ passes edgeflag as sideband along with the vertex, instead
          * of in the VUE. We have to upload it sideband as the last vertex
          * element according to the B-Spec.
          */
         gen6_edgeflag_input = input;
         continue;
      }

      switch (input->glarray->Size) {
      case 0: comp0 = BRW_VE1_COMPONENT_STORE_0; /* fallthrough */
      case 1: comp1 = BRW_VE1_COMPONENT_STORE_0; /* fallthrough */
      case 2: comp2 = BRW_VE1_COMPONENT_STORE_0; /* fallthrough */
      case 3:
         if (input->glarray->Doubles) {
            comp3 = BRW_VE1_COMPONENT_STORE_0;
         } else if (input->glarray->Integer) {
            comp3 = BRW_VE1_COMPONENT_STORE_1_INT;
         } else {
            comp3 = BRW_VE1_COMPONENT_STORE_1_FLT;
         }

         break;
      }

      /* From the BDW PRM, Volume 2d, page 586 (VERTEX_ELEMENT_STATE):
       *
       * "When SourceElementFormat is set to one of the *64*_PASSTHRU
       * formats, 64-bit components are stored in the URB without any
       * conversion. In this case, vertex elements must be written as 128
       * or 256 bits, with VFCOMP_STORE_0 being used to pad the output
       * as required. E.g., if R64_PASSTHRU is used to copy a 64-bit Red
       * component into the URB, Component 1 must be specified as
       * VFCOMP_STORE_0 (with Components 2,3 set to VFCOMP_NOSTORE)
       * in order to output a 128-bit vertex element, or Components 1-3 must
       * be specified as VFCOMP_STORE_0 in order to output a 256-bit vertex
       * element. Likewise, use of R64G64B64_PASSTHRU requires Component 3
       * to be specified as VFCOMP_STORE_0 in order to output a 256-bit vertex
       * element."
       */
      if (input->glarray->Doubles && !input->is_dual_slot) {
         /* Store vertex elements which correspond to double and dvec2 vertex
          * shader inputs as 128-bit vertex elements, instead of 256-bits.
          */
         comp2 = BRW_VE1_COMPONENT_NOSTORE;
         comp3 = BRW_VE1_COMPONENT_NOSTORE;
      }

      OUT_BATCH((input->buffer << GEN6_VE0_INDEX_SHIFT) |
                GEN6_VE0_VALID |
                (format << BRW_VE0_FORMAT_SHIFT) |
                (input->offset << BRW_VE0_SRC_OFFSET_SHIFT));

      OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                (comp3 << BRW_VE1_COMPONENT_3_SHIFT));
   }

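   /* Emit the extra element reserved above: it either sources gl_BaseVertex /
    * gl_BaseInstance from the draw parameters buffer, or it just stores zeros
    * when it only exists to keep the edge flag element last.
    */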
   if (needs_sgvs_element) {
      if (vs_prog_data->uses_basevertex ||
          vs_prog_data->uses_baseinstance) {
         OUT_BATCH(GEN6_VE0_VALID |
                   brw->vb.nr_buffers << GEN6_VE0_INDEX_SHIFT |
                   BRW_SURFACEFORMAT_R32G32_UINT << BRW_VE0_FORMAT_SHIFT);
         OUT_BATCH((BRW_VE1_COMPONENT_STORE_SRC << BRW_VE1_COMPONENT_0_SHIFT) |
                   (BRW_VE1_COMPONENT_STORE_SRC << BRW_VE1_COMPONENT_1_SHIFT) |
                   (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
                   (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_3_SHIFT));
      } else {
         OUT_BATCH(GEN6_VE0_VALID);
         OUT_BATCH((BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_0_SHIFT) |
                   (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
                   (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
                   (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_3_SHIFT));
      }
   }

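   /* gl_DrawID gets its own element, sourced from the draw-id buffer that was
    * uploaded at vertex buffer slot brw->vb.nr_buffers + 1 above.
    */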
   if (vs_prog_data->uses_drawid) {
      OUT_BATCH(GEN6_VE0_VALID |
                ((brw->vb.nr_buffers + 1) << GEN6_VE0_INDEX_SHIFT) |
                (BRW_SURFACEFORMAT_R32_UINT << BRW_VE0_FORMAT_SHIFT));
      OUT_BATCH((BRW_VE1_COMPONENT_STORE_SRC << BRW_VE1_COMPONENT_0_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_3_SHIFT));
   }

   if (gen6_edgeflag_input) {
      uint32_t format =
         brw_get_vertex_surface_type(brw, gen6_edgeflag_input->glarray);

      OUT_BATCH((gen6_edgeflag_input->buffer << GEN6_VE0_INDEX_SHIFT) |
                GEN6_VE0_VALID |
                GEN6_VE0_EDGE_FLAG_ENABLE |
                (format << BRW_VE0_FORMAT_SHIFT) |
                (gen6_edgeflag_input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
      OUT_BATCH((BRW_VE1_COMPONENT_STORE_SRC << BRW_VE1_COMPONENT_0_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_3_SHIFT));
   }
   ADVANCE_BATCH();

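   /* Emit 3DSTATE_VF_INSTANCING for each enabled element; instancing is
    * enabled whenever the backing buffer has a non-zero instance step rate.
    */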
   for (unsigned i = 0, j = 0; i < brw->vb.nr_enabled; i++) {
      const struct brw_vertex_element *input = brw->vb.enabled[i];
      const struct brw_vertex_buffer *buffer = &brw->vb.buffers[input->buffer];
      unsigned element_index;

      /* The edge flag element is reordered to be the last one in the code
       * above so we need to compensate for that in the element indices used
       * below.
       */
      if (input == gen6_edgeflag_input)
         element_index = nr_elements - 1;
      else
         element_index = j++;

      BEGIN_BATCH(3);
      OUT_BATCH(_3DSTATE_VF_INSTANCING << 16 | (3 - 2));
      OUT_BATCH(element_index |
                (buffer->step_rate ? GEN8_VF_INSTANCING_ENABLE : 0));
      OUT_BATCH(buffer->step_rate);
      ADVANCE_BATCH();
   }

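   /* gl_DrawID is constant for the whole draw, so its element gets no
    * instancing enable and a step rate of zero.
    */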
   if (vs_prog_data->uses_drawid) {
      const unsigned element = brw->vb.nr_enabled + needs_sgvs_element;
      BEGIN_BATCH(3);
      OUT_BATCH(_3DSTATE_VF_INSTANCING << 16 | (3 - 2));
      OUT_BATCH(element);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
}

const struct brw_tracked_state gen8_vertices = {
   .dirty = {
      .mesa = _NEW_POLYGON,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_VERTICES |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = gen8_emit_vertices,
};

static void
gen8_emit_index_buffer(struct brw_context *brw)
{
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
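   /* Gen9+ uses the Skylake MOCS value for write-back caching; Gen8 uses the
    * Broadwell one.
    */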
   uint32_t mocs_wb = brw->gen >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;

   if (index_buffer == NULL)
      return;

   BEGIN_BATCH(5);
   OUT_BATCH(CMD_INDEX_BUFFER << 16 | (5 - 2));
   OUT_BATCH(brw_get_index_type(index_buffer->type) | mocs_wb);
   OUT_RELOC64(brw->ib.bo, I915_GEM_DOMAIN_VERTEX, 0, 0);
   OUT_BATCH(brw->ib.size);
   ADVANCE_BATCH();
}

const struct brw_tracked_state gen8_index_buffer = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_INDEX_BUFFER,
   },
   .emit = gen8_emit_index_buffer,
};

static void
gen8_emit_vf_topology(struct brw_context *brw)
{
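   /* brw->primitive already holds the hardware primitive topology type
    * (e.g. _3DPRIM_TRILIST) for the current draw.
    */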
   BEGIN_BATCH(2);
   OUT_BATCH(_3DSTATE_VF_TOPOLOGY << 16 | (2 - 2));
   OUT_BATCH(brw->primitive);
   ADVANCE_BATCH();
}

const struct brw_tracked_state gen8_vf_topology = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BLORP |
             BRW_NEW_PRIMITIVE,
   },
   .emit = gen8_emit_vf_topology,
};