/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */

#include "util/u_memory.h"
#include "draw/draw_context.h"
#include "draw/draw_private.h"
#include "draw/draw_vbuf.h"
#include "draw/draw_vertex.h"
#include "draw/draw_pt.h"
#include "draw/draw_gs.h"
#include "translate/translate.h"
#include "translate/translate_cache.h"

/* The simplest 'middle end' in the new vertex code.
 *
 * The responsibilities of a middle end are to:
 *  - perform vertex fetch using
 *       - draw vertex element/buffer state
 *       - a list of fetch indices we received as an input
 *  - run the vertex shader
 *  - cliptest
 *  - clip coord calculation
 *  - viewport transformation
 *  - if necessary, run the primitive pipeline, passing it:
 *       - a linear array of vertex_header vertices constructed here
 *       - a set of draw indices we received as an input
 *  - otherwise, drive the hw backend:
 *       - allocate space for hardware format vertices
 *       - translate the vertex-shader output vertices to hw format
 *       - call the backend draw functions.
 *
 * For convenience, we provide a helper function to drive the hardware
 * backend given similar inputs to those required to run the pipeline.
 *
 * In passthrough mode, many of these actions are disabled or are no-ops,
 * so we end up doing only:
 *
 *   - perform vertex fetch
 *   - drive the hw backend
 *
 * I.e., basically just vertex fetch into post-VS-format vertices,
 * followed by a call to the backend helper function.
 */
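
/*
 * Illustrative only -- a minimal sketch of the call sequence a frontend
 * (the real caller lives in draw_pt.c) uses to drive this middle end;
 * exact call sites there may differ:
 *
 *    unsigned max_vertices;
 *    middle->prepare(middle, prim, opt, &max_vertices);
 *    middle->run_linear(middle, start, count, prim_flags);
 *    middle->finish(middle);
 */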


struct fetch_emit_middle_end {
   struct draw_pt_middle_end base;
   struct draw_context *draw;

   struct translate *translate;
   const struct vertex_info *vinfo;

   /* Cache the point size somewhere its address won't change:
    */
   float point_size;

   struct translate_cache *cache;
};


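/**
 * Prepare for drawing: query the backend's hardware vertex layout and
 * build a translate key that maps the API vertex elements directly to
 * that layout.
 */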
static void fetch_emit_prepare( struct draw_pt_middle_end *middle,
                                unsigned prim,
                                unsigned opt,
                                unsigned *max_vertices )
{
   struct fetch_emit_middle_end *feme = (struct fetch_emit_middle_end *)middle;
   struct draw_context *draw = feme->draw;
   const struct vertex_info *vinfo;
   unsigned i, dst_offset;
   struct translate_key key;
   unsigned gs_out_prim = (draw->gs.geometry_shader ?
                           draw->gs.geometry_shader->output_primitive :
                           prim);

   draw->render->set_primitive(draw->render, gs_out_prim);

   /* Must do this after set_primitive() above:
    */
   vinfo = feme->vinfo = draw->render->get_vertex_info(draw->render);

   /* Transform from API vertices to HW vertices, skipping the
    * pipeline_vertex intermediate step.
    */
   dst_offset = 0;
   memset(&key, 0, sizeof(key));

   for (i = 0; i < vinfo->num_attribs; i++) {
      const struct pipe_vertex_element *src = &draw->pt.vertex_element[vinfo->attrib[i].src_index];

      unsigned emit_sz = 0;
      unsigned input_format = src->src_format;
      unsigned input_buffer = src->vertex_buffer_index;
      unsigned input_offset = src->src_offset;
      unsigned output_format;

      output_format = draw_translate_vinfo_format(vinfo->attrib[i].emit);
      emit_sz = draw_translate_vinfo_size(vinfo->attrib[i].emit);

      if (vinfo->attrib[i].emit == EMIT_OMIT)
         continue;

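      /* Point size does not live in the API vertex buffers; redirect this
       * attribute to an extra "buffer" (index nr_vertex_buffers), which is
       * bound to &feme->point_size below with a stride of zero so every
       * vertex fetches the same value.
       */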
      if (vinfo->attrib[i].emit == EMIT_1F_PSIZE) {
         input_format = PIPE_FORMAT_R32_FLOAT;
         input_buffer = draw->pt.nr_vertex_buffers;
         input_offset = 0;
      }

      key.element[i].type = TRANSLATE_ELEMENT_NORMAL;
      key.element[i].input_format = input_format;
      key.element[i].input_buffer = input_buffer;
      key.element[i].input_offset = input_offset;
      key.element[i].instance_divisor = src->instance_divisor;
      key.element[i].output_format = output_format;
      key.element[i].output_offset = dst_offset;

      dst_offset += emit_sz;
   }

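   /* vinfo->size is measured in dwords, so the output stride in bytes is
    * size * 4.
    */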
   key.nr_elements = vinfo->num_attribs;
   key.output_stride = vinfo->size * 4;

   /* Don't bother with caching at this stage:
    */
   if (!feme->translate ||
       translate_key_compare(&feme->translate->key, &key) != 0)
   {
      translate_key_sanitize(&key);
      feme->translate = translate_cache_find(feme->cache,
                                             &key);

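      /* Bind the extra point-size "buffer": with a stride of zero, every
       * vertex reads the same float cached in feme->point_size.
       */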
      feme->translate->set_buffer(feme->translate,
                                  draw->pt.nr_vertex_buffers,
                                  &feme->point_size,
                                  0,
                                  ~0);
   }

   feme->point_size = draw->rasterizer->point_size;

   for (i = 0; i < draw->pt.nr_vertex_buffers; i++) {
      feme->translate->set_buffer(feme->translate,
                                  i,
                                  ((char *)draw->pt.user.vbuffer[i].map +
                                   draw->pt.vertex_buffer[i].buffer_offset),
                                  draw->pt.vertex_buffer[i].stride,
                                  draw->pt.max_index);
   }

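   /* Report how many hardware vertices fit in one backend vertex buffer
    * at this output stride.
    */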
   *max_vertices = (draw->render->max_vertex_buffer_bytes /
                    (vinfo->size * 4));
}


static void
fetch_emit_bind_parameters(struct draw_pt_middle_end *middle)
{
   /* Nothing to bind for this middle end. */
}


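/**
 * Indexed path: fetch the vertices named by fetch_elts[], translate them
 * to hardware format, then draw them with the draw_elts[] index list.
 */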
static void fetch_emit_run( struct draw_pt_middle_end *middle,
                            const unsigned *fetch_elts,
                            unsigned fetch_count,
                            const ushort *draw_elts,
                            unsigned draw_count,
                            unsigned prim_flags )
{
   struct fetch_emit_middle_end *feme = (struct fetch_emit_middle_end *)middle;
   struct draw_context *draw = feme->draw;
   void *hw_verts;

   /* XXX: need to flush to get prim_vbuf.c to release its allocation??
    */
   draw_do_flush( draw, DRAW_FLUSH_BACKEND );

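   /* Note: unlike the linear paths below, the allocation's return value is
    * not checked here; a failure surfaces as a NULL pointer from
    * map_vertices() and is handled there.
    */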
   draw->render->allocate_vertices( draw->render,
                                    (ushort)feme->translate->key.output_stride,
                                    (ushort)fetch_count );

   hw_verts = draw->render->map_vertices( draw->render );
   if (!hw_verts) {
      debug_warn_once("vertex buffer allocation failed (out of memory?)");
      return;
   }

   /* Single routine to fetch vertices and emit HW verts.
    */
   feme->translate->run_elts( feme->translate,
                              fetch_elts,
                              fetch_count,
                              draw->start_instance,
                              draw->instance_id,
                              hw_verts );

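   /* Debug aid: change the 0 to 1 to dump each emitted hardware vertex. */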
   if (0) {
      unsigned i;
      for (i = 0; i < fetch_count; i++) {
         debug_printf("\n\nvertex %d:\n", i);
         draw_dump_emitted_vertex( feme->vinfo,
                                   (const uint8_t *)hw_verts + feme->vinfo->size * 4 * i );
      }
   }

   draw->render->unmap_vertices( draw->render,
                                 0,
                                 (ushort)(fetch_count - 1) );

   /* XXX: Draw arrays path to avoid re-emitting index list again and
    * again.
    */
   draw->render->draw_elements( draw->render,
                                draw_elts,
                                draw_count );

   /* Done -- that was easy, wasn't it:
    */
   draw->render->release_vertices( draw->render );

}


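/**
 * Linear path: fetch and emit vertices [start, start + count) and draw
 * them as a plain, non-indexed array.
 */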
static void fetch_emit_run_linear( struct draw_pt_middle_end *middle,
                                   unsigned start,
                                   unsigned count,
                                   unsigned prim_flags )
{
   struct fetch_emit_middle_end *feme = (struct fetch_emit_middle_end *)middle;
   struct draw_context *draw = feme->draw;
   void *hw_verts;

   /* XXX: need to flush to get prim_vbuf.c to release its allocation??
    */
   draw_do_flush( draw, DRAW_FLUSH_BACKEND );

   if (!draw->render->allocate_vertices( draw->render,
                                         (ushort)feme->translate->key.output_stride,
                                         (ushort)count ))
      goto fail;

   hw_verts = draw->render->map_vertices( draw->render );
   if (!hw_verts)
      goto fail;

   /* Single routine to fetch vertices and emit HW verts.
    */
   feme->translate->run( feme->translate,
                         start,
                         count,
                         draw->start_instance,
                         draw->instance_id,
                         hw_verts );

   if (0) {
      unsigned i;
      for (i = 0; i < count; i++) {
         debug_printf("\n\nvertex %d:\n", i);
         draw_dump_emitted_vertex( feme->vinfo,
                                   (const uint8_t *)hw_verts + feme->vinfo->size * 4 * i );
      }
   }

   draw->render->unmap_vertices( draw->render, 0, count - 1 );

   /* XXX: Draw arrays path to avoid re-emitting index list again and
    * again.
    */
   draw->render->draw_arrays( draw->render, 0, count );

   /* Done -- that was easy, wasn't it:
    */
   draw->render->release_vertices( draw->render );
   return;

fail:
   debug_warn_once("allocate or map of vertex buffer failed (out of memory?)");
   return;
}


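/**
 * Variant of the linear path: vertices are fetched linearly from
 * [start, start + count) but drawn through the caller-supplied index
 * list draw_elts[].
 */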
static boolean fetch_emit_run_linear_elts( struct draw_pt_middle_end *middle,
                                           unsigned start,
                                           unsigned count,
                                           const ushort *draw_elts,
                                           unsigned draw_count,
                                           unsigned prim_flags )
{
   struct fetch_emit_middle_end *feme = (struct fetch_emit_middle_end *)middle;
   struct draw_context *draw = feme->draw;
   void *hw_verts;

   /* XXX: need to flush to get prim_vbuf.c to release its allocation??
    */
   draw_do_flush( draw, DRAW_FLUSH_BACKEND );

   if (!draw->render->allocate_vertices( draw->render,
                                         (ushort)feme->translate->key.output_stride,
                                         (ushort)count ))
      return FALSE;

   hw_verts = draw->render->map_vertices( draw->render );
   if (!hw_verts)
      return FALSE;

   /* Single routine to fetch vertices and emit HW verts.
    */
   feme->translate->run( feme->translate,
                         start,
                         count,
                         draw->start_instance,
                         draw->instance_id,
                         hw_verts );

   draw->render->unmap_vertices( draw->render, 0, (ushort)(count - 1) );

   /* XXX: Draw arrays path to avoid re-emitting index list again and
    * again.
    */
   draw->render->draw_elements( draw->render,
                                draw_elts,
                                draw_count );

   /* Done -- that was easy, wasn't it:
    */
   draw->render->release_vertices( draw->render );

   return TRUE;
}


static void fetch_emit_finish( struct draw_pt_middle_end *middle )
{
   /* nothing to do */
}


static void fetch_emit_destroy( struct draw_pt_middle_end *middle )
{
   struct fetch_emit_middle_end *feme = (struct fetch_emit_middle_end *)middle;

   if (feme->cache)
      translate_cache_destroy(feme->cache);

   FREE(middle);
}


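/**
 * Create the fetch/emit middle end and hook up its vtable entries.
 */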
struct draw_pt_middle_end *draw_pt_fetch_emit( struct draw_context *draw )
{
   struct fetch_emit_middle_end *fetch_emit = CALLOC_STRUCT( fetch_emit_middle_end );
   if (!fetch_emit)
      return NULL;

   fetch_emit->cache = translate_cache_create();
   if (!fetch_emit->cache) {
      FREE(fetch_emit);
      return NULL;
   }

   fetch_emit->base.prepare = fetch_emit_prepare;
   fetch_emit->base.bind_parameters = fetch_emit_bind_parameters;
   fetch_emit->base.run = fetch_emit_run;
   fetch_emit->base.run_linear = fetch_emit_run_linear;
   fetch_emit->base.run_linear_elts = fetch_emit_run_linear_elts;
   fetch_emit->base.finish = fetch_emit_finish;
   fetch_emit->base.destroy = fetch_emit_destroy;

   fetch_emit->draw = draw;

   return &fetch_emit->base;
}