/**************************************************************************

Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
VMware, Inc.

All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */

#include "main/glheader.h"
#include "main/imports.h"
#include "main/mtypes.h"
#include "main/light.h"
#include "main/enums.h"
#include "main/state.h"

#include "vbo/vbo.h"
#include "tnl/tnl.h"
#include "tnl/t_pipeline.h"

#include "radeon_common.h"
#include "radeon_context.h"
#include "radeon_state.h"
#include "radeon_ioctl.h"
#include "radeon_tcl.h"
#include "radeon_swtcl.h"
#include "radeon_maos.h"
#include "radeon_common_context.h"



/*
 * Render unclipped vertex buffers by emitting vertices directly to
 * dma buffers.  Use strip/fan hardware primitives where possible.
 * Try to simulate missing primitives with indexed vertices.
 */
#define HAVE_POINTS      1
#define HAVE_LINES       1
#define HAVE_LINE_LOOP   0
#define HAVE_LINE_STRIPS 1
#define HAVE_TRIANGLES   1
#define HAVE_TRI_STRIPS  1
#define HAVE_TRI_FANS    1
#define HAVE_QUADS       0
#define HAVE_QUAD_STRIPS 0
#define HAVE_POLYGONS    1
#define HAVE_ELTS        1


#define HW_POINTS            RADEON_CP_VC_CNTL_PRIM_TYPE_POINT
#define HW_LINES             RADEON_CP_VC_CNTL_PRIM_TYPE_LINE
#define HW_LINE_LOOP         0
#define HW_LINE_STRIP        RADEON_CP_VC_CNTL_PRIM_TYPE_LINE_STRIP
#define HW_TRIANGLES         RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST
#define HW_TRIANGLE_STRIP_0  RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_STRIP
#define HW_TRIANGLE_STRIP_1  0
#define HW_TRIANGLE_FAN      RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_FAN
#define HW_QUADS             0
#define HW_QUAD_STRIP        0
#define HW_POLYGON           RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_FAN
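
/* The HAVE_* flags tell the t_dd_dmatmp2.h template (included below)
 * which primitives the CP handles natively; those marked 0 -- quads,
 * quad strips and line loops -- are presumably simulated with indexed
 * vertices, as the comment above says.  Note that HW_POLYGON reuses
 * the triangle-fan primitive type, which is why radeonTclPrimitive()
 * has to patch the flat-shade provoking vertex for GL_POLYGON.
 */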


static GLboolean discrete_prim[0x10] = {
   0,				/* 0 none */
   1,				/* 1 points */
   1,				/* 2 lines */
   0,				/* 3 line_strip */
   1,				/* 4 tri_list */
   0,				/* 5 tri_fan */
   0,				/* 6 tri_type2 */
   1,				/* 7 rect list (unused) */
   1,				/* 8 3vert point */
   1,				/* 9 3vert line */
   0,
   0,
   0,
   0,
   0,
   0,
};
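
/* A primitive type is "discrete" when its vertices form independent
 * groups (points, line lists, tri lists), so more vertices can simply
 * be appended to an open primitive.  radeonTclPrimitive() uses this
 * table to force a new primitive packet for connected types (strips,
 * fans) even when the primitive type itself is unchanged.
 */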


#define LOCAL_VARS r100ContextPtr rmesa = R100_CONTEXT(ctx)
#define ELT_TYPE  GLushort

#define ELT_INIT(prim, hw_prim) \
   radeonTclPrimitive( ctx, prim, hw_prim | RADEON_CP_VC_CNTL_PRIM_WALK_IND )

#define GET_MESA_ELTS() rmesa->tcl.Elts


/* Don't really know how many elts will fit in what's left of cmdbuf,
 * as there is state to emit, etc:
 */

/* Testing on isosurf shows a maximum around here.  Don't know if it's
 * the card or driver or kernel module that is causing the behaviour.
 */
#define GET_MAX_HW_ELTS() 300


#define RESET_STIPPLE() do {			\
   RADEON_STATECHANGE( rmesa, lin );		\
   radeonEmitState(&rmesa->radeon);		\
} while (0)

#define AUTO_STIPPLE( mode ) do {		\
   RADEON_STATECHANGE( rmesa, lin );		\
   if (mode)					\
      rmesa->hw.lin.cmd[LIN_RE_LINE_PATTERN] |=	\
	 RADEON_LINE_PATTERN_AUTO_RESET;	\
   else						\
      rmesa->hw.lin.cmd[LIN_RE_LINE_PATTERN] &=	\
	 ~RADEON_LINE_PATTERN_AUTO_RESET;	\
   radeonEmitState(&rmesa->radeon);		\
} while (0)



#define ALLOC_ELTS(nr)	radeonAllocElts( rmesa, nr )

static GLushort *radeonAllocElts( r100ContextPtr rmesa, GLuint nr )
{
   if (rmesa->radeon.dma.flush)
      rmesa->radeon.dma.flush( &rmesa->radeon.glCtx );

   radeonEmitAOS( rmesa,
		  rmesa->radeon.tcl.aos_count, 0 );

   return radeonAllocEltsOpenEnded( rmesa, rmesa->tcl.vertex_format,
				    rmesa->tcl.hw_primitive, nr );
}

#define CLOSE_ELTS()  if (0) RADEON_NEWPRIM( rmesa )



/* TODO: Try to extend existing primitive if both are identical,
 * discrete and there are no intervening state changes.  (Somewhat
 * duplicates changes to DrawArrays code)
 */
static void radeonEmitPrim( struct gl_context *ctx,
			    GLenum prim,
			    GLuint hwprim,
			    GLuint start,
			    GLuint count)
{
   r100ContextPtr rmesa = R100_CONTEXT( ctx );
   radeonTclPrimitive( ctx, prim, hwprim );

   radeonEmitAOS( rmesa,
		  rmesa->radeon.tcl.aos_count,
		  start );

   /* Why couldn't this packet have taken an offset param?
    */
   radeonEmitVbufPrim( rmesa,
		       rmesa->tcl.vertex_format,
		       rmesa->tcl.hw_primitive,
		       count - start );
}

#define EMIT_PRIM( ctx, prim, hwprim, start, count ) do {	\
   radeonEmitPrim( ctx, prim, hwprim, start, count );		\
   (void) rmesa; } while (0)

#define MAX_CONVERSION_SIZE 40

/* Try & join small primitives
 */
#if 0
#define PREFER_DISCRETE_ELT_PRIM( NR, PRIM ) 0
#else
#define PREFER_DISCRETE_ELT_PRIM( NR, PRIM )			\
   ((NR) < 20 ||						\
    ((NR) < 40 &&						\
     rmesa->tcl.hw_primitive == (PRIM|				\
				 RADEON_CP_VC_CNTL_PRIM_WALK_IND|	\
				 RADEON_CP_VC_CNTL_TCL_ENABLE)))
#endif
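
/* Rough intent of the heuristic above: short runs (under 20 verts, or
 * under 40 when a matching indexed discrete primitive is already open)
 * are presumably cheaper to append to one PRIM_WALK_IND primitive than
 * to start a fresh vertex-buffer primitive.  The 20/40 cutoffs look as
 * empirical as GET_MAX_HW_ELTS above.
 */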

#ifdef MESA_BIG_ENDIAN
/* We could do without (most of) this ugliness if dest was always 32 bit word aligned... */
#define EMIT_ELT(dest, offset, x) do {				\
	int off = offset + ( ( (uintptr_t)dest & 0x2 ) >> 1 );	\
	GLushort *des = (GLushort *)( (uintptr_t)dest & ~0x2 );	\
	(des)[ off + 1 - 2 * ( off & 1 ) ] = (GLushort)(x);	\
	(void)rmesa; } while (0)
#else
#define EMIT_ELT(dest, offset, x) do {		\
	(dest)[offset] = (GLushort) (x);	\
	(void)rmesa; } while (0)
#endif

#define EMIT_TWO_ELTS(dest, offset, x, y)  *(GLuint *)(dest+offset) = ((y)<<16)|(x);
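
/* A worked example for the big-endian EMIT_ELT: `des` is `dest`
 * rounded down to 32-bit alignment and `off` is the element index
 * relative to it, so `off + 1 - 2 * (off & 1)` swaps the two 16-bit
 * halves of each 32-bit word (0<->1, 2<->3, ...).  That matches the
 * ((y)<<16)|(x) packing of EMIT_TWO_ELTS on a big-endian CPU; a single
 * 32-bit byte swap when the buffer is uploaded then presumably puts
 * both layouts right for the little-endian GPU.
 */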



#define TAG(x) tcl_##x
#include "tnl_dd/t_dd_dmatmp2.h"

/**********************************************************************/
/*                      External entrypoints                          */
/**********************************************************************/

void radeonEmitPrimitive( struct gl_context *ctx,
			  GLuint first,
			  GLuint last,
			  GLuint flags )
{
   tcl_render_tab_verts[flags&PRIM_MODE_MASK]( ctx, first, last, flags );
}

void radeonEmitEltPrimitive( struct gl_context *ctx,
			     GLuint first,
			     GLuint last,
			     GLuint flags )
{
   tcl_render_tab_elts[flags&PRIM_MODE_MASK]( ctx, first, last, flags );
}

void radeonTclPrimitive( struct gl_context *ctx,
			 GLenum prim,
			 int hw_prim )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   GLuint se_cntl;
   GLuint newprim = hw_prim | RADEON_CP_VC_CNTL_TCL_ENABLE;

   radeon_prepare_render(&rmesa->radeon);
   if (rmesa->radeon.NewGLState)
      radeonValidateState( ctx );

   if (newprim != rmesa->tcl.hw_primitive ||
       !discrete_prim[hw_prim&0xf]) {
      RADEON_NEWPRIM( rmesa );
      rmesa->tcl.hw_primitive = newprim;
   }

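   /* GL_POLYGON takes its flat-shade colour from the first vertex.
    * Since HW_POLYGON is really a triangle fan, whose first vertex is
    * shared by every triangle, selecting vertex 0 as the provoking
    * vertex gives the correct result; everything else uses the GL
    * default of the last vertex.
    */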
   se_cntl = rmesa->hw.set.cmd[SET_SE_CNTL];
   se_cntl &= ~RADEON_FLAT_SHADE_VTX_LAST;

   if (prim == GL_POLYGON && ctx->Light.ShadeModel == GL_FLAT)
      se_cntl |= RADEON_FLAT_SHADE_VTX_0;
   else
      se_cntl |= RADEON_FLAT_SHADE_VTX_LAST;

   if (se_cntl != rmesa->hw.set.cmd[SET_SE_CNTL]) {
      RADEON_STATECHANGE( rmesa, set );
      rmesa->hw.set.cmd[SET_SE_CNTL] = se_cntl;
   }
}

/**
 * Predict the total emit size for the next rendering operation, so that
 * there is no flush in the middle of rendering.  The prediction must
 * never underestimate the worst case, but should stay as close to it
 * as possible.
 */
static GLuint radeonEnsureEmitSize( struct gl_context *ctx, GLuint inputs )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   struct vertex_buffer *VB = &tnl->vb;
   GLuint space_required;
   GLuint state_size;
   GLuint nr_aos = 1; /* radeonEmitArrays always emits at least one */
   int i;
   /* list of flags that allocate an aos object */
   const GLuint flags_to_check[] = {
      VERT_BIT_NORMAL,
      VERT_BIT_COLOR0,
      VERT_BIT_COLOR1,
      VERT_BIT_FOG
   };
   /* predict the number of aos to emit */
   for (i = 0; i < sizeof(flags_to_check)/sizeof(flags_to_check[0]); ++i)
   {
      if (inputs & flags_to_check[i])
	 ++nr_aos;
   }
   for (i = 0; i < ctx->Const.MaxTextureUnits; ++i)
   {
      if (inputs & VERT_BIT_TEX(i))
	 ++nr_aos;
   }
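
   /* Example (hypothetical input set): inputs = POS | COLOR0 | NORMAL
    * | TEX0 gives nr_aos = 4 -- the one array radeonEmitArrays always
    * emits, plus one each for color0, normal and the texcoord set.
    */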

   {
      /* count the prediction for state size */
      space_required = 0;
      state_size = radeonCountStateEmitSize( &rmesa->radeon );
      /* tcl may be changed in radeonEmitArrays so account for it if not dirty */
      if (!rmesa->hw.tcl.dirty)
	 state_size += rmesa->hw.tcl.check( &rmesa->radeon.glCtx, &rmesa->hw.tcl );
      /* predict the size for the elements */
      for (i = 0; i < VB->PrimitiveCount; ++i)
      {
	 /* If primitive.count is less than MAX_CONVERSION_SIZE the
	    rendering code may decide to convert it to elts.
	    In that case we have to make a pessimistic prediction
	    and use the larger of the two paths. */
	 const GLuint elts = ELTS_BUFSZ(nr_aos);
	 const GLuint index = INDEX_BUFSZ;
	 const GLuint vbuf = VBUF_BUFSZ;
	 if (!VB->Primitive[i].count)
	    continue;
	 if ( (!VB->Elts && VB->Primitive[i].count >= MAX_CONVERSION_SIZE)
	      || vbuf > index + elts)
	    space_required += vbuf;
	 else
	    space_required += index + elts;
	 space_required += VB->Primitive[i].count * 3;
	 space_required += AOS_BUFSZ(nr_aos);
      }
      space_required += SCISSOR_BUFSZ;
   }
   /* flush the buffer in case we need more than is left. */
   if (rcommonEnsureCmdBufSpace(&rmesa->radeon, space_required, __func__))
      return space_required + radeonCountStateEmitSize( &rmesa->radeon );
   else
      return space_required + state_size;
}

/**********************************************************************/
/*                      Render pipeline stage                         */
/**********************************************************************/


/* TCL render.
 */
static GLboolean radeon_run_tcl_render( struct gl_context *ctx,
					struct tnl_pipeline_stage *stage )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   struct vertex_buffer *VB = &tnl->vb;
   GLuint inputs = VERT_BIT_POS | VERT_BIT_COLOR0;
   GLuint i;
   GLuint emit_end;

   /* TODO: separate this from the swtnl pipeline
    */
   if (rmesa->radeon.TclFallback)
      return GL_TRUE;	/* fallback to software t&l */

   if (VB->Count == 0)
      return GL_FALSE;

   /* NOTE: inputs != tnl->render_inputs - these are the untransformed
    * inputs.
    */
   if (ctx->Light.Enabled) {
      inputs |= VERT_BIT_NORMAL;
   }

   if (_mesa_need_secondary_color(ctx)) {
      inputs |= VERT_BIT_COLOR1;
   }

   if ( (ctx->Fog.FogCoordinateSource == GL_FOG_COORD) && ctx->Fog.Enabled ) {
      inputs |= VERT_BIT_FOG;
   }

   for (i = 0 ; i < ctx->Const.MaxTextureUnits; i++) {
      if (ctx->Texture.Unit[i]._Current) {
	 /* TODO: probably should not emit texture coords when texgen is enabled */
	 if (rmesa->TexGenNeedNormals[i]) {
	    inputs |= VERT_BIT_NORMAL;
	 }
	 inputs |= VERT_BIT_TEX(i);
      }
   }

   radeonReleaseArrays( ctx, ~0 );
   emit_end = radeonEnsureEmitSize( ctx, inputs )
      + rmesa->radeon.cmdbuf.cs->cdw;
   radeonEmitArrays( ctx, inputs );

   rmesa->tcl.Elts = VB->Elts;

   for (i = 0 ; i < VB->PrimitiveCount ; i++)
   {
      GLuint prim = _tnl_translate_prim(&VB->Primitive[i]);
      GLuint start = VB->Primitive[i].start;
      GLuint length = VB->Primitive[i].count;

      if (!length)
	 continue;

      if (rmesa->tcl.Elts)
	 radeonEmitEltPrimitive( ctx, start, start+length, prim );
      else
	 radeonEmitPrimitive( ctx, start, start+length, prim );
   }

   if (emit_end < rmesa->radeon.cmdbuf.cs->cdw)
      WARN_ONCE("Rendering was %d commands larger than predicted size."
	    " We might overflow the command buffer.\n",
	    rmesa->radeon.cmdbuf.cs->cdw - emit_end);

   return GL_FALSE;		/* finished the pipe */
}



/* Initial state for tcl stage.
 */
const struct tnl_pipeline_stage _radeon_tcl_stage =
{
   "radeon render",
   NULL,
   NULL,
   NULL,
   NULL,
   radeon_run_tcl_render	/* run */
};



/**********************************************************************/
/*                 Validate state at pipeline start                   */
/**********************************************************************/


/*-----------------------------------------------------------------------
 * Manage TCL fallbacks
 */


static void transition_to_swtnl( struct gl_context *ctx )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   GLuint se_cntl;

   RADEON_NEWPRIM( rmesa );
   rmesa->swtcl.vertex_format = 0;

   radeonChooseVertexState( ctx );
   radeonChooseRenderState( ctx );

   _tnl_validate_shine_tables( ctx );

   tnl->Driver.NotifyMaterialChange =
      _tnl_validate_shine_tables;

   radeonReleaseArrays( ctx, ~0 );

   se_cntl = rmesa->hw.set.cmd[SET_SE_CNTL];
   se_cntl |= RADEON_FLAT_SHADE_VTX_LAST;

   if (se_cntl != rmesa->hw.set.cmd[SET_SE_CNTL]) {
      RADEON_STATECHANGE( rmesa, set );
      rmesa->hw.set.cmd[SET_SE_CNTL] = se_cntl;
   }
}


static void transition_to_hwtnl( struct gl_context *ctx )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   GLuint se_coord_fmt = rmesa->hw.set.cmd[SET_SE_COORDFMT];

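   /* SET_SE_COORDFMT below selects untransformed w (the
    * RADEON_VTX_W0_IS_NOT_1_OVER_W0 bit) and drops the 1/w
    * pre-multiplies, so the tnl module no longer has to produce
    * projected coordinates -- hence the _tnl_need_projected_coords()
    * call when the register actually changes.
    */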
   se_coord_fmt &= ~(RADEON_VTX_XY_PRE_MULT_1_OVER_W0 |
		     RADEON_VTX_Z_PRE_MULT_1_OVER_W0 |
		     RADEON_VTX_W0_IS_NOT_1_OVER_W0);
   se_coord_fmt |= RADEON_VTX_W0_IS_NOT_1_OVER_W0;

   if ( se_coord_fmt != rmesa->hw.set.cmd[SET_SE_COORDFMT] ) {
      RADEON_STATECHANGE( rmesa, set );
      rmesa->hw.set.cmd[SET_SE_COORDFMT] = se_coord_fmt;
      _tnl_need_projected_coords( ctx, GL_FALSE );
   }

   radeonUpdateMaterial( ctx );

   tnl->Driver.NotifyMaterialChange = radeonUpdateMaterial;

   if ( rmesa->radeon.dma.flush )
      rmesa->radeon.dma.flush( &rmesa->radeon.glCtx );

   rmesa->radeon.dma.flush = NULL;
   rmesa->swtcl.vertex_format = 0;

   //   if (rmesa->swtcl.indexed_verts.buf)
   //      radeonReleaseDmaRegion( rmesa, &rmesa->swtcl.indexed_verts,
   //			      __func__ );

   if (RADEON_DEBUG & RADEON_FALLBACKS)
      fprintf(stderr, "Radeon end tcl fallback\n");
}

static char *fallbackStrings[] = {
   "Rasterization fallback",
   "Unfilled triangles",
   "Twosided lighting, differing materials",
   "Materials in VB (maybe between begin/end)",
   "Texgen unit 0",
   "Texgen unit 1",
   "Texgen unit 2",
   "User disable",
   "Fogcoord with separate specular lighting"
};

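/* Map a single fallback bit to its string: the loop computes
 * i = log2(bit), which indexes fallbackStrings in bit order.
 */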
static char *getFallbackString(GLuint bit)
{
   int i = 0;
   while (bit > 1) {
      i++;
      bit >>= 1;
   }
   return fallbackStrings[i];
}



void radeonTclFallback( struct gl_context *ctx, GLuint bit, GLboolean mode )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   GLuint oldfallback = rmesa->radeon.TclFallback;

   if (mode) {
      rmesa->radeon.TclFallback |= bit;
      if (oldfallback == 0) {
	 if (RADEON_DEBUG & RADEON_FALLBACKS)
	    fprintf(stderr, "Radeon begin tcl fallback %s\n",
		    getFallbackString( bit ));
	 transition_to_swtnl( ctx );
      }
   }
   else {
      rmesa->radeon.TclFallback &= ~bit;
      if (oldfallback == bit) {
	 if (RADEON_DEBUG & RADEON_FALLBACKS)
	    fprintf(stderr, "Radeon end tcl fallback %s\n",
		    getFallbackString( bit ));
	 transition_to_hwtnl( ctx );
      }
   }
}