1 /**************************************************************************
2
3 Copyright 2000, 2001 VA Linux Systems Inc., Fremont, California.
4
5 All Rights Reserved.
6
7 Permission is hereby granted, free of charge, to any person obtaining
8 a copy of this software and associated documentation files (the
9 "Software"), to deal in the Software without restriction, including
10 without limitation the rights to use, copy, modify, merge, publish,
11 distribute, sublicense, and/or sell copies of the Software, and to
12 permit persons to whom the Software is furnished to do so, subject to
13 the following conditions:
14
15 The above copyright notice and this permission notice (including the
16 next paragraph) shall be included in all copies or substantial
17 portions of the Software.
18
19 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
22 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
23 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
24 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
25 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 **************************************************************************/
28
29 /*
30 * Authors:
31 * Gareth Hughes <gareth@valinux.com>
32 * Keith Whitwell <keithw@vmware.com>
33 */
34
35 #include "main/glheader.h"
36 #include "main/imports.h"
37 #include "main/enums.h"
38 #include "main/light.h"
39 #include "main/context.h"
40 #include "main/framebuffer.h"
41 #include "main/fbobject.h"
42 #include "util/simple_list.h"
43 #include "main/state.h"
44 #include "main/core.h"
45 #include "main/stencil.h"
46 #include "main/viewport.h"
47
48 #include "vbo/vbo.h"
49 #include "tnl/tnl.h"
50 #include "tnl/t_pipeline.h"
51 #include "swrast_setup/swrast_setup.h"
52 #include "drivers/common/meta.h"
53 #include "util/bitscan.h"
54
55 #include "radeon_context.h"
56 #include "radeon_mipmap_tree.h"
57 #include "radeon_ioctl.h"
58 #include "radeon_state.h"
59 #include "radeon_tcl.h"
60 #include "radeon_tex.h"
61 #include "radeon_swtcl.h"
62
63 static void radeonUpdateSpecular( struct gl_context *ctx );
64
65 /* =============================================================
66 * Alpha blending
67 */
68
69 static void radeonAlphaFunc( struct gl_context *ctx, GLenum func, GLfloat ref )
70 {
71 r100ContextPtr rmesa = R100_CONTEXT(ctx);
72 int pp_misc = rmesa->hw.ctx.cmd[CTX_PP_MISC];
73 GLubyte refByte;
74
75 CLAMPED_FLOAT_TO_UBYTE(refByte, ref);
76
77 RADEON_STATECHANGE( rmesa, ctx );
78
79 pp_misc &= ~(RADEON_ALPHA_TEST_OP_MASK | RADEON_REF_ALPHA_MASK);
80 pp_misc |= (refByte & RADEON_REF_ALPHA_MASK);
81
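/* As a rough worked example (not from the original code): glAlphaFunc(GL_GREATER, 0.5f)
 * gives a refByte of about 128 (0.5 scaled to 0..255); that byte lands in the
 * REF_ALPHA field of PP_MISC, and the comparison op is OR'd in by the switch below.
 */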
82 switch ( func ) {
83 case GL_NEVER:
84 pp_misc |= RADEON_ALPHA_TEST_FAIL;
85 break;
86 case GL_LESS:
87 pp_misc |= RADEON_ALPHA_TEST_LESS;
88 break;
89 case GL_EQUAL:
90 pp_misc |= RADEON_ALPHA_TEST_EQUAL;
91 break;
92 case GL_LEQUAL:
93 pp_misc |= RADEON_ALPHA_TEST_LEQUAL;
94 break;
95 case GL_GREATER:
96 pp_misc |= RADEON_ALPHA_TEST_GREATER;
97 break;
98 case GL_NOTEQUAL:
99 pp_misc |= RADEON_ALPHA_TEST_NEQUAL;
100 break;
101 case GL_GEQUAL:
102 pp_misc |= RADEON_ALPHA_TEST_GEQUAL;
103 break;
104 case GL_ALWAYS:
105 pp_misc |= RADEON_ALPHA_TEST_PASS;
106 break;
107 }
108
109 rmesa->hw.ctx.cmd[CTX_PP_MISC] = pp_misc;
110 }
111
112 static void radeonBlendEquationSeparate( struct gl_context *ctx,
113 GLenum modeRGB, GLenum modeA )
114 {
115 r100ContextPtr rmesa = R100_CONTEXT(ctx);
116 GLuint b = rmesa->hw.ctx.cmd[CTX_RB3D_BLENDCNTL] & ~RADEON_COMB_FCN_MASK;
117 GLboolean fallback = GL_FALSE;
118
119 assert( modeRGB == modeA );
120
121 switch ( modeRGB ) {
122 case GL_FUNC_ADD:
123 case GL_LOGIC_OP:
124 b |= RADEON_COMB_FCN_ADD_CLAMP;
125 break;
126
127 case GL_FUNC_SUBTRACT:
128 b |= RADEON_COMB_FCN_SUB_CLAMP;
129 break;
130
131 default:
132 if (ctx->Color.BlendEnabled)
133 fallback = GL_TRUE;
134 else
135 b |= RADEON_COMB_FCN_ADD_CLAMP;
136 break;
137 }
138
139 FALLBACK( rmesa, RADEON_FALLBACK_BLEND_EQ, fallback );
140 if ( !fallback ) {
141 RADEON_STATECHANGE( rmesa, ctx );
142 rmesa->hw.ctx.cmd[CTX_RB3D_BLENDCNTL] = b;
143 if ( (ctx->Color.ColorLogicOpEnabled || (ctx->Color.BlendEnabled
144 && ctx->Color.Blend[0].EquationRGB == GL_LOGIC_OP)) ) {
145 rmesa->hw.ctx.cmd[CTX_RB3D_CNTL] |= RADEON_ROP_ENABLE;
146 } else {
147 rmesa->hw.ctx.cmd[CTX_RB3D_CNTL] &= ~RADEON_ROP_ENABLE;
148 }
149 }
150 }
151
152 static void radeonBlendFuncSeparate( struct gl_context *ctx,
153 GLenum sfactorRGB, GLenum dfactorRGB,
154 GLenum sfactorA, GLenum dfactorA )
155 {
156 r100ContextPtr rmesa = R100_CONTEXT(ctx);
157 GLuint b = rmesa->hw.ctx.cmd[CTX_RB3D_BLENDCNTL] &
158 ~(RADEON_SRC_BLEND_MASK | RADEON_DST_BLEND_MASK);
159 GLboolean fallback = GL_FALSE;
160
161 switch ( ctx->Color.Blend[0].SrcRGB ) {
162 case GL_ZERO:
163 b |= RADEON_SRC_BLEND_GL_ZERO;
164 break;
165 case GL_ONE:
166 b |= RADEON_SRC_BLEND_GL_ONE;
167 break;
168 case GL_DST_COLOR:
169 b |= RADEON_SRC_BLEND_GL_DST_COLOR;
170 break;
171 case GL_ONE_MINUS_DST_COLOR:
172 b |= RADEON_SRC_BLEND_GL_ONE_MINUS_DST_COLOR;
173 break;
174 case GL_SRC_COLOR:
175 b |= RADEON_SRC_BLEND_GL_SRC_COLOR;
176 break;
177 case GL_ONE_MINUS_SRC_COLOR:
178 b |= RADEON_SRC_BLEND_GL_ONE_MINUS_SRC_COLOR;
179 break;
180 case GL_SRC_ALPHA:
181 b |= RADEON_SRC_BLEND_GL_SRC_ALPHA;
182 break;
183 case GL_ONE_MINUS_SRC_ALPHA:
184 b |= RADEON_SRC_BLEND_GL_ONE_MINUS_SRC_ALPHA;
185 break;
186 case GL_DST_ALPHA:
187 b |= RADEON_SRC_BLEND_GL_DST_ALPHA;
188 break;
189 case GL_ONE_MINUS_DST_ALPHA:
190 b |= RADEON_SRC_BLEND_GL_ONE_MINUS_DST_ALPHA;
191 break;
192 case GL_SRC_ALPHA_SATURATE:
193 b |= RADEON_SRC_BLEND_GL_SRC_ALPHA_SATURATE;
194 break;
195 case GL_CONSTANT_COLOR:
196 case GL_ONE_MINUS_CONSTANT_COLOR:
197 case GL_CONSTANT_ALPHA:
198 case GL_ONE_MINUS_CONSTANT_ALPHA:
199 if (ctx->Color.BlendEnabled)
200 fallback = GL_TRUE;
201 else
202 b |= RADEON_SRC_BLEND_GL_ONE;
203 break;
204 default:
205 break;
206 }
207
208 switch ( ctx->Color.Blend[0].DstRGB ) {
209 case GL_ZERO:
210 b |= RADEON_DST_BLEND_GL_ZERO;
211 break;
212 case GL_ONE:
213 b |= RADEON_DST_BLEND_GL_ONE;
214 break;
215 case GL_SRC_COLOR:
216 b |= RADEON_DST_BLEND_GL_SRC_COLOR;
217 break;
218 case GL_ONE_MINUS_SRC_COLOR:
219 b |= RADEON_DST_BLEND_GL_ONE_MINUS_SRC_COLOR;
220 break;
221 case GL_SRC_ALPHA:
222 b |= RADEON_DST_BLEND_GL_SRC_ALPHA;
223 break;
224 case GL_ONE_MINUS_SRC_ALPHA:
225 b |= RADEON_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA;
226 break;
227 case GL_DST_COLOR:
228 b |= RADEON_DST_BLEND_GL_DST_COLOR;
229 break;
230 case GL_ONE_MINUS_DST_COLOR:
231 b |= RADEON_DST_BLEND_GL_ONE_MINUS_DST_COLOR;
232 break;
233 case GL_DST_ALPHA:
234 b |= RADEON_DST_BLEND_GL_DST_ALPHA;
235 break;
236 case GL_ONE_MINUS_DST_ALPHA:
237 b |= RADEON_DST_BLEND_GL_ONE_MINUS_DST_ALPHA;
238 break;
239 case GL_CONSTANT_COLOR:
240 case GL_ONE_MINUS_CONSTANT_COLOR:
241 case GL_CONSTANT_ALPHA:
242 case GL_ONE_MINUS_CONSTANT_ALPHA:
243 if (ctx->Color.BlendEnabled)
244 fallback = GL_TRUE;
245 else
246 b |= RADEON_DST_BLEND_GL_ZERO;
247 break;
248 default:
249 break;
250 }
251
252 FALLBACK( rmesa, RADEON_FALLBACK_BLEND_FUNC, fallback );
253 if ( !fallback ) {
254 RADEON_STATECHANGE( rmesa, ctx );
255 rmesa->hw.ctx.cmd[CTX_RB3D_BLENDCNTL] = b;
256 }
257 }
258
259
260 /* =============================================================
261 * Depth testing
262 */
263
264 static void radeonDepthFunc( struct gl_context *ctx, GLenum func )
265 {
266 r100ContextPtr rmesa = R100_CONTEXT(ctx);
267
268 RADEON_STATECHANGE( rmesa, ctx );
269 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] &= ~RADEON_Z_TEST_MASK;
270
271 switch ( ctx->Depth.Func ) {
272 case GL_NEVER:
273 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_Z_TEST_NEVER;
274 break;
275 case GL_LESS:
276 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_Z_TEST_LESS;
277 break;
278 case GL_EQUAL:
279 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_Z_TEST_EQUAL;
280 break;
281 case GL_LEQUAL:
282 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_Z_TEST_LEQUAL;
283 break;
284 case GL_GREATER:
285 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_Z_TEST_GREATER;
286 break;
287 case GL_NOTEQUAL:
288 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_Z_TEST_NEQUAL;
289 break;
290 case GL_GEQUAL:
291 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_Z_TEST_GEQUAL;
292 break;
293 case GL_ALWAYS:
294 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_Z_TEST_ALWAYS;
295 break;
296 }
297 }
298
299
300 static void radeonDepthMask( struct gl_context *ctx, GLboolean flag )
301 {
302 r100ContextPtr rmesa = R100_CONTEXT(ctx);
303 RADEON_STATECHANGE( rmesa, ctx );
304
305 if ( ctx->Depth.Mask ) {
306 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_Z_WRITE_ENABLE;
307 } else {
308 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] &= ~RADEON_Z_WRITE_ENABLE;
309 }
310 }
311
312
313 /* =============================================================
314 * Fog
315 */
316
317
318 static void radeonFogfv( struct gl_context *ctx, GLenum pname, const GLfloat *param )
319 {
320 r100ContextPtr rmesa = R100_CONTEXT(ctx);
321 union { int i; float f; } c, d;
322 GLubyte col[4];
323
324 switch (pname) {
325 case GL_FOG_MODE:
326 if (!ctx->Fog.Enabled)
327 return;
328 RADEON_STATECHANGE(rmesa, tcl);
329 rmesa->hw.tcl.cmd[TCL_UCP_VERT_BLEND_CTL] &= ~RADEON_TCL_FOG_MASK;
330 switch (ctx->Fog.Mode) {
331 case GL_LINEAR:
332 rmesa->hw.tcl.cmd[TCL_UCP_VERT_BLEND_CTL] |= RADEON_TCL_FOG_LINEAR;
333 break;
334 case GL_EXP:
335 rmesa->hw.tcl.cmd[TCL_UCP_VERT_BLEND_CTL] |= RADEON_TCL_FOG_EXP;
336 break;
337 case GL_EXP2:
338 rmesa->hw.tcl.cmd[TCL_UCP_VERT_BLEND_CTL] |= RADEON_TCL_FOG_EXP2;
339 break;
340 default:
341 return;
342 }
343 /* fallthrough */
344 case GL_FOG_DENSITY:
345 case GL_FOG_START:
346 case GL_FOG_END:
347 if (!ctx->Fog.Enabled)
348 return;
349 c.i = rmesa->hw.fog.cmd[FOG_C];
350 d.i = rmesa->hw.fog.cmd[FOG_D];
351 switch (ctx->Fog.Mode) {
352 case GL_EXP:
353 c.f = 0.0;
354 /* While this is the opposite sign from the DDK, it makes the fog test
355 * pass, and matches r200.
356 */
357 d.f = -ctx->Fog.Density;
358 break;
359 case GL_EXP2:
360 c.f = 0.0;
361 d.f = -(ctx->Fog.Density * ctx->Fog.Density);
362 break;
363 case GL_LINEAR:
364 if (ctx->Fog.Start == ctx->Fog.End) {
365 c.f = 1.0F;
366 d.f = 1.0F;
367 } else {
368 c.f = ctx->Fog.End/(ctx->Fog.End-ctx->Fog.Start);
369 /* While this is the opposite sign from the DDK, it makes the fog
370 * test pass, and matches r200.
371 */
372 d.f = -1.0/(ctx->Fog.End-ctx->Fog.Start);
373 }
374 break;
375 default:
376 break;
377 }
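/* Sanity check on the coefficients above, assuming the standard GL fog equations:
 * linear fog is f = (end - z) / (end - start), which the hardware evaluates as
 * f = c + d * z with c = end / (end - start) and d = -1 / (end - start),
 * matching the (sign-flipped) values written here.
 */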
378 if (c.i != rmesa->hw.fog.cmd[FOG_C] || d.i != rmesa->hw.fog.cmd[FOG_D]) {
379 RADEON_STATECHANGE( rmesa, fog );
380 rmesa->hw.fog.cmd[FOG_C] = c.i;
381 rmesa->hw.fog.cmd[FOG_D] = d.i;
382 }
383 break;
384 case GL_FOG_COLOR:
385 RADEON_STATECHANGE( rmesa, ctx );
386 _mesa_unclamped_float_rgba_to_ubyte(col, ctx->Fog.Color );
387 rmesa->hw.ctx.cmd[CTX_PP_FOG_COLOR] &= ~RADEON_FOG_COLOR_MASK;
388 rmesa->hw.ctx.cmd[CTX_PP_FOG_COLOR] |=
389 radeonPackColor( 4, col[0], col[1], col[2], 0 );
390 break;
391 case GL_FOG_COORD_SRC:
392 radeonUpdateSpecular( ctx );
393 break;
394 default:
395 return;
396 }
397 }
398
399 /* =============================================================
400 * Culling
401 */
402
403 static void radeonCullFace( struct gl_context *ctx, GLenum unused )
404 {
405 r100ContextPtr rmesa = R100_CONTEXT(ctx);
406 GLuint s = rmesa->hw.set.cmd[SET_SE_CNTL];
407 GLuint t = rmesa->hw.tcl.cmd[TCL_UCP_VERT_BLEND_CTL];
408
409 s |= RADEON_FFACE_SOLID | RADEON_BFACE_SOLID;
410 t &= ~(RADEON_CULL_FRONT | RADEON_CULL_BACK);
411
412 if ( ctx->Polygon.CullFlag ) {
413 switch ( ctx->Polygon.CullFaceMode ) {
414 case GL_FRONT:
415 s &= ~RADEON_FFACE_SOLID;
416 t |= RADEON_CULL_FRONT;
417 break;
418 case GL_BACK:
419 s &= ~RADEON_BFACE_SOLID;
420 t |= RADEON_CULL_BACK;
421 break;
422 case GL_FRONT_AND_BACK:
423 s &= ~(RADEON_FFACE_SOLID | RADEON_BFACE_SOLID);
424 t |= (RADEON_CULL_FRONT | RADEON_CULL_BACK);
425 break;
426 }
427 }
428
429 if ( rmesa->hw.set.cmd[SET_SE_CNTL] != s ) {
430 RADEON_STATECHANGE(rmesa, set );
431 rmesa->hw.set.cmd[SET_SE_CNTL] = s;
432 }
433
434 if ( rmesa->hw.tcl.cmd[TCL_UCP_VERT_BLEND_CTL] != t ) {
435 RADEON_STATECHANGE(rmesa, tcl );
436 rmesa->hw.tcl.cmd[TCL_UCP_VERT_BLEND_CTL] = t;
437 }
438 }
439
440 static void radeonFrontFace( struct gl_context *ctx, GLenum mode )
441 {
442 r100ContextPtr rmesa = R100_CONTEXT(ctx);
443 int cull_face = (mode == GL_CW) ? RADEON_FFACE_CULL_CW : RADEON_FFACE_CULL_CCW;
444
445 RADEON_STATECHANGE( rmesa, set );
446 rmesa->hw.set.cmd[SET_SE_CNTL] &= ~RADEON_FFACE_CULL_DIR_MASK;
447
448 RADEON_STATECHANGE( rmesa, tcl );
449 rmesa->hw.tcl.cmd[TCL_UCP_VERT_BLEND_CTL] &= ~RADEON_CULL_FRONT_IS_CCW;
450
451 /* Winding is inverted when rendering to FBO */
452 if (ctx->DrawBuffer && _mesa_is_user_fbo(ctx->DrawBuffer))
453 cull_face = (mode == GL_CCW) ? RADEON_FFACE_CULL_CW : RADEON_FFACE_CULL_CCW;
454 rmesa->hw.set.cmd[SET_SE_CNTL] |= cull_face;
455
456 if ( mode == GL_CCW )
457 rmesa->hw.tcl.cmd[TCL_UCP_VERT_BLEND_CTL] |= RADEON_CULL_FRONT_IS_CCW;
458 }
459
460
461 /* =============================================================
462 * Line state
463 */
464 static void radeonLineWidth( struct gl_context *ctx, GLfloat widthf )
465 {
466 r100ContextPtr rmesa = R100_CONTEXT(ctx);
467
468 RADEON_STATECHANGE( rmesa, lin );
469 RADEON_STATECHANGE( rmesa, set );
470
471 /* Line width is stored in U6.4 format.
472 */
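/* e.g. a width of 2.5 becomes (GLuint)(2.5 * 16.0) = 40 = 0x28: six integer bits,
 * four fractional bits.
 */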
473 rmesa->hw.lin.cmd[LIN_SE_LINE_WIDTH] = (GLuint)(widthf * 16.0);
474 if ( widthf > 1.0 ) {
475 rmesa->hw.set.cmd[SET_SE_CNTL] |= RADEON_WIDELINE_ENABLE;
476 } else {
477 rmesa->hw.set.cmd[SET_SE_CNTL] &= ~RADEON_WIDELINE_ENABLE;
478 }
479 }
480
481 static void radeonLineStipple( struct gl_context *ctx, GLint factor, GLushort pattern )
482 {
483 r100ContextPtr rmesa = R100_CONTEXT(ctx);
484
485 RADEON_STATECHANGE( rmesa, lin );
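/* The register packs the repeat factor into bits 16..23 and the 16-bit pattern
 * into the low word; as a quick example, glLineStipple(3, 0x00FF) ends up as
 * ((3 & 0xff) << 16) | 0x00FF = 0x000300FF.
 */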
486 rmesa->hw.lin.cmd[LIN_RE_LINE_PATTERN] =
487 ((((GLuint)factor & 0xff) << 16) | ((GLuint)pattern));
488 }
489
490
491 /* =============================================================
492 * Masks
493 */
494 static void radeonColorMask( struct gl_context *ctx,
495 GLboolean r, GLboolean g,
496 GLboolean b, GLboolean a )
497 {
498 r100ContextPtr rmesa = R100_CONTEXT(ctx);
499 struct radeon_renderbuffer *rrb;
500 GLuint mask;
501
502 rrb = radeon_get_colorbuffer(&rmesa->radeon);
503 if (!rrb)
504 return;
505
506 mask = radeonPackColor( rrb->cpp,
507 ctx->Color.ColorMask[0][RCOMP],
508 ctx->Color.ColorMask[0][GCOMP],
509 ctx->Color.ColorMask[0][BCOMP],
510 ctx->Color.ColorMask[0][ACOMP] );
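/* Roughly speaking, with a 32-bit color buffer (cpp == 4) each enabled channel
 * contributes an all-ones byte to the plane mask, so e.g. disabling only blue
 * clears that channel's byte (the exact byte positions depend on the
 * renderbuffer format).
 */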
511
512 if ( rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK] != mask ) {
513 RADEON_STATECHANGE( rmesa, msk );
514 rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK] = mask;
515 }
516 }
517
518
519 /* =============================================================
520 * Polygon state
521 */
522
523 static void radeonPolygonOffset( struct gl_context *ctx,
524 GLfloat factor, GLfloat units, GLfloat clamp )
525 {
526 r100ContextPtr rmesa = R100_CONTEXT(ctx);
527 const GLfloat depthScale = 1.0F / ctx->DrawBuffer->_DepthMaxF;
528 float_ui32_type constant = { units * depthScale };
529 float_ui32_type factoru = { factor };
530
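/* A rough sketch of the scaling, assuming Mesa's usual _DepthMaxF convention:
 * with a 24-bit depth buffer _DepthMaxF is 0xffffff, so glPolygonOffset(0.0f, 4.0f)
 * writes a constant bias of 4 / 16777215 here, while the factor is passed
 * through as-is.
 */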
531 RADEON_STATECHANGE( rmesa, zbs );
532 rmesa->hw.zbs.cmd[ZBS_SE_ZBIAS_FACTOR] = factoru.ui32;
533 rmesa->hw.zbs.cmd[ZBS_SE_ZBIAS_CONSTANT] = constant.ui32;
534 }
535
536 static void radeonPolygonMode( struct gl_context *ctx, GLenum face, GLenum mode )
537 {
538 r100ContextPtr rmesa = R100_CONTEXT(ctx);
539 GLboolean unfilled = (ctx->Polygon.FrontMode != GL_FILL ||
540 ctx->Polygon.BackMode != GL_FILL);
541
542 /* Can't generally do unfilled via tcl, but some good special
543 * cases work.
544 */
545 TCL_FALLBACK( ctx, RADEON_TCL_FALLBACK_UNFILLED, unfilled);
546 if (rmesa->radeon.TclFallback) {
547 radeonChooseRenderState( ctx );
548 radeonChooseVertexState( ctx );
549 }
550 }
551
552
553 /* =============================================================
554 * Rendering attributes
555 *
556 * We really don't want to recalculate all this every time we bind a
557 * texture. These things shouldn't change all that often, so it makes
558 * sense to break them out of the core texture state update routines.
559 */
560
561 /* Examine lighting and texture state to determine if separate specular
562 * should be enabled.
563 */
564 static void radeonUpdateSpecular( struct gl_context *ctx )
565 {
566 r100ContextPtr rmesa = R100_CONTEXT(ctx);
567 uint32_t p = rmesa->hw.ctx.cmd[CTX_PP_CNTL];
568 GLuint flag = 0;
569
570 RADEON_STATECHANGE( rmesa, tcl );
571
572 rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXSEL] &= ~RADEON_TCL_COMPUTE_SPECULAR;
573 rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXSEL] &= ~RADEON_TCL_COMPUTE_DIFFUSE;
574 rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT] &= ~RADEON_TCL_VTX_PK_SPEC;
575 rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT] &= ~RADEON_TCL_VTX_PK_DIFFUSE;
576 rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL] &= ~RADEON_LIGHTING_ENABLE;
577
578 p &= ~RADEON_SPECULAR_ENABLE;
579
580 rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL] |= RADEON_DIFFUSE_SPECULAR_COMBINE;
581
582
583 if (ctx->Light.Enabled &&
584 ctx->Light.Model.ColorControl == GL_SEPARATE_SPECULAR_COLOR) {
585 rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXSEL] |= RADEON_TCL_COMPUTE_SPECULAR;
586 rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXSEL] |= RADEON_TCL_COMPUTE_DIFFUSE;
587 rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT] |= RADEON_TCL_VTX_PK_SPEC;
588 rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT] |= RADEON_TCL_VTX_PK_DIFFUSE;
589 rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL] |= RADEON_LIGHTING_ENABLE;
590 p |= RADEON_SPECULAR_ENABLE;
591 rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL] &=
592 ~RADEON_DIFFUSE_SPECULAR_COMBINE;
593 }
594 else if (ctx->Light.Enabled) {
595 rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXSEL] |= RADEON_TCL_COMPUTE_DIFFUSE;
596 rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT] |= RADEON_TCL_VTX_PK_DIFFUSE;
597 rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL] |= RADEON_LIGHTING_ENABLE;
598 } else if (ctx->Fog.ColorSumEnabled ) {
599 rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT] |= RADEON_TCL_VTX_PK_SPEC;
600 rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT] |= RADEON_TCL_VTX_PK_DIFFUSE;
601 p |= RADEON_SPECULAR_ENABLE;
602 } else {
603 rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT] |= RADEON_TCL_VTX_PK_DIFFUSE;
604 }
605
606 if (ctx->Fog.Enabled) {
607 rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT] |= RADEON_TCL_VTX_PK_SPEC;
608 if (ctx->Fog.FogCoordinateSource == GL_FRAGMENT_DEPTH) {
609 rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXSEL] |= RADEON_TCL_COMPUTE_SPECULAR;
610 /* Bizarre: have to leave lighting enabled to get fog. */
611 rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL] |= RADEON_LIGHTING_ENABLE;
612 }
613 else {
614 /* The tcl fog factor calculation cannot be used with a fog coord
615 * source (precomputed factors are sent instead), and precomputed
616 * factors cannot be combined with tcl specular light (needs a tcl fallback). */
617 flag = (rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXSEL] &
618 RADEON_TCL_COMPUTE_SPECULAR) != 0;
619 }
620 }
621
622 TCL_FALLBACK( ctx, RADEON_TCL_FALLBACK_FOGCOORDSPEC, flag);
623
624 if (_mesa_need_secondary_color(ctx)) {
625 assert( (p & RADEON_SPECULAR_ENABLE) != 0 );
626 } else {
627 assert( (p & RADEON_SPECULAR_ENABLE) == 0 );
628 }
629
630 if ( rmesa->hw.ctx.cmd[CTX_PP_CNTL] != p ) {
631 RADEON_STATECHANGE( rmesa, ctx );
632 rmesa->hw.ctx.cmd[CTX_PP_CNTL] = p;
633 }
634
635 /* Update vertex/render formats
636 */
637 if (rmesa->radeon.TclFallback) {
638 radeonChooseRenderState( ctx );
639 radeonChooseVertexState( ctx );
640 }
641 }
642
643
644 /* =============================================================
645 * Materials
646 */
647
648
649 /* Update on colormaterial, material emissive/ambient,
650 * lightmodel.globalambient
651 */
652 static void update_global_ambient( struct gl_context *ctx )
653 {
654 r100ContextPtr rmesa = R100_CONTEXT(ctx);
655 float *fcmd = (float *)RADEON_DB_STATE( glt );
656
657 /* Need to do more if both emissive & ambient are PREMULT:
658 * Hope this is not needed for MULT
659 */
660 if ((rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL] &
661 ((3 << RADEON_EMISSIVE_SOURCE_SHIFT) |
662 (3 << RADEON_AMBIENT_SOURCE_SHIFT))) == 0)
663 {
664 COPY_3V( &fcmd[GLT_RED],
665 ctx->Light.Material.Attrib[MAT_ATTRIB_FRONT_EMISSION]);
666 ACC_SCALE_3V( &fcmd[GLT_RED],
667 ctx->Light.Model.Ambient,
668 ctx->Light.Material.Attrib[MAT_ATTRIB_FRONT_AMBIENT]);
669 }
670 else
671 {
672 COPY_3V( &fcmd[GLT_RED], ctx->Light.Model.Ambient );
673 }
674
675 RADEON_DB_STATECHANGE(rmesa, &rmesa->hw.glt);
676 }
677
678 /* Update on change to
679 * - light[p].colors
680 * - light[p].enabled
681 */
682 static void update_light_colors( struct gl_context *ctx, GLuint p )
683 {
684 struct gl_light *l = &ctx->Light.Light[p];
685
686 /* fprintf(stderr, "%s\n", __func__); */
687
688 if (l->Enabled) {
689 r100ContextPtr rmesa = R100_CONTEXT(ctx);
690 float *fcmd = (float *)RADEON_DB_STATE( lit[p] );
691
692 COPY_4V( &fcmd[LIT_AMBIENT_RED], l->Ambient );
693 COPY_4V( &fcmd[LIT_DIFFUSE_RED], l->Diffuse );
694 COPY_4V( &fcmd[LIT_SPECULAR_RED], l->Specular );
695
696 RADEON_DB_STATECHANGE( rmesa, &rmesa->hw.lit[p] );
697 }
698 }
699
700 /* Also fall back for asymmetric colormaterial mode in two-sided lighting...
701 */
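/* In Mesa's material bitmask each back-face bit sits one position above the
 * corresponding front-face bit, which is why the back bits are compared against
 * the front bits shifted left by one below.
 */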
702 static void check_twoside_fallback( struct gl_context *ctx )
703 {
704 GLboolean fallback = GL_FALSE;
705 GLint i;
706
707 if (ctx->Light.Enabled && ctx->Light.Model.TwoSide) {
708 if (ctx->Light.ColorMaterialEnabled &&
709 (ctx->Light._ColorMaterialBitmask & BACK_MATERIAL_BITS) !=
710 ((ctx->Light._ColorMaterialBitmask & FRONT_MATERIAL_BITS)<<1))
711 fallback = GL_TRUE;
712 else {
713 for (i = MAT_ATTRIB_FRONT_AMBIENT; i < MAT_ATTRIB_FRONT_INDEXES; i+=2)
714 if (memcmp( ctx->Light.Material.Attrib[i],
715 ctx->Light.Material.Attrib[i+1],
716 sizeof(GLfloat)*4) != 0) {
717 fallback = GL_TRUE;
718 break;
719 }
720 }
721 }
722
723 TCL_FALLBACK( ctx, RADEON_TCL_FALLBACK_LIGHT_TWOSIDE, fallback );
724 }
725
726
727 static void radeonColorMaterial( struct gl_context *ctx, GLenum face, GLenum mode )
728 {
729 r100ContextPtr rmesa = R100_CONTEXT(ctx);
730 GLuint light_model_ctl1 = rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL];
731
732 light_model_ctl1 &= ~((3 << RADEON_EMISSIVE_SOURCE_SHIFT) |
733 (3 << RADEON_AMBIENT_SOURCE_SHIFT) |
734 (3 << RADEON_DIFFUSE_SOURCE_SHIFT) |
735 (3 << RADEON_SPECULAR_SOURCE_SHIFT));
736
737 if (ctx->Light.ColorMaterialEnabled) {
738 GLuint mask = ctx->Light._ColorMaterialBitmask;
739
740 if (mask & MAT_BIT_FRONT_EMISSION) {
741 light_model_ctl1 |= (RADEON_LM_SOURCE_VERTEX_DIFFUSE <<
742 RADEON_EMISSIVE_SOURCE_SHIFT);
743 }
744 else {
745 light_model_ctl1 |= (RADEON_LM_SOURCE_STATE_MULT <<
746 RADEON_EMISSIVE_SOURCE_SHIFT);
747 }
748
749 if (mask & MAT_BIT_FRONT_AMBIENT) {
750 light_model_ctl1 |= (RADEON_LM_SOURCE_VERTEX_DIFFUSE <<
751 RADEON_AMBIENT_SOURCE_SHIFT);
752 }
753 else {
754 light_model_ctl1 |= (RADEON_LM_SOURCE_STATE_MULT <<
755 RADEON_AMBIENT_SOURCE_SHIFT);
756 }
757
758 if (mask & MAT_BIT_FRONT_DIFFUSE) {
759 light_model_ctl1 |= (RADEON_LM_SOURCE_VERTEX_DIFFUSE <<
760 RADEON_DIFFUSE_SOURCE_SHIFT);
761 }
762 else {
763 light_model_ctl1 |= (RADEON_LM_SOURCE_STATE_MULT <<
764 RADEON_DIFFUSE_SOURCE_SHIFT);
765 }
766
767 if (mask & MAT_BIT_FRONT_SPECULAR) {
768 light_model_ctl1 |= (RADEON_LM_SOURCE_VERTEX_DIFFUSE <<
769 RADEON_SPECULAR_SOURCE_SHIFT);
770 }
771 else {
772 light_model_ctl1 |= (RADEON_LM_SOURCE_STATE_MULT <<
773 RADEON_SPECULAR_SOURCE_SHIFT);
774 }
775 }
776 else {
777 /* Default to MULT:
778 */
779 light_model_ctl1 |= (RADEON_LM_SOURCE_STATE_MULT << RADEON_EMISSIVE_SOURCE_SHIFT) |
780 (RADEON_LM_SOURCE_STATE_MULT << RADEON_AMBIENT_SOURCE_SHIFT) |
781 (RADEON_LM_SOURCE_STATE_MULT << RADEON_DIFFUSE_SOURCE_SHIFT) |
782 (RADEON_LM_SOURCE_STATE_MULT << RADEON_SPECULAR_SOURCE_SHIFT);
783 }
784
785 if (light_model_ctl1 != rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL]) {
786 RADEON_STATECHANGE( rmesa, tcl );
787 rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL] = light_model_ctl1;
788 }
789 }
790
791 void radeonUpdateMaterial( struct gl_context *ctx )
792 {
793 r100ContextPtr rmesa = R100_CONTEXT(ctx);
794 GLfloat (*mat)[4] = ctx->Light.Material.Attrib;
795 GLfloat *fcmd = (GLfloat *)RADEON_DB_STATE( mtl );
796 GLuint mask = ~0;
797
798 if (ctx->Light.ColorMaterialEnabled)
799 mask &= ~ctx->Light._ColorMaterialBitmask;
800
801 if (RADEON_DEBUG & RADEON_STATE)
802 fprintf(stderr, "%s\n", __func__);
803
804
805 if (mask & MAT_BIT_FRONT_EMISSION) {
806 fcmd[MTL_EMMISSIVE_RED] = mat[MAT_ATTRIB_FRONT_EMISSION][0];
807 fcmd[MTL_EMMISSIVE_GREEN] = mat[MAT_ATTRIB_FRONT_EMISSION][1];
808 fcmd[MTL_EMMISSIVE_BLUE] = mat[MAT_ATTRIB_FRONT_EMISSION][2];
809 fcmd[MTL_EMMISSIVE_ALPHA] = mat[MAT_ATTRIB_FRONT_EMISSION][3];
810 }
811 if (mask & MAT_BIT_FRONT_AMBIENT) {
812 fcmd[MTL_AMBIENT_RED] = mat[MAT_ATTRIB_FRONT_AMBIENT][0];
813 fcmd[MTL_AMBIENT_GREEN] = mat[MAT_ATTRIB_FRONT_AMBIENT][1];
814 fcmd[MTL_AMBIENT_BLUE] = mat[MAT_ATTRIB_FRONT_AMBIENT][2];
815 fcmd[MTL_AMBIENT_ALPHA] = mat[MAT_ATTRIB_FRONT_AMBIENT][3];
816 }
817 if (mask & MAT_BIT_FRONT_DIFFUSE) {
818 fcmd[MTL_DIFFUSE_RED] = mat[MAT_ATTRIB_FRONT_DIFFUSE][0];
819 fcmd[MTL_DIFFUSE_GREEN] = mat[MAT_ATTRIB_FRONT_DIFFUSE][1];
820 fcmd[MTL_DIFFUSE_BLUE] = mat[MAT_ATTRIB_FRONT_DIFFUSE][2];
821 fcmd[MTL_DIFFUSE_ALPHA] = mat[MAT_ATTRIB_FRONT_DIFFUSE][3];
822 }
823 if (mask & MAT_BIT_FRONT_SPECULAR) {
824 fcmd[MTL_SPECULAR_RED] = mat[MAT_ATTRIB_FRONT_SPECULAR][0];
825 fcmd[MTL_SPECULAR_GREEN] = mat[MAT_ATTRIB_FRONT_SPECULAR][1];
826 fcmd[MTL_SPECULAR_BLUE] = mat[MAT_ATTRIB_FRONT_SPECULAR][2];
827 fcmd[MTL_SPECULAR_ALPHA] = mat[MAT_ATTRIB_FRONT_SPECULAR][3];
828 }
829 if (mask & MAT_BIT_FRONT_SHININESS) {
830 fcmd[MTL_SHININESS] = mat[MAT_ATTRIB_FRONT_SHININESS][0];
831 }
832
833 RADEON_DB_STATECHANGE( rmesa, &rmesa->hw.mtl );
834
835 check_twoside_fallback( ctx );
836 /* update_global_ambient( ctx );*/
837 }
838
839 /* _NEW_LIGHT
840 * _NEW_MODELVIEW
841 * _MESA_NEW_NEED_EYE_COORDS
842 *
843 * Uses derived state from mesa:
844 * _VP_inf_norm
845 * _h_inf_norm
846 * _Position
847 * _NormSpotDirection
848 * _ModelViewInvScale
849 * _NeedEyeCoords
850 * _EyeZDir
851 *
852 * which are calculated in light.c and are correct for the current
853 * lighting space (model or eye), hence dependencies on _NEW_MODELVIEW
854 * and _MESA_NEW_NEED_EYE_COORDS.
855 */
856 static void update_light( struct gl_context *ctx )
857 {
858 r100ContextPtr rmesa = R100_CONTEXT(ctx);
859
860 /* Have to check these, or have an automatic shortcircuit mechanism
861 * to remove noop statechanges. (Or just do a better job on the
862 * front end).
863 */
864 {
865 GLuint tmp = rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL];
866
867 if (ctx->_NeedEyeCoords)
868 tmp &= ~RADEON_LIGHT_IN_MODELSPACE;
869 else
870 tmp |= RADEON_LIGHT_IN_MODELSPACE;
871
872
873 /* Leave this test disabled: (unexplained q3 lockup) (even with
874 new packets)
875 */
876 if (tmp != rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL])
877 {
878 RADEON_STATECHANGE( rmesa, tcl );
879 rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL] = tmp;
880 }
881 }
882
883 {
884 GLfloat *fcmd = (GLfloat *)RADEON_DB_STATE( eye );
885 fcmd[EYE_X] = ctx->_EyeZDir[0];
886 fcmd[EYE_Y] = ctx->_EyeZDir[1];
887 fcmd[EYE_Z] = - ctx->_EyeZDir[2];
888 fcmd[EYE_RESCALE_FACTOR] = ctx->_ModelViewInvScale;
889 RADEON_DB_STATECHANGE( rmesa, &rmesa->hw.eye );
890 }
891
892
893
894 if (ctx->Light.Enabled) {
895 GLbitfield mask = ctx->Light._EnabledLights;
896 while (mask) {
897 const int p = u_bit_scan(&mask);
898 struct gl_light *l = &ctx->Light.Light[p];
899 GLfloat *fcmd = (GLfloat *)RADEON_DB_STATE( lit[p] );
900
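/* A light with w == 0 is directional: the precomputed infinite-viewer vectors
 * (_VP_inf_norm, _h_inf_norm) are loaded in place of a position/spot direction,
 * matching the derived state listed in the comment above update_light().
 */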
901 if (l->EyePosition[3] == 0.0) {
902 COPY_3FV( &fcmd[LIT_POSITION_X], l->_VP_inf_norm );
903 COPY_3FV( &fcmd[LIT_DIRECTION_X], l->_h_inf_norm );
904 fcmd[LIT_POSITION_W] = 0;
905 fcmd[LIT_DIRECTION_W] = 0;
906 } else {
907 COPY_4V( &fcmd[LIT_POSITION_X], l->_Position );
908 fcmd[LIT_DIRECTION_X] = -l->_NormSpotDirection[0];
909 fcmd[LIT_DIRECTION_Y] = -l->_NormSpotDirection[1];
910 fcmd[LIT_DIRECTION_Z] = -l->_NormSpotDirection[2];
911 fcmd[LIT_DIRECTION_W] = 0;
912 }
913
914 RADEON_DB_STATECHANGE( rmesa, &rmesa->hw.lit[p] );
915 }
916 }
917 }
918
919 static void radeonLightfv( struct gl_context *ctx, GLenum light,
920 GLenum pname, const GLfloat *params )
921 {
922 r100ContextPtr rmesa = R100_CONTEXT(ctx);
923 GLint p = light - GL_LIGHT0;
924 struct gl_light *l = &ctx->Light.Light[p];
925 GLfloat *fcmd = (GLfloat *)rmesa->hw.lit[p].cmd;
926
927
928 switch (pname) {
929 case GL_AMBIENT:
930 case GL_DIFFUSE:
931 case GL_SPECULAR:
932 update_light_colors( ctx, p );
933 break;
934
935 case GL_SPOT_DIRECTION:
936 /* picked up in update_light */
937 break;
938
939 case GL_POSITION: {
940 /* positions picked up in update_light, but can do flag here */
941 GLuint flag;
942 GLuint idx = TCL_PER_LIGHT_CTL_0 + p/2;
943
944 /* FIXME: Set RANGE_ATTEN only when needed */
945 if (p&1)
946 flag = RADEON_LIGHT_1_IS_LOCAL;
947 else
948 flag = RADEON_LIGHT_0_IS_LOCAL;
949
950 RADEON_STATECHANGE(rmesa, tcl);
951 if (l->EyePosition[3] != 0.0F)
952 rmesa->hw.tcl.cmd[idx] |= flag;
953 else
954 rmesa->hw.tcl.cmd[idx] &= ~flag;
955 break;
956 }
957
958 case GL_SPOT_EXPONENT:
959 RADEON_STATECHANGE(rmesa, lit[p]);
960 fcmd[LIT_SPOT_EXPONENT] = params[0];
961 break;
962
963 case GL_SPOT_CUTOFF: {
964 GLuint flag = (p&1) ? RADEON_LIGHT_1_IS_SPOT : RADEON_LIGHT_0_IS_SPOT;
965 GLuint idx = TCL_PER_LIGHT_CTL_0 + p/2;
966
967 RADEON_STATECHANGE(rmesa, lit[p]);
968 fcmd[LIT_SPOT_CUTOFF] = l->_CosCutoff;
969
970 RADEON_STATECHANGE(rmesa, tcl);
971 if (l->SpotCutoff != 180.0F)
972 rmesa->hw.tcl.cmd[idx] |= flag;
973 else
974 rmesa->hw.tcl.cmd[idx] &= ~flag;
975
976 break;
977 }
978
979 case GL_CONSTANT_ATTENUATION:
980 RADEON_STATECHANGE(rmesa, lit[p]);
981 fcmd[LIT_ATTEN_CONST] = params[0];
982 if ( params[0] == 0.0 )
983 fcmd[LIT_ATTEN_CONST_INV] = FLT_MAX;
984 else
985 fcmd[LIT_ATTEN_CONST_INV] = 1.0 / params[0];
986 break;
987 case GL_LINEAR_ATTENUATION:
988 RADEON_STATECHANGE(rmesa, lit[p]);
989 fcmd[LIT_ATTEN_LINEAR] = params[0];
990 break;
991 case GL_QUADRATIC_ATTENUATION:
992 RADEON_STATECHANGE(rmesa, lit[p]);
993 fcmd[LIT_ATTEN_QUADRATIC] = params[0];
994 break;
995 default:
996 return;
997 }
998
999 /* Set RANGE_ATTEN only when needed */
1000 switch (pname) {
1001 case GL_POSITION:
1002 case GL_CONSTANT_ATTENUATION:
1003 case GL_LINEAR_ATTENUATION:
1004 case GL_QUADRATIC_ATTENUATION:
1005 {
1006 GLuint *icmd = (GLuint *)RADEON_DB_STATE( tcl );
1007 GLuint idx = TCL_PER_LIGHT_CTL_0 + p/2;
1008 GLuint atten_flag = ( p&1 ) ? RADEON_LIGHT_1_ENABLE_RANGE_ATTEN
1009 : RADEON_LIGHT_0_ENABLE_RANGE_ATTEN;
1010 GLuint atten_const_flag = ( p&1 ) ? RADEON_LIGHT_1_CONSTANT_RANGE_ATTEN
1011 : RADEON_LIGHT_0_CONSTANT_RANGE_ATTEN;
1012
1013 if ( l->EyePosition[3] == 0.0F ||
1014 ( ( fcmd[LIT_ATTEN_CONST] == 0.0 || fcmd[LIT_ATTEN_CONST] == 1.0 ) &&
1015 fcmd[LIT_ATTEN_QUADRATIC] == 0.0 && fcmd[LIT_ATTEN_LINEAR] == 0.0 ) ) {
1016 /* Disable attenuation */
1017 icmd[idx] &= ~atten_flag;
1018 } else {
1019 if ( fcmd[LIT_ATTEN_QUADRATIC] == 0.0 && fcmd[LIT_ATTEN_LINEAR] == 0.0 ) {
1020 /* Enable only constant portion of attenuation calculation */
1021 icmd[idx] |= ( atten_flag | atten_const_flag );
1022 } else {
1023 /* Enable full attenuation calculation */
1024 icmd[idx] &= ~atten_const_flag;
1025 icmd[idx] |= atten_flag;
1026 }
1027 }
1028
1029 RADEON_DB_STATECHANGE( rmesa, &rmesa->hw.tcl );
1030 break;
1031 }
1032 default:
1033 break;
1034 }
1035 }
1036
1037
1038
1039
1040 static void radeonLightModelfv( struct gl_context *ctx, GLenum pname,
1041 const GLfloat *param )
1042 {
1043 r100ContextPtr rmesa = R100_CONTEXT(ctx);
1044
1045 switch (pname) {
1046 case GL_LIGHT_MODEL_AMBIENT:
1047 update_global_ambient( ctx );
1048 break;
1049
1050 case GL_LIGHT_MODEL_LOCAL_VIEWER:
1051 RADEON_STATECHANGE( rmesa, tcl );
1052 if (ctx->Light.Model.LocalViewer)
1053 rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL] |= RADEON_LOCAL_VIEWER;
1054 else
1055 rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL] &= ~RADEON_LOCAL_VIEWER;
1056 break;
1057
1058 case GL_LIGHT_MODEL_TWO_SIDE:
1059 RADEON_STATECHANGE( rmesa, tcl );
1060 if (ctx->Light.Model.TwoSide)
1061 rmesa->hw.tcl.cmd[TCL_UCP_VERT_BLEND_CTL] |= RADEON_LIGHT_TWOSIDE;
1062 else
1063 rmesa->hw.tcl.cmd[TCL_UCP_VERT_BLEND_CTL] &= ~RADEON_LIGHT_TWOSIDE;
1064
1065 check_twoside_fallback( ctx );
1066
1067 if (rmesa->radeon.TclFallback) {
1068 radeonChooseRenderState( ctx );
1069 radeonChooseVertexState( ctx );
1070 }
1071 break;
1072
1073 case GL_LIGHT_MODEL_COLOR_CONTROL:
1074 radeonUpdateSpecular(ctx);
1075 break;
1076
1077 default:
1078 break;
1079 }
1080 }
1081
1082 static void radeonShadeModel( struct gl_context *ctx, GLenum mode )
1083 {
1084 r100ContextPtr rmesa = R100_CONTEXT(ctx);
1085 GLuint s = rmesa->hw.set.cmd[SET_SE_CNTL];
1086
1087 s &= ~(RADEON_DIFFUSE_SHADE_MASK |
1088 RADEON_ALPHA_SHADE_MASK |
1089 RADEON_SPECULAR_SHADE_MASK |
1090 RADEON_FOG_SHADE_MASK);
1091
1092 switch ( mode ) {
1093 case GL_FLAT:
1094 s |= (RADEON_DIFFUSE_SHADE_FLAT |
1095 RADEON_ALPHA_SHADE_FLAT |
1096 RADEON_SPECULAR_SHADE_FLAT |
1097 RADEON_FOG_SHADE_FLAT);
1098 break;
1099 case GL_SMOOTH:
1100 s |= (RADEON_DIFFUSE_SHADE_GOURAUD |
1101 RADEON_ALPHA_SHADE_GOURAUD |
1102 RADEON_SPECULAR_SHADE_GOURAUD |
1103 RADEON_FOG_SHADE_GOURAUD);
1104 break;
1105 default:
1106 return;
1107 }
1108
1109 if ( rmesa->hw.set.cmd[SET_SE_CNTL] != s ) {
1110 RADEON_STATECHANGE( rmesa, set );
1111 rmesa->hw.set.cmd[SET_SE_CNTL] = s;
1112 }
1113 }
1114
1115
1116 /* =============================================================
1117 * User clip planes
1118 */
1119
1120 static void radeonClipPlane( struct gl_context *ctx, GLenum plane, const GLfloat *eq )
1121 {
1122 GLint p = (GLint) plane - (GLint) GL_CLIP_PLANE0;
1123 r100ContextPtr rmesa = R100_CONTEXT(ctx);
1124 GLint *ip = (GLint *)ctx->Transform._ClipUserPlane[p];
1125
1126 RADEON_STATECHANGE( rmesa, ucp[p] );
1127 rmesa->hw.ucp[p].cmd[UCP_X] = ip[0];
1128 rmesa->hw.ucp[p].cmd[UCP_Y] = ip[1];
1129 rmesa->hw.ucp[p].cmd[UCP_Z] = ip[2];
1130 rmesa->hw.ucp[p].cmd[UCP_W] = ip[3];
1131 }
1132
1133 static void radeonUpdateClipPlanes( struct gl_context *ctx )
1134 {
1135 r100ContextPtr rmesa = R100_CONTEXT(ctx);
1136 GLbitfield mask = ctx->Transform.ClipPlanesEnabled;
1137
1138 while (mask) {
1139 const int p = u_bit_scan(&mask);
1140 GLint *ip = (GLint *)ctx->Transform._ClipUserPlane[p];
1141
1142 RADEON_STATECHANGE( rmesa, ucp[p] );
1143 rmesa->hw.ucp[p].cmd[UCP_X] = ip[0];
1144 rmesa->hw.ucp[p].cmd[UCP_Y] = ip[1];
1145 rmesa->hw.ucp[p].cmd[UCP_Z] = ip[2];
1146 rmesa->hw.ucp[p].cmd[UCP_W] = ip[3];
1147 }
1148 }
1149
1150
1151 /* =============================================================
1152 * Stencil
1153 */
1154
1155 static void
1156 radeonStencilFuncSeparate( struct gl_context *ctx, GLenum face, GLenum func,
1157 GLint ref, GLuint mask )
1158 {
1159 r100ContextPtr rmesa = R100_CONTEXT(ctx);
1160 GLuint refmask = ((_mesa_get_stencil_ref(ctx, 0) << RADEON_STENCIL_REF_SHIFT) |
1161 ((ctx->Stencil.ValueMask[0] & 0xff) << RADEON_STENCIL_MASK_SHIFT));
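/* e.g. glStencilFunc(GL_LEQUAL, 0x01, 0xff) packs 0x01 into the stencil ref
 * field and 0xff into the stencil value-mask field of RB3D_STENCILREFMASK;
 * the comparison function itself goes into RB3D_ZSTENCILCNTL below.
 */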
1162
1163 RADEON_STATECHANGE( rmesa, ctx );
1164 RADEON_STATECHANGE( rmesa, msk );
1165
1166 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] &= ~RADEON_STENCIL_TEST_MASK;
1167 rmesa->hw.msk.cmd[MSK_RB3D_STENCILREFMASK] &= ~(RADEON_STENCIL_REF_MASK|
1168 RADEON_STENCIL_VALUE_MASK);
1169
1170 switch ( ctx->Stencil.Function[0] ) {
1171 case GL_NEVER:
1172 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_STENCIL_TEST_NEVER;
1173 break;
1174 case GL_LESS:
1175 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_STENCIL_TEST_LESS;
1176 break;
1177 case GL_EQUAL:
1178 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_STENCIL_TEST_EQUAL;
1179 break;
1180 case GL_LEQUAL:
1181 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_STENCIL_TEST_LEQUAL;
1182 break;
1183 case GL_GREATER:
1184 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_STENCIL_TEST_GREATER;
1185 break;
1186 case GL_NOTEQUAL:
1187 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_STENCIL_TEST_NEQUAL;
1188 break;
1189 case GL_GEQUAL:
1190 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_STENCIL_TEST_GEQUAL;
1191 break;
1192 case GL_ALWAYS:
1193 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_STENCIL_TEST_ALWAYS;
1194 break;
1195 }
1196
1197 rmesa->hw.msk.cmd[MSK_RB3D_STENCILREFMASK] |= refmask;
1198 }
1199
1200 static void
1201 radeonStencilMaskSeparate( struct gl_context *ctx, GLenum face, GLuint mask )
1202 {
1203 r100ContextPtr rmesa = R100_CONTEXT(ctx);
1204
1205 RADEON_STATECHANGE( rmesa, msk );
1206 rmesa->hw.msk.cmd[MSK_RB3D_STENCILREFMASK] &= ~RADEON_STENCIL_WRITE_MASK;
1207 rmesa->hw.msk.cmd[MSK_RB3D_STENCILREFMASK] |=
1208 ((ctx->Stencil.WriteMask[0] & 0xff) << RADEON_STENCIL_WRITEMASK_SHIFT);
1209 }
1210
1211 static void radeonStencilOpSeparate( struct gl_context *ctx, GLenum face, GLenum fail,
1212 GLenum zfail, GLenum zpass )
1213 {
1214 r100ContextPtr rmesa = R100_CONTEXT(ctx);
1215
1216 /* The radeon 7200 has a stencil bug: DEC and INC_WRAP will actually both do DEC_WRAP,
1217 and DEC_WRAP (and INVERT) will do INVERT. There is no way to get correct INC_WRAP and DEC,
1218 but DEC_WRAP can be fixed by using DEC, and INC_WRAP can at least use INC. */
1219
1220 GLuint tempRADEON_STENCIL_FAIL_DEC_WRAP;
1221 GLuint tempRADEON_STENCIL_FAIL_INC_WRAP;
1222 GLuint tempRADEON_STENCIL_ZFAIL_DEC_WRAP;
1223 GLuint tempRADEON_STENCIL_ZFAIL_INC_WRAP;
1224 GLuint tempRADEON_STENCIL_ZPASS_DEC_WRAP;
1225 GLuint tempRADEON_STENCIL_ZPASS_INC_WRAP;
1226
1227 if (rmesa->radeon.radeonScreen->chip_flags & RADEON_CHIPSET_BROKEN_STENCIL) {
1228 tempRADEON_STENCIL_FAIL_DEC_WRAP = RADEON_STENCIL_FAIL_DEC;
1229 tempRADEON_STENCIL_FAIL_INC_WRAP = RADEON_STENCIL_FAIL_INC;
1230 tempRADEON_STENCIL_ZFAIL_DEC_WRAP = RADEON_STENCIL_ZFAIL_DEC;
1231 tempRADEON_STENCIL_ZFAIL_INC_WRAP = RADEON_STENCIL_ZFAIL_INC;
1232 tempRADEON_STENCIL_ZPASS_DEC_WRAP = RADEON_STENCIL_ZPASS_DEC;
1233 tempRADEON_STENCIL_ZPASS_INC_WRAP = RADEON_STENCIL_ZPASS_INC;
1234 }
1235 else {
1236 tempRADEON_STENCIL_FAIL_DEC_WRAP = RADEON_STENCIL_FAIL_DEC_WRAP;
1237 tempRADEON_STENCIL_FAIL_INC_WRAP = RADEON_STENCIL_FAIL_INC_WRAP;
1238 tempRADEON_STENCIL_ZFAIL_DEC_WRAP = RADEON_STENCIL_ZFAIL_DEC_WRAP;
1239 tempRADEON_STENCIL_ZFAIL_INC_WRAP = RADEON_STENCIL_ZFAIL_INC_WRAP;
1240 tempRADEON_STENCIL_ZPASS_DEC_WRAP = RADEON_STENCIL_ZPASS_DEC_WRAP;
1241 tempRADEON_STENCIL_ZPASS_INC_WRAP = RADEON_STENCIL_ZPASS_INC_WRAP;
1242 }
1243
1244 RADEON_STATECHANGE( rmesa, ctx );
1245 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] &= ~(RADEON_STENCIL_FAIL_MASK |
1246 RADEON_STENCIL_ZFAIL_MASK |
1247 RADEON_STENCIL_ZPASS_MASK);
1248
1249 switch ( ctx->Stencil.FailFunc[0] ) {
1250 case GL_KEEP:
1251 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_STENCIL_FAIL_KEEP;
1252 break;
1253 case GL_ZERO:
1254 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_STENCIL_FAIL_ZERO;
1255 break;
1256 case GL_REPLACE:
1257 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_STENCIL_FAIL_REPLACE;
1258 break;
1259 case GL_INCR:
1260 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_STENCIL_FAIL_INC;
1261 break;
1262 case GL_DECR:
1263 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_STENCIL_FAIL_DEC;
1264 break;
1265 case GL_INCR_WRAP:
1266 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= tempRADEON_STENCIL_FAIL_INC_WRAP;
1267 break;
1268 case GL_DECR_WRAP:
1269 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= tempRADEON_STENCIL_FAIL_DEC_WRAP;
1270 break;
1271 case GL_INVERT:
1272 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_STENCIL_FAIL_INVERT;
1273 break;
1274 }
1275
1276 switch ( ctx->Stencil.ZFailFunc[0] ) {
1277 case GL_KEEP:
1278 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_STENCIL_ZFAIL_KEEP;
1279 break;
1280 case GL_ZERO:
1281 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_STENCIL_ZFAIL_ZERO;
1282 break;
1283 case GL_REPLACE:
1284 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_STENCIL_ZFAIL_REPLACE;
1285 break;
1286 case GL_INCR:
1287 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_STENCIL_ZFAIL_INC;
1288 break;
1289 case GL_DECR:
1290 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_STENCIL_ZFAIL_DEC;
1291 break;
1292 case GL_INCR_WRAP:
1293 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= tempRADEON_STENCIL_ZFAIL_INC_WRAP;
1294 break;
1295 case GL_DECR_WRAP:
1296 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= tempRADEON_STENCIL_ZFAIL_DEC_WRAP;
1297 break;
1298 case GL_INVERT:
1299 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_STENCIL_ZFAIL_INVERT;
1300 break;
1301 }
1302
1303 switch ( ctx->Stencil.ZPassFunc[0] ) {
1304 case GL_KEEP:
1305 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_STENCIL_ZPASS_KEEP;
1306 break;
1307 case GL_ZERO:
1308 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_STENCIL_ZPASS_ZERO;
1309 break;
1310 case GL_REPLACE:
1311 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_STENCIL_ZPASS_REPLACE;
1312 break;
1313 case GL_INCR:
1314 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_STENCIL_ZPASS_INC;
1315 break;
1316 case GL_DECR:
1317 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_STENCIL_ZPASS_DEC;
1318 break;
1319 case GL_INCR_WRAP:
1320 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= tempRADEON_STENCIL_ZPASS_INC_WRAP;
1321 break;
1322 case GL_DECR_WRAP:
1323 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= tempRADEON_STENCIL_ZPASS_DEC_WRAP;
1324 break;
1325 case GL_INVERT:
1326 rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_STENCIL_ZPASS_INVERT;
1327 break;
1328 }
1329 }
1330
1331
1332
1333 /* =============================================================
1334 * Window position and viewport transformation
1335 */
1336
1337 /*
1338 * To correctly position primitives:
1339 */
1340 #define SUBPIXEL_X 0.125
1341 #define SUBPIXEL_Y 0.125
1342
1343
1344 /**
1345 * Called when window size or position changes or viewport or depth range
1346 * state is changed. We update the hardware viewport state here.
1347 */
1348 void radeonUpdateWindow( struct gl_context *ctx )
1349 {
1350 r100ContextPtr rmesa = R100_CONTEXT(ctx);
1351 __DRIdrawable *dPriv = radeon_get_drawable(&rmesa->radeon);
1352 GLfloat xoffset = 0.0;
1353 GLfloat yoffset = dPriv ? (GLfloat) dPriv->h : 0;
1354 const GLboolean render_to_fbo = (ctx->DrawBuffer ? _mesa_is_user_fbo(ctx->DrawBuffer) : 0);
1355 float scale[3], translate[3];
1356 GLfloat y_scale, y_bias;
1357
1358 if (render_to_fbo) {
1359 y_scale = 1.0;
1360 y_bias = 0;
1361 } else {
1362 y_scale = -1.0;
1363 y_bias = yoffset;
1364 }
1365
1366 _mesa_get_viewport_xform(ctx, 0, scale, translate);
1367 float_ui32_type sx = { scale[0] };
1368 float_ui32_type sy = { scale[1] * y_scale };
1369 float_ui32_type sz = { scale[2] };
1370 float_ui32_type tx = { translate[0] + xoffset + SUBPIXEL_X };
1371 float_ui32_type ty = { (translate[1] * y_scale) + y_bias + SUBPIXEL_Y };
1372 float_ui32_type tz = { translate[2] };
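/* A rough sketch of the mapping, assuming _mesa_get_viewport_xform() returns
 * scale = (W/2, H/2, (f-n)/2) and translate = (X + W/2, Y + H/2, (n+f)/2) for a
 * glViewport of (X, Y, W, H) and depth range (n, f): window x = sx * ndc_x + tx,
 * y is additionally flipped around the drawable height for window-system
 * drawables via y_scale/y_bias, and both offsets get the 1/8-pixel subpixel nudge.
 */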
1373
1374 RADEON_STATECHANGE( rmesa, vpt );
1375
1376 rmesa->hw.vpt.cmd[VPT_SE_VPORT_XSCALE] = sx.ui32;
1377 rmesa->hw.vpt.cmd[VPT_SE_VPORT_XOFFSET] = tx.ui32;
1378 rmesa->hw.vpt.cmd[VPT_SE_VPORT_YSCALE] = sy.ui32;
1379 rmesa->hw.vpt.cmd[VPT_SE_VPORT_YOFFSET] = ty.ui32;
1380 rmesa->hw.vpt.cmd[VPT_SE_VPORT_ZSCALE] = sz.ui32;
1381 rmesa->hw.vpt.cmd[VPT_SE_VPORT_ZOFFSET] = tz.ui32;
1382 }
1383
1384
1385 static void radeonViewport(struct gl_context *ctx)
1386 {
1387 /* Don't pipeline viewport changes, conflict with window offset
1388 * setting below. Could apply deltas to rescue pipelined viewport
1389 * values, or keep the originals hanging around.
1390 */
1391 radeonUpdateWindow( ctx );
1392
1393 radeon_viewport(ctx);
1394 }
1395
1396 static void radeonDepthRange(struct gl_context *ctx)
1397 {
1398 radeonUpdateWindow( ctx );
1399 }
1400
1401 /* =============================================================
1402 * Miscellaneous
1403 */
1404
1405 static void radeonRenderMode( struct gl_context *ctx, GLenum mode )
1406 {
1407 r100ContextPtr rmesa = R100_CONTEXT(ctx);
1408 FALLBACK( rmesa, RADEON_FALLBACK_RENDER_MODE, (mode != GL_RENDER) );
1409 }
1410
1411
1412 static GLuint radeon_rop_tab[] = {
1413 RADEON_ROP_CLEAR,
1414 RADEON_ROP_AND,
1415 RADEON_ROP_AND_REVERSE,
1416 RADEON_ROP_COPY,
1417 RADEON_ROP_AND_INVERTED,
1418 RADEON_ROP_NOOP,
1419 RADEON_ROP_XOR,
1420 RADEON_ROP_OR,
1421 RADEON_ROP_NOR,
1422 RADEON_ROP_EQUIV,
1423 RADEON_ROP_INVERT,
1424 RADEON_ROP_OR_REVERSE,
1425 RADEON_ROP_COPY_INVERTED,
1426 RADEON_ROP_OR_INVERTED,
1427 RADEON_ROP_NAND,
1428 RADEON_ROP_SET,
1429 };
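/* The GL logic-op enums are contiguous, GL_CLEAR (0x1500) through GL_SET (0x150F),
 * so the table can be indexed directly with (opcode - GL_CLEAR) below.
 */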
1430
1431 static void radeonLogicOpCode( struct gl_context *ctx, GLenum opcode )
1432 {
1433 r100ContextPtr rmesa = R100_CONTEXT(ctx);
1434 GLuint rop = (GLuint)opcode - GL_CLEAR;
1435
1436 assert( rop < 16 );
1437
1438 RADEON_STATECHANGE( rmesa, msk );
1439 rmesa->hw.msk.cmd[MSK_RB3D_ROPCNTL] = radeon_rop_tab[rop];
1440 }
1441
1442 /* =============================================================
1443 * State enable/disable
1444 */
1445
1446 static void radeonEnable( struct gl_context *ctx, GLenum cap, GLboolean state )
1447 {
1448 r100ContextPtr rmesa = R100_CONTEXT(ctx);
1449 GLuint p, flag;
1450
1451 if ( RADEON_DEBUG & RADEON_STATE )
1452 fprintf( stderr, "%s( %s = %s )\n", __func__,
1453 _mesa_enum_to_string( cap ),
1454 state ? "GL_TRUE" : "GL_FALSE" );
1455
1456 switch ( cap ) {
1457 /* Fast track this one...
1458 */
1459 case GL_TEXTURE_1D:
1460 case GL_TEXTURE_2D:
1461 case GL_TEXTURE_3D:
1462 break;
1463
1464 case GL_ALPHA_TEST:
1465 RADEON_STATECHANGE( rmesa, ctx );
1466 if (state) {
1467 rmesa->hw.ctx.cmd[CTX_PP_CNTL] |= RADEON_ALPHA_TEST_ENABLE;
1468 } else {
1469 rmesa->hw.ctx.cmd[CTX_PP_CNTL] &= ~RADEON_ALPHA_TEST_ENABLE;
1470 }
1471 break;
1472
1473 case GL_BLEND:
1474 RADEON_STATECHANGE( rmesa, ctx );
1475 if (state) {
1476 rmesa->hw.ctx.cmd[CTX_RB3D_CNTL] |= RADEON_ALPHA_BLEND_ENABLE;
1477 } else {
1478 rmesa->hw.ctx.cmd[CTX_RB3D_CNTL] &= ~RADEON_ALPHA_BLEND_ENABLE;
1479 }
1480 if ( (ctx->Color.ColorLogicOpEnabled || (ctx->Color.BlendEnabled
1481 && ctx->Color.Blend[0].EquationRGB == GL_LOGIC_OP)) ) {
1482 rmesa->hw.ctx.cmd[CTX_RB3D_CNTL] |= RADEON_ROP_ENABLE;
1483 } else {
1484 rmesa->hw.ctx.cmd[CTX_RB3D_CNTL] &= ~RADEON_ROP_ENABLE;
1485 }
1486
1487 /* Catch a possible fallback:
1488 */
1489 if (state) {
1490 ctx->Driver.BlendEquationSeparate( ctx,
1491 ctx->Color.Blend[0].EquationRGB,
1492 ctx->Color.Blend[0].EquationA );
1493 ctx->Driver.BlendFuncSeparate( ctx, ctx->Color.Blend[0].SrcRGB,
1494 ctx->Color.Blend[0].DstRGB,
1495 ctx->Color.Blend[0].SrcA,
1496 ctx->Color.Blend[0].DstA );
1497 }
1498 else {
1499 FALLBACK( rmesa, RADEON_FALLBACK_BLEND_FUNC, GL_FALSE );
1500 FALLBACK( rmesa, RADEON_FALLBACK_BLEND_EQ, GL_FALSE );
1501 }
1502 break;
1503
1504 case GL_CLIP_PLANE0:
1505 case GL_CLIP_PLANE1:
1506 case GL_CLIP_PLANE2:
1507 case GL_CLIP_PLANE3:
1508 case GL_CLIP_PLANE4:
1509 case GL_CLIP_PLANE5:
1510 p = cap-GL_CLIP_PLANE0;
1511 RADEON_STATECHANGE( rmesa, tcl );
1512 if (state) {
1513 rmesa->hw.tcl.cmd[TCL_UCP_VERT_BLEND_CTL] |= (RADEON_UCP_ENABLE_0<<p);
1514 radeonClipPlane( ctx, cap, NULL );
1515 }
1516 else {
1517 rmesa->hw.tcl.cmd[TCL_UCP_VERT_BLEND_CTL] &= ~(RADEON_UCP_ENABLE_0<<p);
1518 }
1519 break;
1520
1521 case GL_COLOR_MATERIAL:
1522 radeonColorMaterial( ctx, 0, 0 );
1523 radeonUpdateMaterial( ctx );
1524 break;
1525
1526 case GL_CULL_FACE:
1527 radeonCullFace( ctx, 0 );
1528 break;
1529
1530 case GL_DEPTH_TEST:
1531 RADEON_STATECHANGE(rmesa, ctx );
1532 if ( state ) {
1533 rmesa->hw.ctx.cmd[CTX_RB3D_CNTL] |= RADEON_Z_ENABLE;
1534 } else {
1535 rmesa->hw.ctx.cmd[CTX_RB3D_CNTL] &= ~RADEON_Z_ENABLE;
1536 }
1537 break;
1538
1539 case GL_DITHER:
1540 RADEON_STATECHANGE(rmesa, ctx );
1541 if ( state ) {
1542 rmesa->hw.ctx.cmd[CTX_RB3D_CNTL] |= RADEON_DITHER_ENABLE;
1543 rmesa->hw.ctx.cmd[CTX_RB3D_CNTL] &= ~rmesa->radeon.state.color.roundEnable;
1544 } else {
1545 rmesa->hw.ctx.cmd[CTX_RB3D_CNTL] &= ~RADEON_DITHER_ENABLE;
1546 rmesa->hw.ctx.cmd[CTX_RB3D_CNTL] |= rmesa->radeon.state.color.roundEnable;
1547 }
1548 break;
1549
1550 case GL_FOG:
1551 RADEON_STATECHANGE(rmesa, ctx );
1552 if ( state ) {
1553 rmesa->hw.ctx.cmd[CTX_PP_CNTL] |= RADEON_FOG_ENABLE;
1554 radeonFogfv( ctx, GL_FOG_MODE, NULL );
1555 } else {
1556 rmesa->hw.ctx.cmd[CTX_PP_CNTL] &= ~RADEON_FOG_ENABLE;
1557 RADEON_STATECHANGE(rmesa, tcl);
1558 rmesa->hw.tcl.cmd[TCL_UCP_VERT_BLEND_CTL] &= ~RADEON_TCL_FOG_MASK;
1559 }
1560 radeonUpdateSpecular( ctx ); /* for PK_SPEC */
1561 _mesa_allow_light_in_model( ctx, !state );
1562 break;
1563
1564 case GL_LIGHT0:
1565 case GL_LIGHT1:
1566 case GL_LIGHT2:
1567 case GL_LIGHT3:
1568 case GL_LIGHT4:
1569 case GL_LIGHT5:
1570 case GL_LIGHT6:
1571 case GL_LIGHT7:
1572 RADEON_STATECHANGE(rmesa, tcl);
1573 p = cap - GL_LIGHT0;
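/* Each TCL_PER_LIGHT_CTL register carries the flags for a pair of lights,
 * hence the p/2 register index and the odd/even flag selection here.
 */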
1574 if (p&1)
1575 flag = (RADEON_LIGHT_1_ENABLE |
1576 RADEON_LIGHT_1_ENABLE_AMBIENT |
1577 RADEON_LIGHT_1_ENABLE_SPECULAR);
1578 else
1579 flag = (RADEON_LIGHT_0_ENABLE |
1580 RADEON_LIGHT_0_ENABLE_AMBIENT |
1581 RADEON_LIGHT_0_ENABLE_SPECULAR);
1582
1583 if (state)
1584 rmesa->hw.tcl.cmd[p/2 + TCL_PER_LIGHT_CTL_0] |= flag;
1585 else
1586 rmesa->hw.tcl.cmd[p/2 + TCL_PER_LIGHT_CTL_0] &= ~flag;
1587
1588 /*
1589 */
1590 update_light_colors( ctx, p );
1591 break;
1592
1593 case GL_LIGHTING:
1594 RADEON_STATECHANGE(rmesa, tcl);
1595 radeonUpdateSpecular(ctx);
1596 check_twoside_fallback( ctx );
1597 break;
1598
1599 case GL_LINE_SMOOTH:
1600 RADEON_STATECHANGE( rmesa, ctx );
1601 if ( state ) {
1602 rmesa->hw.ctx.cmd[CTX_PP_CNTL] |= RADEON_ANTI_ALIAS_LINE;
1603 } else {
1604 rmesa->hw.ctx.cmd[CTX_PP_CNTL] &= ~RADEON_ANTI_ALIAS_LINE;
1605 }
1606 break;
1607
1608 case GL_LINE_STIPPLE:
1609 RADEON_STATECHANGE( rmesa, ctx );
1610 if ( state ) {
1611 rmesa->hw.ctx.cmd[CTX_PP_CNTL] |= RADEON_PATTERN_ENABLE;
1612 } else {
1613 rmesa->hw.ctx.cmd[CTX_PP_CNTL] &= ~RADEON_PATTERN_ENABLE;
1614 }
1615 break;
1616
1617 case GL_COLOR_LOGIC_OP:
1618 RADEON_STATECHANGE( rmesa, ctx );
1619 if ( (ctx->Color.ColorLogicOpEnabled || (ctx->Color.BlendEnabled
1620 && ctx->Color.Blend[0].EquationRGB == GL_LOGIC_OP)) ) {
1621 rmesa->hw.ctx.cmd[CTX_RB3D_CNTL] |= RADEON_ROP_ENABLE;
1622 } else {
1623 rmesa->hw.ctx.cmd[CTX_RB3D_CNTL] &= ~RADEON_ROP_ENABLE;
1624 }
1625 break;
1626
1627 case GL_NORMALIZE:
1628 RADEON_STATECHANGE( rmesa, tcl );
1629 if ( state ) {
1630 rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL] |= RADEON_NORMALIZE_NORMALS;
1631 } else {
1632 rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL] &= ~RADEON_NORMALIZE_NORMALS;
1633 }
1634 break;
1635
1636 case GL_POLYGON_OFFSET_POINT:
1637 RADEON_STATECHANGE( rmesa, set );
1638 if ( state ) {
1639 rmesa->hw.set.cmd[SET_SE_CNTL] |= RADEON_ZBIAS_ENABLE_POINT;
1640 } else {
1641 rmesa->hw.set.cmd[SET_SE_CNTL] &= ~RADEON_ZBIAS_ENABLE_POINT;
1642 }
1643 break;
1644
1645 case GL_POLYGON_OFFSET_LINE:
1646 RADEON_STATECHANGE( rmesa, set );
1647 if ( state ) {
1648 rmesa->hw.set.cmd[SET_SE_CNTL] |= RADEON_ZBIAS_ENABLE_LINE;
1649 } else {
1650 rmesa->hw.set.cmd[SET_SE_CNTL] &= ~RADEON_ZBIAS_ENABLE_LINE;
1651 }
1652 break;
1653
1654 case GL_POLYGON_OFFSET_FILL:
1655 RADEON_STATECHANGE( rmesa, set );
1656 if ( state ) {
1657 rmesa->hw.set.cmd[SET_SE_CNTL] |= RADEON_ZBIAS_ENABLE_TRI;
1658 } else {
1659 rmesa->hw.set.cmd[SET_SE_CNTL] &= ~RADEON_ZBIAS_ENABLE_TRI;
1660 }
1661 break;
1662
1663 case GL_POLYGON_SMOOTH:
1664 RADEON_STATECHANGE( rmesa, ctx );
1665 if ( state ) {
1666 rmesa->hw.ctx.cmd[CTX_PP_CNTL] |= RADEON_ANTI_ALIAS_POLY;
1667 } else {
1668 rmesa->hw.ctx.cmd[CTX_PP_CNTL] &= ~RADEON_ANTI_ALIAS_POLY;
1669 }
1670 break;
1671
1672 case GL_POLYGON_STIPPLE:
1673 RADEON_STATECHANGE(rmesa, ctx );
1674 if ( state ) {
1675 rmesa->hw.ctx.cmd[CTX_PP_CNTL] |= RADEON_STIPPLE_ENABLE;
1676 } else {
1677 rmesa->hw.ctx.cmd[CTX_PP_CNTL] &= ~RADEON_STIPPLE_ENABLE;
1678 }
1679 break;
1680
1681 case GL_RESCALE_NORMAL_EXT: {
1682 GLboolean tmp = ctx->_NeedEyeCoords ? state : !state;
1683 RADEON_STATECHANGE( rmesa, tcl );
1684 if ( tmp ) {
1685 rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL] |= RADEON_RESCALE_NORMALS;
1686 } else {
1687 rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL] &= ~RADEON_RESCALE_NORMALS;
1688 }
1689 break;
1690 }
1691
1692 case GL_SCISSOR_TEST:
1693 radeon_firevertices(&rmesa->radeon);
1694 rmesa->radeon.state.scissor.enabled = state;
1695 radeonUpdateScissor( ctx );
1696 break;
1697
1698 case GL_STENCIL_TEST:
1699 {
1700 GLboolean hw_stencil = GL_FALSE;
1701 if (ctx->DrawBuffer) {
1702 struct radeon_renderbuffer *rrbStencil
1703 = radeon_get_renderbuffer(ctx->DrawBuffer, BUFFER_STENCIL);
1704 hw_stencil = (rrbStencil && rrbStencil->bo);
1705 }
1706
1707 if (hw_stencil) {
1708 RADEON_STATECHANGE( rmesa, ctx );
1709 if ( state ) {
1710 rmesa->hw.ctx.cmd[CTX_RB3D_CNTL] |= RADEON_STENCIL_ENABLE;
1711 } else {
1712 rmesa->hw.ctx.cmd[CTX_RB3D_CNTL] &= ~RADEON_STENCIL_ENABLE;
1713 }
1714 } else {
1715 FALLBACK( rmesa, RADEON_FALLBACK_STENCIL, state );
1716 }
1717 }
1718 break;
1719
1720 case GL_TEXTURE_GEN_Q:
1721 case GL_TEXTURE_GEN_R:
1722 case GL_TEXTURE_GEN_S:
1723 case GL_TEXTURE_GEN_T:
1724 /* Picked up in radeonUpdateTextureState.
1725 */
1726 rmesa->recheck_texgen[ctx->Texture.CurrentUnit] = GL_TRUE;
1727 break;
1728
1729 case GL_COLOR_SUM_EXT:
1730 radeonUpdateSpecular ( ctx );
1731 break;
1732
1733 default:
1734 return;
1735 }
1736 }
1737
1738
1739 static void radeonLightingSpaceChange( struct gl_context *ctx )
1740 {
1741 r100ContextPtr rmesa = R100_CONTEXT(ctx);
1742 GLboolean tmp;
1743 RADEON_STATECHANGE( rmesa, tcl );
1744
1745 if (RADEON_DEBUG & RADEON_STATE)
1746 fprintf(stderr, "%s %d BEFORE %x\n", __func__, ctx->_NeedEyeCoords,
1747 rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL]);
1748
1749 if (ctx->_NeedEyeCoords)
1750 tmp = ctx->Transform.RescaleNormals;
1751 else
1752 tmp = !ctx->Transform.RescaleNormals;
1753
1754 if ( tmp ) {
1755 rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL] |= RADEON_RESCALE_NORMALS;
1756 } else {
1757 rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL] &= ~RADEON_RESCALE_NORMALS;
1758 }
1759
1760 if (RADEON_DEBUG & RADEON_STATE)
1761 fprintf(stderr, "%s %d AFTER %x\n", __func__, ctx->_NeedEyeCoords,
1762 rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL]);
1763 }
1764
1765 /* =============================================================
1766 * Deferred state management - matrices, textures, other?
1767 */
1768
1769
1770 void radeonUploadTexMatrix( r100ContextPtr rmesa,
1771 int unit, GLboolean swapcols )
1772 {
   /* Here's how this works: on r100 only 3 tex coords can be submitted, so
      the vector probably looks like (s t r|q 0) (it is unclear whether the
      last coord is hardwired to 0; it could be 1 as well).  Interestingly,
      texgen appears to generate all 4 coords - at least tests with projtex
      indicated that.
      So: if we need the q coord in the end (determined solely by the texture
      target, i.e. 2d / 1d / texrect targets), we swap the 3rd and 4th row.
      Additionally, if we have no texgen but 4 submitted tex coords, we swap
      columns 3 and 4 (for the 2d / 1d / texrect targets), since the q coord
      would otherwise be submitted in the "wrong" (i.e. 3rd) slot.
      If an app submits only 3 coords for 2d targets, we assume it is saving
      on vertex size and using the texture matrix to swap the r and q coords
      around (ut2k3 does exactly that), so we skip the 3rd / 4th column swap -
      the 3rd / 4th row swap is of course still needed.  This will potentially
      break apps which use TexCoord3x just for fun.  Additionally, it will
      never work if an app uses an "advanced" texture matrix and relies on all
      4 texcoord inputs to generate the maximum 3 that are needed.  That seems
      impossible to do with hw tcl on r100, and incredibly hard to detect, so
      we can't just fall back in such a case.  Assume it never happens... - rs
   */
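   /* A worked example of the simple (no column swap) path below, to make the
    * row swap concrete: Mesa matrices are column-major (src[col*4 + row]),
    * and dest is filled with one row's four elements at a time, in the order
    * row 0, row 1, row 3, row 2 - i.e. the 3rd and 4th rows trade places so
    * the q row lands in the hardware's 3rd output slot:
    *
    *    src[0] src[4] src[ 8] src[12]      (row 0)
    *    src[1] src[5] src[ 9] src[13]      (row 1)
    *    src[3] src[7] src[11] src[15]      (row 3)
    *    src[2] src[6] src[10] src[14]      (row 2)
    *
    * The swapcols path additionally exchanges the last two entries written
    * per group; see the explicit element list for the exact (partly
    * double-swapped) ordering.
    */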

   int idx = TEXMAT_0 + unit;
   float *dest = ((float *)RADEON_DB_STATE( mat[idx] )) + MAT_ELT_0;
   int i;
   struct gl_texture_unit tUnit = rmesa->radeon.glCtx.Texture.Unit[unit];
   GLfloat *src = rmesa->tmpmat[unit].m;

   rmesa->TexMatColSwap &= ~(1 << unit);
   if (!tUnit._Current ||
       (tUnit._Current->Target != GL_TEXTURE_3D &&
        tUnit._Current->Target != GL_TEXTURE_CUBE_MAP)) {
      if (swapcols) {
         rmesa->TexMatColSwap |= 1 << unit;
         /* note that some elements get swapped twice! */
         *dest++ = src[0];
         *dest++ = src[4];
         *dest++ = src[12];
         *dest++ = src[8];
         *dest++ = src[1];
         *dest++ = src[5];
         *dest++ = src[13];
         *dest++ = src[9];
         *dest++ = src[2];
         *dest++ = src[6];
         *dest++ = src[15];
         *dest++ = src[11];
         /* these last 4 are probably never used */
         *dest++ = src[3];
         *dest++ = src[7];
         *dest++ = src[14];
         *dest++ = src[10];
      }
      else {
         for (i = 0; i < 2; i++) {
            *dest++ = src[i];
            *dest++ = src[i+4];
            *dest++ = src[i+8];
            *dest++ = src[i+12];
         }
         for (i = 3; i >= 2; i--) {
            *dest++ = src[i];
            *dest++ = src[i+4];
            *dest++ = src[i+8];
            *dest++ = src[i+12];
         }
      }
   }
   else {
      for (i = 0 ; i < 4 ; i++) {
         *dest++ = src[i];
         *dest++ = src[i+4];
         *dest++ = src[i+8];
         *dest++ = src[i+12];
      }
   }

   RADEON_DB_STATECHANGE( rmesa, &rmesa->hw.mat[idx] );
}


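/* Upload a Mesa (column-major) matrix transposed, i.e. one row at a time,
 * into the double-buffered matrix state atom.  upload_matrix_t below copies
 * the elements verbatim instead; it is used for the inverse modelview
 * matrix, so the hardware presumably ends up seeing the inverse transpose it
 * needs for transforming normals.
 */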
static void upload_matrix( r100ContextPtr rmesa, GLfloat *src, int idx )
{
   float *dest = ((float *)RADEON_DB_STATE( mat[idx] )) + MAT_ELT_0;
   int i;

   for (i = 0 ; i < 4 ; i++) {
      *dest++ = src[i];
      *dest++ = src[i+4];
      *dest++ = src[i+8];
      *dest++ = src[i+12];
   }

   RADEON_DB_STATECHANGE( rmesa, &rmesa->hw.mat[idx] );
}

static void upload_matrix_t( r100ContextPtr rmesa, GLfloat *src, int idx )
{
   float *dest = ((float *)RADEON_DB_STATE( mat[idx] )) + MAT_ELT_0;
   memcpy(dest, src, 16*sizeof(float));
   RADEON_DB_STATECHANGE( rmesa, &rmesa->hw.mat[idx] );
}


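/* Decide per enabled texture unit whether a texture matrix (possibly
 * pre-concatenated with the texgen object/eye plane matrix) needs to be
 * uploaded, and switch the TCL output vertex selectors between "input" and
 * "computed" texcoords to match.
 */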
static void update_texturematrix( struct gl_context *ctx )
{
   r100ContextPtr rmesa = R100_CONTEXT( ctx );
   GLuint tpc = rmesa->hw.tcl.cmd[TCL_TEXTURE_PROC_CTL];
   GLuint vs = rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXSEL];
   int unit;
   GLuint texMatEnabled = 0;
   rmesa->NeedTexMatrix = 0;
   rmesa->TexMatColSwap = 0;

   for (unit = 0 ; unit < ctx->Const.MaxTextureUnits; unit++) {
      if (ctx->Texture.Unit[unit]._Current) {
         GLboolean needMatrix = GL_FALSE;
         if (ctx->TextureMatrixStack[unit].Top->type != MATRIX_IDENTITY) {
            needMatrix = GL_TRUE;
            texMatEnabled |= (RADEON_TEXGEN_TEXMAT_0_ENABLE |
                              RADEON_TEXMAT_0_ENABLE) << unit;

            if (rmesa->TexGenEnabled & (RADEON_TEXMAT_0_ENABLE << unit)) {
               /* Need to preconcatenate any active texgen
                * obj/eyeplane matrices:
                */
               _math_matrix_mul_matrix( &rmesa->tmpmat[unit],
                                        ctx->TextureMatrixStack[unit].Top,
                                        &rmesa->TexGenMatrix[unit] );
            }
            else {
               _math_matrix_copy( &rmesa->tmpmat[unit],
                                  ctx->TextureMatrixStack[unit].Top );
            }
         }
         else if (rmesa->TexGenEnabled & (RADEON_TEXMAT_0_ENABLE << unit)) {
            _math_matrix_copy( &rmesa->tmpmat[unit], &rmesa->TexGenMatrix[unit] );
            needMatrix = GL_TRUE;
         }
         if (needMatrix) {
            rmesa->NeedTexMatrix |= 1 << unit;
            radeonUploadTexMatrix( rmesa, unit,
                                   !ctx->Texture.Unit[unit].TexGenEnabled );
         }
      }
   }

   tpc = (texMatEnabled | rmesa->TexGenEnabled);

   /* TCL_TEX_COMPUTED_x is TCL_TEX_INPUT_x | 0x8 */
   vs &= ~((RADEON_TCL_TEX_COMPUTED_TEX_0 << RADEON_TCL_TEX_0_OUTPUT_SHIFT) |
           (RADEON_TCL_TEX_COMPUTED_TEX_0 << RADEON_TCL_TEX_1_OUTPUT_SHIFT) |
           (RADEON_TCL_TEX_COMPUTED_TEX_0 << RADEON_TCL_TEX_2_OUTPUT_SHIFT));

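   /* Shift each unit's TEXGEN_TEXMAT enable bit up so it lands on the
    * "computed" bit (the 0x8 noted above) of that unit's output selector
    * field; the decreasing +3/+2/+1 compensates for the per-unit enable bits
    * themselves sitting one position apart.
    */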
   vs |= (((tpc & RADEON_TEXGEN_TEXMAT_0_ENABLE) <<
           (RADEON_TCL_TEX_0_OUTPUT_SHIFT + 3)) |
          ((tpc & RADEON_TEXGEN_TEXMAT_1_ENABLE) <<
           (RADEON_TCL_TEX_1_OUTPUT_SHIFT + 2)) |
          ((tpc & RADEON_TEXGEN_TEXMAT_2_ENABLE) <<
           (RADEON_TCL_TEX_2_OUTPUT_SHIFT + 1)));

   if (tpc != rmesa->hw.tcl.cmd[TCL_TEXTURE_PROC_CTL] ||
       vs != rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXSEL]) {

      RADEON_STATECHANGE(rmesa, tcl);
      rmesa->hw.tcl.cmd[TCL_TEXTURE_PROC_CTL] = tpc;
      rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXSEL] = vs;
   }
}

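/* Register every buffer object the upcoming draw can touch (color buffer,
 * depth/stencil buffer, textures and the currently reserved DMA buffer) with
 * the command-stream space checker.  Returns GL_FALSE if they cannot all be
 * accommodated, in which case radeonValidateState fails and the caller falls
 * back.
 */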
GLboolean r100ValidateBuffers(struct gl_context *ctx)
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   struct radeon_renderbuffer *rrb;
   int i, ret;

   radeon_cs_space_reset_bos(rmesa->radeon.cmdbuf.cs);

   /* color buffer */
   rrb = radeon_get_colorbuffer(&rmesa->radeon);
   if (rrb && rrb->bo) {
      radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs, rrb->bo,
                                        0, RADEON_GEM_DOMAIN_VRAM);
   }

   /* depth buffer */
   rrb = radeon_get_depthbuffer(&rmesa->radeon);
   if (rrb && rrb->bo) {
      radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs, rrb->bo,
                                        0, RADEON_GEM_DOMAIN_VRAM);
   }

   for (i = 0; i < ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits; ++i) {
      radeonTexObj *t;

      if (!ctx->Texture.Unit[i]._Current)
         continue;

      t = rmesa->state.texture.unit[i].texobj;

      if (!t)
         continue;
      if (t->image_override && t->bo)
         radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs, t->bo,
                                           RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0);
      else if (t->mt->bo)
         radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs, t->mt->bo,
                                           RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0);
   }

   ret = radeon_cs_space_check_with_bo(rmesa->radeon.cmdbuf.cs,
                                       first_elem(&rmesa->radeon.dma.reserved)->bo,
                                       RADEON_GEM_DOMAIN_GTT, 0);
   if (ret)
      return GL_FALSE;
   return GL_TRUE;
}

GLboolean radeonValidateState( struct gl_context *ctx )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   GLuint new_state = rmesa->radeon.NewGLState;

   if (new_state & _NEW_BUFFERS) {
      _mesa_update_framebuffer(ctx, ctx->ReadBuffer, ctx->DrawBuffer);
      /* this updates the DrawBuffer's Width/Height if it's a FBO */
      _mesa_update_draw_buffer_bounds(ctx, ctx->DrawBuffer);
      RADEON_STATECHANGE(rmesa, ctx);
   }

   if (new_state & _NEW_TEXTURE) {
      radeonUpdateTextureState( ctx );
      new_state |= rmesa->radeon.NewGLState; /* may add TEXTURE_MATRIX */
   }

   /* we need to do a space check here */
   if (!r100ValidateBuffers(ctx))
      return GL_FALSE;

   /* Need an event driven matrix update?
    */
   if (new_state & (_NEW_MODELVIEW|_NEW_PROJECTION))
      upload_matrix( rmesa, ctx->_ModelProjectMatrix.m, MODEL_PROJ );

   /* Need these for lighting (shouldn't upload otherwise)
    */
   if (new_state & (_NEW_MODELVIEW)) {
      upload_matrix( rmesa, ctx->ModelviewMatrixStack.Top->m, MODEL );
      upload_matrix_t( rmesa, ctx->ModelviewMatrixStack.Top->inv, MODEL_IT );
   }

   /* Does this need to be triggered on eg. modelview for
    * texgen-derived objplane/eyeplane matrices?
    */
   if (new_state & _NEW_TEXTURE_MATRIX) {
      update_texturematrix( ctx );
   }

   if (new_state & (_NEW_LIGHT|_NEW_MODELVIEW|_MESA_NEW_NEED_EYE_COORDS)) {
      update_light( ctx );
   }

   /* emit all active clip planes if projection matrix changes.
    */
   if (new_state & (_NEW_PROJECTION)) {
      if (ctx->Transform.ClipPlanesEnabled)
         radeonUpdateClipPlanes( ctx );
   }

   rmesa->radeon.NewGLState = 0;

   return GL_TRUE;
}


static void radeonInvalidateState(struct gl_context *ctx)
{
   GLuint new_state = ctx->NewState;

   if (new_state & (_NEW_SCISSOR | _NEW_BUFFERS | _NEW_VIEWPORT))
      _mesa_update_draw_buffer_bounds(ctx, ctx->DrawBuffer);

   _swrast_InvalidateState( ctx, new_state );
   _swsetup_InvalidateState( ctx, new_state );
   _tnl_InvalidateState( ctx, new_state );
   R100_CONTEXT(ctx)->radeon.NewGLState |= new_state;
}


/* A hack.  Need a faster way to find this out.
 */
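/* A non-zero stride on any front/back material attribute means the material
 * changes per vertex (glMaterial inside glBegin/glEnd), which TCL cannot
 * handle - radeonWrapRunPipeline uses this to raise the material fallback.
 */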
static GLboolean check_material( struct gl_context *ctx )
{
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   GLint i;

   for (i = _TNL_ATTRIB_MAT_FRONT_AMBIENT;
        i < _TNL_ATTRIB_MAT_BACK_INDEXES;
        i++)
      if (tnl->vb.AttribPtr[i] &&
          tnl->vb.AttribPtr[i]->stride)
         return GL_TRUE;

   return GL_FALSE;
}


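/* Wrapper around the TNL pipeline: validate any deferred state first
 * (falling back if validation fails), and temporarily enable the TCL
 * material fallback while per-vertex materials are active.
 */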
static void radeonWrapRunPipeline( struct gl_context *ctx )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   GLboolean has_material;

   if (0)
      fprintf(stderr, "%s, newstate: %x\n", __func__, rmesa->radeon.NewGLState);

   /* Validate state:
    */
   if (rmesa->radeon.NewGLState)
      if (!radeonValidateState( ctx ))
         FALLBACK(rmesa, RADEON_FALLBACK_TEXTURE, GL_TRUE);

   has_material = (ctx->Light.Enabled && check_material( ctx ));

   if (has_material) {
      TCL_FALLBACK( ctx, RADEON_TCL_FALLBACK_MATERIAL, GL_TRUE );
   }

   /* Run the pipeline.
    */
   _tnl_run_pipeline( ctx );

   if (has_material) {
      TCL_FALLBACK( ctx, RADEON_TCL_FALLBACK_MATERIAL, GL_FALSE );
   }
}

static void radeonPolygonStipple( struct gl_context *ctx, const GLubyte *mask )
{
   r100ContextPtr r100 = R100_CONTEXT(ctx);
   GLint i;

   radeon_firevertices(&r100->radeon);

   RADEON_STATECHANGE(r100, stp);

   /* Must flip pattern upside down.
    */
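   /* (GL specifies the stipple pattern bottom-up relative to the window,
    * while the hardware pattern registers presumably expect it top-down.)
    */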
   for ( i = 31 ; i >= 0; i--) {
      r100->hw.stp.cmd[3 + i] = ((GLuint *) mask)[i];
   }
}


/* Initialize the driver's state functions.
 * Many of the ctx->Driver functions might have been initialized to
 * software defaults in the earlier _mesa_init_driver_functions() call.
 */
void radeonInitStateFuncs( struct gl_context *ctx )
{
   ctx->Driver.UpdateState = radeonInvalidateState;
   ctx->Driver.LightingSpaceChange = radeonLightingSpaceChange;

   ctx->Driver.DrawBuffer = radeonDrawBuffer;
   ctx->Driver.ReadBuffer = radeonReadBuffer;
   ctx->Driver.CopyPixels = _mesa_meta_CopyPixels;
   ctx->Driver.DrawPixels = _mesa_meta_DrawPixels;
   ctx->Driver.ReadPixels = radeonReadPixels;

   ctx->Driver.AlphaFunc = radeonAlphaFunc;
   ctx->Driver.BlendEquationSeparate = radeonBlendEquationSeparate;
   ctx->Driver.BlendFuncSeparate = radeonBlendFuncSeparate;
   ctx->Driver.ClipPlane = radeonClipPlane;
   ctx->Driver.ColorMask = radeonColorMask;
   ctx->Driver.CullFace = radeonCullFace;
   ctx->Driver.DepthFunc = radeonDepthFunc;
   ctx->Driver.DepthMask = radeonDepthMask;
   ctx->Driver.DepthRange = radeonDepthRange;
   ctx->Driver.Enable = radeonEnable;
   ctx->Driver.Fogfv = radeonFogfv;
   ctx->Driver.FrontFace = radeonFrontFace;
   ctx->Driver.LightModelfv = radeonLightModelfv;
   ctx->Driver.Lightfv = radeonLightfv;
   ctx->Driver.LineStipple = radeonLineStipple;
   ctx->Driver.LineWidth = radeonLineWidth;
   ctx->Driver.LogicOpcode = radeonLogicOpCode;
   ctx->Driver.PolygonMode = radeonPolygonMode;
   ctx->Driver.PolygonOffset = radeonPolygonOffset;
   ctx->Driver.PolygonStipple = radeonPolygonStipple;
   ctx->Driver.RenderMode = radeonRenderMode;
   ctx->Driver.Scissor = radeonScissor;
   ctx->Driver.ShadeModel = radeonShadeModel;
   ctx->Driver.StencilFuncSeparate = radeonStencilFuncSeparate;
   ctx->Driver.StencilMaskSeparate = radeonStencilMaskSeparate;
   ctx->Driver.StencilOpSeparate = radeonStencilOpSeparate;
   ctx->Driver.Viewport = radeonViewport;

   TNL_CONTEXT(ctx)->Driver.NotifyMaterialChange = radeonUpdateMaterial;
   TNL_CONTEXT(ctx)->Driver.RunPipeline = radeonWrapRunPipeline;
}