1 /*
2 * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
3 * Copyright 2009 Marek Olšák <maraeo@gmail.com>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE. */
23
24 #include "draw/draw_context.h"
25
26 #include "util/u_framebuffer.h"
27 #include "util/u_half.h"
28 #include "util/u_helpers.h"
29 #include "util/u_math.h"
30 #include "util/u_mm.h"
31 #include "util/u_memory.h"
32 #include "util/u_pack_color.h"
33 #include "util/u_transfer.h"
34
35 #include "tgsi/tgsi_parse.h"
36
37 #include "pipe/p_config.h"
38
39 #include "r300_cb.h"
40 #include "r300_context.h"
41 #include "r300_emit.h"
42 #include "r300_reg.h"
43 #include "r300_screen.h"
44 #include "r300_screen_buffer.h"
45 #include "r300_state_inlines.h"
46 #include "r300_fs.h"
47 #include "r300_texture.h"
48 #include "r300_vs.h"
49
50 /* r300_state: Functions used to initialize the state context by translating
51 * Gallium state objects into semi-native r300 state objects. */
52
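/* Bind a new CSO only if it differs from the currently bound one, and mark
 * the corresponding atom dirty so it gets re-emitted. */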
53 #define UPDATE_STATE(cso, atom) \
54 if (cso != atom.state) { \
55 atom.state = cso; \
56 r300_mark_atom_dirty(r300, &(atom)); \
57 }
58
59 static boolean blend_discard_if_src_alpha_0(unsigned srcRGB, unsigned srcA,
60 unsigned dstRGB, unsigned dstA)
61 {
62 /* If the blend equation is ADD or REVERSE_SUBTRACT,
63 * SRC_ALPHA == 0, and the following state is set, the colorbuffer
64 * will not be changed.
65 * Notice that the dst factors are the src factors inverted. */
66 return (srcRGB == PIPE_BLENDFACTOR_SRC_ALPHA ||
67 srcRGB == PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE ||
68 srcRGB == PIPE_BLENDFACTOR_ZERO) &&
69 (srcA == PIPE_BLENDFACTOR_SRC_COLOR ||
70 srcA == PIPE_BLENDFACTOR_SRC_ALPHA ||
71 srcA == PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE ||
72 srcA == PIPE_BLENDFACTOR_ZERO) &&
73 (dstRGB == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
74 dstRGB == PIPE_BLENDFACTOR_ONE) &&
75 (dstA == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
76 dstA == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
77 dstA == PIPE_BLENDFACTOR_ONE);
78 }
79
80 static boolean blend_discard_if_src_alpha_1(unsigned srcRGB, unsigned srcA,
81 unsigned dstRGB, unsigned dstA)
82 {
83 /* If the blend equation is ADD or REVERSE_SUBTRACT,
84 * SRC_ALPHA == 1, and the following state is set, the colorbuffer
85 * will not be changed.
86 * Notice that the dst factors are the src factors inverted. */
87 return (srcRGB == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
88 srcRGB == PIPE_BLENDFACTOR_ZERO) &&
89 (srcA == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
90 srcA == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
91 srcA == PIPE_BLENDFACTOR_ZERO) &&
92 (dstRGB == PIPE_BLENDFACTOR_SRC_ALPHA ||
93 dstRGB == PIPE_BLENDFACTOR_ONE) &&
94 (dstA == PIPE_BLENDFACTOR_SRC_COLOR ||
95 dstA == PIPE_BLENDFACTOR_SRC_ALPHA ||
96 dstA == PIPE_BLENDFACTOR_ONE);
97 }
98
99 static boolean blend_discard_if_src_color_0(unsigned srcRGB, unsigned srcA,
100 unsigned dstRGB, unsigned dstA)
101 {
102 /* If the blend equation is ADD or REVERSE_SUBTRACT,
103 * SRC_COLOR == (0,0,0), and the following state is set, the colorbuffer
104 * will not be changed.
105 * Notice that the dst factors are the src factors inverted. */
106 return (srcRGB == PIPE_BLENDFACTOR_SRC_COLOR ||
107 srcRGB == PIPE_BLENDFACTOR_ZERO) &&
108 (srcA == PIPE_BLENDFACTOR_ZERO) &&
109 (dstRGB == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
110 dstRGB == PIPE_BLENDFACTOR_ONE) &&
111 (dstA == PIPE_BLENDFACTOR_ONE);
112 }
113
114 static boolean blend_discard_if_src_color_1(unsigned srcRGB, unsigned srcA,
115 unsigned dstRGB, unsigned dstA)
116 {
117 /* If the blend equation is ADD or REVERSE_SUBTRACT,
118 * SRC_COLOR == (1,1,1), and the following state is set, the colorbuffer
119 * will not be changed.
120 * Notice that the dst factors are the src factors inverted. */
121 return (srcRGB == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
122 srcRGB == PIPE_BLENDFACTOR_ZERO) &&
123 (srcA == PIPE_BLENDFACTOR_ZERO) &&
124 (dstRGB == PIPE_BLENDFACTOR_SRC_COLOR ||
125 dstRGB == PIPE_BLENDFACTOR_ONE) &&
126 (dstA == PIPE_BLENDFACTOR_ONE);
127 }
128
129 static boolean blend_discard_if_src_alpha_color_0(unsigned srcRGB, unsigned srcA,
130 unsigned dstRGB, unsigned dstA)
131 {
132 /* If the blend equation is ADD or REVERSE_SUBTRACT,
133 * SRC_ALPHA_COLOR == (0,0,0,0), and the following state is set,
134 * the colorbuffer will not be changed.
135 * Notice that the dst factors are the src factors inverted. */
136 return (srcRGB == PIPE_BLENDFACTOR_SRC_COLOR ||
137 srcRGB == PIPE_BLENDFACTOR_SRC_ALPHA ||
138 srcRGB == PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE ||
139 srcRGB == PIPE_BLENDFACTOR_ZERO) &&
140 (srcA == PIPE_BLENDFACTOR_SRC_COLOR ||
141 srcA == PIPE_BLENDFACTOR_SRC_ALPHA ||
142 srcA == PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE ||
143 srcA == PIPE_BLENDFACTOR_ZERO) &&
144 (dstRGB == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
145 dstRGB == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
146 dstRGB == PIPE_BLENDFACTOR_ONE) &&
147 (dstA == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
148 dstA == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
149 dstA == PIPE_BLENDFACTOR_ONE);
150 }
151
152 static boolean blend_discard_if_src_alpha_color_1(unsigned srcRGB, unsigned srcA,
153 unsigned dstRGB, unsigned dstA)
154 {
155 /* If the blend equation is ADD or REVERSE_SUBTRACT,
156 * SRC_ALPHA_COLOR == (1,1,1,1), and the following state is set,
157 * the colorbuffer will not be changed.
158 * Notice that the dst factors are the src factors inverted. */
159 return (srcRGB == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
160 srcRGB == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
161 srcRGB == PIPE_BLENDFACTOR_ZERO) &&
162 (srcA == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
163 srcA == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
164 srcA == PIPE_BLENDFACTOR_ZERO) &&
165 (dstRGB == PIPE_BLENDFACTOR_SRC_COLOR ||
166 dstRGB == PIPE_BLENDFACTOR_SRC_ALPHA ||
167 dstRGB == PIPE_BLENDFACTOR_ONE) &&
168 (dstA == PIPE_BLENDFACTOR_SRC_COLOR ||
169 dstA == PIPE_BLENDFACTOR_SRC_ALPHA ||
170 dstA == PIPE_BLENDFACTOR_ONE);
171 }
172
173 static unsigned blend_discard_conditionally(unsigned eqRGB, unsigned eqA,
174 unsigned dstRGB, unsigned dstA,
175 unsigned srcRGB, unsigned srcA)
176 {
177 unsigned blend_control = 0;
178
179 /* Optimization: discard pixels which don't change the colorbuffer.
180 *
181 * The code below is non-trivial and some math is involved.
182 *
183 * Discarding pixels must be disabled when FP16 AA is enabled.
184 * This is a hardware bug. Also, this implementation wouldn't work
185 * with FP blending enabled and equation clamping disabled.
186 *
187 * Equations other than ADD are rarely used and therefore won't be
188 * optimized. */
189 if ((eqRGB == PIPE_BLEND_ADD || eqRGB == PIPE_BLEND_REVERSE_SUBTRACT) &&
190 (eqA == PIPE_BLEND_ADD || eqA == PIPE_BLEND_REVERSE_SUBTRACT)) {
191 /* ADD: X+Y
192 * REVERSE_SUBTRACT: Y-X
193 *
194 * The idea is:
195 * If X = src*srcFactor = 0 and Y = dst*dstFactor = 1,
196 * then CB will not be changed.
197 *
198 * Given the srcFactor and dstFactor variables, we can derive
199 * what src and dst should be equal to and discard appropriate
200 * pixels.
201 */
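        /* Worked example: with srcRGB/srcA = SRC_ALPHA and
         * dstRGB/dstA = INV_SRC_ALPHA, a pixel whose alpha is 0 gives
         * X = src*0 = 0 and Y = dst*1 = dst, so the blend result equals
         * the current colorbuffer value and the pixel can be discarded
         * (this is the SRC_ALPHA_0 case below). */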
202 if (blend_discard_if_src_alpha_0(srcRGB, srcA, dstRGB, dstA)) {
203 blend_control |= R300_DISCARD_SRC_PIXELS_SRC_ALPHA_0;
204 } else if (blend_discard_if_src_alpha_1(srcRGB, srcA,
205 dstRGB, dstA)) {
206 blend_control |= R300_DISCARD_SRC_PIXELS_SRC_ALPHA_1;
207 } else if (blend_discard_if_src_color_0(srcRGB, srcA,
208 dstRGB, dstA)) {
209 blend_control |= R300_DISCARD_SRC_PIXELS_SRC_COLOR_0;
210 } else if (blend_discard_if_src_color_1(srcRGB, srcA,
211 dstRGB, dstA)) {
212 blend_control |= R300_DISCARD_SRC_PIXELS_SRC_COLOR_1;
213 } else if (blend_discard_if_src_alpha_color_0(srcRGB, srcA,
214 dstRGB, dstA)) {
215 blend_control |=
216 R300_DISCARD_SRC_PIXELS_SRC_ALPHA_COLOR_0;
217 } else if (blend_discard_if_src_alpha_color_1(srcRGB, srcA,
218 dstRGB, dstA)) {
219 blend_control |=
220 R300_DISCARD_SRC_PIXELS_SRC_ALPHA_COLOR_1;
221 }
222 }
223 return blend_control;
224 }
225
226 /* The hardware colormask is clunky and must be swizzled depending on the format.
227 * This was figured out by trial-and-error. */
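/* For example, bgra_cmask() below moves the R write-enable bit (bit 0 of the
 * Gallium mask) up to bit 2 and the B bit (bit 2) down to bit 0, leaving G and
 * A in place, presumably matching the component order the hardware uses for
 * BGRA colorbuffers. */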
228 static unsigned bgra_cmask(unsigned mask)
229 {
230 return ((mask & PIPE_MASK_R) << 2) |
231 ((mask & PIPE_MASK_B) >> 2) |
232 (mask & (PIPE_MASK_G | PIPE_MASK_A));
233 }
234
235 static unsigned rgba_cmask(unsigned mask)
236 {
237 return mask & PIPE_MASK_RGBA;
238 }
239
240 static unsigned rrrr_cmask(unsigned mask)
241 {
242 return (mask & PIPE_MASK_R) |
243 ((mask & PIPE_MASK_R) << 1) |
244 ((mask & PIPE_MASK_R) << 2) |
245 ((mask & PIPE_MASK_R) << 3);
246 }
247
248 static unsigned aaaa_cmask(unsigned mask)
249 {
250 return ((mask & PIPE_MASK_A) >> 3) |
251 ((mask & PIPE_MASK_A) >> 2) |
252 ((mask & PIPE_MASK_A) >> 1) |
253 (mask & PIPE_MASK_A);
254 }
255
256 static unsigned grrg_cmask(unsigned mask)
257 {
258 return ((mask & PIPE_MASK_R) << 1) |
259 ((mask & PIPE_MASK_R) << 2) |
260 ((mask & PIPE_MASK_G) >> 1) |
261 ((mask & PIPE_MASK_G) << 2);
262 }
263
264 static unsigned arra_cmask(unsigned mask)
265 {
266 return ((mask & PIPE_MASK_R) << 1) |
267 ((mask & PIPE_MASK_R) << 2) |
268 ((mask & PIPE_MASK_A) >> 3) |
269 (mask & PIPE_MASK_A);
270 }
271
272 static unsigned blend_read_enable(unsigned eqRGB, unsigned eqA,
273 unsigned dstRGB, unsigned dstA,
274 unsigned srcRGB, unsigned srcA,
275 boolean src_alpha_optz)
276 {
277 unsigned blend_control = 0;
278
279 /* Optimization: some operations do not require the destination color.
280 *
281 * When SRC_ALPHA_SATURATE is used, colorbuffer reads must be enabled,
282 * otherwise blending gives incorrect results. It seems to be
283 * a hardware bug. */
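    /* A read is required when a MIN/MAX equation is used, when any dst
     * factor is non-zero, or when a src factor references the destination
     * color or alpha; otherwise the destination value never enters the
     * blend equation and the read can be skipped. */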
284 if (eqRGB == PIPE_BLEND_MIN || eqA == PIPE_BLEND_MIN ||
285 eqRGB == PIPE_BLEND_MAX || eqA == PIPE_BLEND_MAX ||
286 dstRGB != PIPE_BLENDFACTOR_ZERO ||
287 dstA != PIPE_BLENDFACTOR_ZERO ||
288 srcRGB == PIPE_BLENDFACTOR_DST_COLOR ||
289 srcRGB == PIPE_BLENDFACTOR_DST_ALPHA ||
290 srcRGB == PIPE_BLENDFACTOR_INV_DST_COLOR ||
291 srcRGB == PIPE_BLENDFACTOR_INV_DST_ALPHA ||
292 srcA == PIPE_BLENDFACTOR_DST_COLOR ||
293 srcA == PIPE_BLENDFACTOR_DST_ALPHA ||
294 srcA == PIPE_BLENDFACTOR_INV_DST_COLOR ||
295 srcA == PIPE_BLENDFACTOR_INV_DST_ALPHA ||
296 srcRGB == PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE) {
297 /* Enable reading from the colorbuffer. */
298 blend_control |= R300_READ_ENABLE;
299
300 if (src_alpha_optz) {
301 /* Optimization: Depending on incoming pixels, we can
302 * conditionally disable the reading in hardware... */
303 if (eqRGB != PIPE_BLEND_MIN && eqA != PIPE_BLEND_MIN &&
304 eqRGB != PIPE_BLEND_MAX && eqA != PIPE_BLEND_MAX) {
305 /* Disable reading if SRC_ALPHA == 0. */
306 if ((dstRGB == PIPE_BLENDFACTOR_SRC_ALPHA ||
307 dstRGB == PIPE_BLENDFACTOR_ZERO) &&
308 (dstA == PIPE_BLENDFACTOR_SRC_COLOR ||
309 dstA == PIPE_BLENDFACTOR_SRC_ALPHA ||
310 dstA == PIPE_BLENDFACTOR_ZERO) &&
311 (srcRGB != PIPE_BLENDFACTOR_DST_COLOR &&
312 srcRGB != PIPE_BLENDFACTOR_DST_ALPHA &&
313 srcRGB != PIPE_BLENDFACTOR_INV_DST_COLOR &&
314 srcRGB != PIPE_BLENDFACTOR_INV_DST_ALPHA)) {
315 blend_control |= R500_SRC_ALPHA_0_NO_READ;
316 }
317
318 /* Disable reading if SRC_ALPHA == 1. */
319 if ((dstRGB == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
320 dstRGB == PIPE_BLENDFACTOR_ZERO) &&
321 (dstA == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
322 dstA == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
323 dstA == PIPE_BLENDFACTOR_ZERO) &&
324 (srcRGB != PIPE_BLENDFACTOR_DST_COLOR &&
325 srcRGB != PIPE_BLENDFACTOR_DST_ALPHA &&
326 srcRGB != PIPE_BLENDFACTOR_INV_DST_COLOR &&
327 srcRGB != PIPE_BLENDFACTOR_INV_DST_ALPHA)) {
328 blend_control |= R500_SRC_ALPHA_1_NO_READ;
329 }
330 }
331 }
332 }
333 return blend_control;
334 }
335
336 /* Create a new blend state based on the CSO blend state.
337 *
338 * This encompasses alpha blending, logic/raster ops, and blend dithering. */
339 static void* r300_create_blend_state(struct pipe_context* pipe,
340 const struct pipe_blend_state* state)
341 {
342 struct r300_screen* r300screen = r300_screen(pipe->screen);
343 struct r300_blend_state* blend = CALLOC_STRUCT(r300_blend_state);
344 uint32_t blend_control = 0; /* R300_RB3D_CBLEND: 0x4e04 */
345 uint32_t blend_control_noclamp = 0; /* R300_RB3D_CBLEND: 0x4e04 */
346 uint32_t blend_control_noalpha = 0; /* R300_RB3D_CBLEND: 0x4e04 */
347 uint32_t blend_control_noalpha_noclamp = 0; /* R300_RB3D_CBLEND: 0x4e04 */
348 uint32_t alpha_blend_control = 0; /* R300_RB3D_ABLEND: 0x4e08 */
349 uint32_t alpha_blend_control_noclamp = 0; /* R300_RB3D_ABLEND: 0x4e08 */
350 uint32_t alpha_blend_control_noalpha = 0; /* R300_RB3D_ABLEND: 0x4e08 */
351 uint32_t alpha_blend_control_noalpha_noclamp = 0; /* R300_RB3D_ABLEND: 0x4e08 */
352 uint32_t rop = 0; /* R300_RB3D_ROPCNTL: 0x4e18 */
353 uint32_t dither = 0; /* R300_RB3D_DITHER_CTL: 0x4e50 */
354 int i;
355
356 const unsigned eqRGB = state->rt[0].rgb_func;
357 const unsigned srcRGB = state->rt[0].rgb_src_factor;
358 const unsigned dstRGB = state->rt[0].rgb_dst_factor;
359
360 const unsigned eqA = state->rt[0].alpha_func;
361 const unsigned srcA = state->rt[0].alpha_src_factor;
362 const unsigned dstA = state->rt[0].alpha_dst_factor;
363
364 unsigned srcRGBX = srcRGB;
365 unsigned dstRGBX = dstRGB;
366 CB_LOCALS;
367
368 blend->state = *state;
369
370 /* force DST_ALPHA to ONE where we can */
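    /* For colorbuffer formats without an alpha channel (the "noalpha" blend
     * variants built below), destination alpha effectively reads back as 1.0,
     * so DST_ALPHA can be folded to ONE and INV_DST_ALPHA to ZERO. */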
371 switch (srcRGBX) {
372 case PIPE_BLENDFACTOR_DST_ALPHA:
373 srcRGBX = PIPE_BLENDFACTOR_ONE;
374 break;
375 case PIPE_BLENDFACTOR_INV_DST_ALPHA:
376 srcRGBX = PIPE_BLENDFACTOR_ZERO;
377 break;
378 }
379
380 switch (dstRGBX) {
381 case PIPE_BLENDFACTOR_DST_ALPHA:
382 dstRGBX = PIPE_BLENDFACTOR_ONE;
383 break;
384 case PIPE_BLENDFACTOR_INV_DST_ALPHA:
385 dstRGBX = PIPE_BLENDFACTOR_ZERO;
386 break;
387 }
388
389 /* Get blending register values. */
390 if (state->rt[0].blend_enable) {
391 unsigned blend_eq, blend_eq_noclamp;
392
393 /* Despite the name, ALPHA_BLEND_ENABLE has nothing to do with alpha;
394 * it's just the crappy D3D naming. */
395 blend_control = blend_control_noclamp =
396 R300_ALPHA_BLEND_ENABLE |
397 ( r300_translate_blend_factor(srcRGB) << R300_SRC_BLEND_SHIFT) |
398 ( r300_translate_blend_factor(dstRGB) << R300_DST_BLEND_SHIFT);
399
400 blend_control_noalpha = blend_control_noalpha_noclamp =
401 R300_ALPHA_BLEND_ENABLE |
402 ( r300_translate_blend_factor(srcRGBX) << R300_SRC_BLEND_SHIFT) |
403 ( r300_translate_blend_factor(dstRGBX) << R300_DST_BLEND_SHIFT);
404
405 blend_eq = r300_translate_blend_function(eqRGB, TRUE);
406 blend_eq_noclamp = r300_translate_blend_function(eqRGB, FALSE);
407
408 blend_control |= blend_eq;
409 blend_control_noalpha |= blend_eq;
410 blend_control_noclamp |= blend_eq_noclamp;
411 blend_control_noalpha_noclamp |= blend_eq_noclamp;
412
413 /* Optimization: some operations do not require the destination color. */
414 blend_control |= blend_read_enable(eqRGB, eqA, dstRGB, dstA,
415 srcRGB, srcA, r300screen->caps.is_r500);
416 blend_control_noclamp |= blend_read_enable(eqRGB, eqA, dstRGB, dstA,
417 srcRGB, srcA, FALSE);
418 blend_control_noalpha |= blend_read_enable(eqRGB, eqA, dstRGBX, dstA,
419 srcRGBX, srcA, r300screen->caps.is_r500);
420 blend_control_noalpha_noclamp |= blend_read_enable(eqRGB, eqA, dstRGBX, dstA,
421 srcRGBX, srcA, FALSE);
422
423 /* Optimization: discard pixels which don't change the colorbuffer.
424 * It cannot be used with FP16 AA. */
425 blend_control |= blend_discard_conditionally(eqRGB, eqA, dstRGB, dstA,
426 srcRGB, srcA);
427 blend_control_noalpha |= blend_discard_conditionally(eqRGB, eqA, dstRGBX, dstA,
428 srcRGBX, srcA);
429
430 /* separate alpha */
431 if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB) {
432 blend_control |= R300_SEPARATE_ALPHA_ENABLE;
433 blend_control_noclamp |= R300_SEPARATE_ALPHA_ENABLE;
434
435 alpha_blend_control = alpha_blend_control_noclamp =
436 (r300_translate_blend_factor(srcA) << R300_SRC_BLEND_SHIFT) |
437 (r300_translate_blend_factor(dstA) << R300_DST_BLEND_SHIFT);
438 alpha_blend_control |= r300_translate_blend_function(eqA, TRUE);
439 alpha_blend_control_noclamp |= r300_translate_blend_function(eqA, FALSE);
440 }
441 if (srcA != srcRGBX || dstA != dstRGBX || eqA != eqRGB) {
442 blend_control_noalpha |= R300_SEPARATE_ALPHA_ENABLE;
443 blend_control_noalpha_noclamp |= R300_SEPARATE_ALPHA_ENABLE;
444
445 alpha_blend_control_noalpha = alpha_blend_control_noalpha_noclamp =
446 (r300_translate_blend_factor(srcA) << R300_SRC_BLEND_SHIFT) |
447 (r300_translate_blend_factor(dstA) << R300_DST_BLEND_SHIFT);
448 alpha_blend_control_noalpha |= r300_translate_blend_function(eqA, TRUE);
449 alpha_blend_control_noalpha_noclamp |= r300_translate_blend_function(eqA, FALSE);
450 }
451 }
452
453 /* PIPE_LOGICOP_* don't need to be translated, fortunately. */
454 if (state->logicop_enable) {
455 rop = R300_RB3D_ROPCNTL_ROP_ENABLE |
456 (state->logicop_func) << R300_RB3D_ROPCNTL_ROP_SHIFT;
457 }
458
459 /* Neither fglrx nor classic r300 ever set this, regardless of dithering
460 * state. Since it's an optional implementation detail, we can leave it
461 * out and never dither.
462 *
463 * This could be revisited if we ever get quality or conformance hints.
464 *
465 if (state->dither) {
466 dither = R300_RB3D_DITHER_CTL_DITHER_MODE_LUT |
467 R300_RB3D_DITHER_CTL_ALPHA_DITHER_MODE_LUT;
468 }
469 */
470
471 /* Build a command buffer. */
472 {
473 unsigned (*func[COLORMASK_NUM_SWIZZLES])(unsigned) = {
474 bgra_cmask,
475 rgba_cmask,
476 rrrr_cmask,
477 aaaa_cmask,
478 grrg_cmask,
479 arra_cmask,
480 bgra_cmask,
481 rgba_cmask
482 };
483
484 for (i = 0; i < COLORMASK_NUM_SWIZZLES; i++) {
485 boolean has_alpha = i != COLORMASK_RGBX && i != COLORMASK_BGRX;
486
487 BEGIN_CB(blend->cb_clamp[i], 8);
488 OUT_CB_REG(R300_RB3D_ROPCNTL, rop);
489 OUT_CB_REG_SEQ(R300_RB3D_CBLEND, 3);
490 OUT_CB(has_alpha ? blend_control : blend_control_noalpha);
491 OUT_CB(has_alpha ? alpha_blend_control : alpha_blend_control_noalpha);
492 OUT_CB(func[i](state->rt[0].colormask));
493 OUT_CB_REG(R300_RB3D_DITHER_CTL, dither);
494 END_CB;
495 }
496 }
497
498 /* Build a command buffer (for RGBA16F). */
499 BEGIN_CB(blend->cb_noclamp, 8);
500 OUT_CB_REG(R300_RB3D_ROPCNTL, rop);
501 OUT_CB_REG_SEQ(R300_RB3D_CBLEND, 3);
502 OUT_CB(blend_control_noclamp);
503 OUT_CB(alpha_blend_control_noclamp);
504 OUT_CB(rgba_cmask(state->rt[0].colormask));
505 OUT_CB_REG(R300_RB3D_DITHER_CTL, dither);
506 END_CB;
507
508 /* Build a command buffer (for RGB16F). */
509 BEGIN_CB(blend->cb_noclamp_noalpha, 8);
510 OUT_CB_REG(R300_RB3D_ROPCNTL, rop);
511 OUT_CB_REG_SEQ(R300_RB3D_CBLEND, 3);
512 OUT_CB(blend_control_noalpha_noclamp);
513 OUT_CB(alpha_blend_control_noalpha_noclamp);
514 OUT_CB(rgba_cmask(state->rt[0].colormask));
515 OUT_CB_REG(R300_RB3D_DITHER_CTL, dither);
516 END_CB;
517
518 /* The same as above, but with no colorbuffer reads and writes. */
519 BEGIN_CB(blend->cb_no_readwrite, 8);
520 OUT_CB_REG(R300_RB3D_ROPCNTL, rop);
521 OUT_CB_REG_SEQ(R300_RB3D_CBLEND, 3);
522 OUT_CB(0);
523 OUT_CB(0);
524 OUT_CB(0);
525 OUT_CB_REG(R300_RB3D_DITHER_CTL, dither);
526 END_CB;
527
528 return (void*)blend;
529 }
530
531 /* Bind blend state. */
532 static void r300_bind_blend_state(struct pipe_context* pipe,
533 void* state)
534 {
535 struct r300_context* r300 = r300_context(pipe);
536 struct r300_blend_state *blend = (struct r300_blend_state*)state;
537 boolean last_alpha_to_one = r300->alpha_to_one;
538 boolean last_alpha_to_coverage = r300->alpha_to_coverage;
539
540 UPDATE_STATE(state, r300->blend_state);
541
542 if (!blend)
543 return;
544
545 r300->alpha_to_one = blend->state.alpha_to_one;
546 r300->alpha_to_coverage = blend->state.alpha_to_coverage;
547
548 if (r300->alpha_to_one != last_alpha_to_one && r300->msaa_enable &&
549 r300->fs_status == FRAGMENT_SHADER_VALID) {
550 r300->fs_status = FRAGMENT_SHADER_MAYBE_DIRTY;
551 }
552
553 if (r300->alpha_to_coverage != last_alpha_to_coverage &&
554 r300->msaa_enable) {
555 r300_mark_atom_dirty(r300, &r300->dsa_state);
556 }
557 }
558
559 /* Free blend state. */
560 static void r300_delete_blend_state(struct pipe_context* pipe,
561 void* state)
562 {
563 FREE(state);
564 }
565
566 /* Convert float to 10bit integer */
567 static unsigned float_to_fixed10(float f)
568 {
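    /* Scale by slightly less than 1024 so that f == 1.0 truncates to 1023,
     * the maximum 10-bit value; CLAMP catches out-of-range inputs. */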
569 return CLAMP((unsigned)(f * 1023.9f), 0, 1023);
570 }
571
572 /* Set blend color.
573 * Set up both R300 and R500 registers; figure out later which one to write. */
574 static void r300_set_blend_color(struct pipe_context* pipe,
575 const struct pipe_blend_color* color)
576 {
577 struct r300_context* r300 = r300_context(pipe);
578 struct pipe_framebuffer_state *fb = r300->fb_state.state;
579 struct r300_blend_color_state *state =
580 (struct r300_blend_color_state*)r300->blend_color_state.state;
581 struct pipe_blend_color c;
582 struct pipe_surface *cb;
583 float tmp;
584 CB_LOCALS;
585
586 state->state = *color; /* Save it, so that we can reuse it in set_fb_state */
587 c = *color;
588 cb = fb->nr_cbufs ? r300_get_nonnull_cb(fb, 0) : NULL;
589
590 /* The blend color is dependent on the colorbuffer format. */
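    /* For narrow formats the hardware apparently fetches the constant from
     * different channels, so the components are replicated or swapped below
     * to match (e.g. R and B are exchanged for R8G8B8A8 vs. the native
     * B8G8R8A8 ordering). */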
591 if (cb) {
592 switch (cb->format) {
593 case PIPE_FORMAT_R8_UNORM:
594 case PIPE_FORMAT_L8_UNORM:
595 case PIPE_FORMAT_I8_UNORM:
596 c.color[1] = c.color[0];
597 break;
598
599 case PIPE_FORMAT_A8_UNORM:
600 c.color[1] = c.color[3];
601 break;
602
603 case PIPE_FORMAT_R8G8_UNORM:
604 c.color[2] = c.color[1];
605 break;
606
607 case PIPE_FORMAT_L8A8_UNORM:
608 case PIPE_FORMAT_R8A8_UNORM:
609 c.color[2] = c.color[3];
610 break;
611
612 case PIPE_FORMAT_R8G8B8A8_UNORM:
613 case PIPE_FORMAT_R8G8B8X8_UNORM:
614 tmp = c.color[0];
615 c.color[0] = c.color[2];
616 c.color[2] = tmp;
617 break;
618
619 default:;
620 }
621 }
622
623 if (r300->screen->caps.is_r500) {
624 BEGIN_CB(state->cb, 3);
625 OUT_CB_REG_SEQ(R500_RB3D_CONSTANT_COLOR_AR, 2);
626
627 switch (cb ? cb->format : 0) {
628 case PIPE_FORMAT_R16G16B16A16_FLOAT:
629 case PIPE_FORMAT_R16G16B16X16_FLOAT:
630 OUT_CB(util_float_to_half(c.color[2]) |
631 (util_float_to_half(c.color[3]) << 16));
632 OUT_CB(util_float_to_half(c.color[0]) |
633 (util_float_to_half(c.color[1]) << 16));
634 break;
635
636 default:
637 OUT_CB(float_to_fixed10(c.color[0]) |
638 (float_to_fixed10(c.color[3]) << 16));
639 OUT_CB(float_to_fixed10(c.color[2]) |
640 (float_to_fixed10(c.color[1]) << 16));
641 }
642
643 END_CB;
644 } else {
645 union util_color uc;
646 util_pack_color(c.color, PIPE_FORMAT_B8G8R8A8_UNORM, &uc);
647
648 BEGIN_CB(state->cb, 2);
649 OUT_CB_REG(R300_RB3D_BLEND_COLOR, uc.ui[0]);
650 END_CB;
651 }
652
653 r300_mark_atom_dirty(r300, &r300->blend_color_state);
654 }
655
656 static void r300_set_clip_state(struct pipe_context* pipe,
657 const struct pipe_clip_state* state)
658 {
659 struct r300_context* r300 = r300_context(pipe);
660 struct r300_clip_state *clip =
661 (struct r300_clip_state*)r300->clip_state.state;
662 CB_LOCALS;
663
664 if (r300->screen->caps.has_tcl) {
665 BEGIN_CB(clip->cb, r300->clip_state.size);
666 OUT_CB_REG(R300_VAP_PVS_VECTOR_INDX_REG,
667 (r300->screen->caps.is_r500 ?
668 R500_PVS_UCP_START : R300_PVS_UCP_START));
669 OUT_CB_ONE_REG(R300_VAP_PVS_UPLOAD_DATA, 6 * 4);
670 OUT_CB_TABLE(state->ucp, 6 * 4);
671 END_CB;
672
673 r300_mark_atom_dirty(r300, &r300->clip_state);
674 } else {
675 draw_set_clip_state(r300->draw, state);
676 }
677 }
678
679 /* Create a new depth, stencil, and alpha state based on the CSO dsa state.
680 *
681 * This contains the depth buffer, stencil buffer, alpha test, and such.
682 * On the Radeon, depth and stencil buffer setup are intertwined, which is
683 * the reason for some of the strange-looking assignments across registers. */
684 static void* r300_create_dsa_state(struct pipe_context* pipe,
685 const struct pipe_depth_stencil_alpha_state* state)
686 {
687 boolean is_r500 = r300_screen(pipe->screen)->caps.is_r500;
688 struct r300_dsa_state* dsa = CALLOC_STRUCT(r300_dsa_state);
689 CB_LOCALS;
690 uint32_t alpha_value_fp16 = 0;
691 uint32_t z_buffer_control = 0;
692 uint32_t z_stencil_control = 0;
693 uint32_t stencil_ref_mask = 0;
694 uint32_t stencil_ref_bf = 0;
695
696 dsa->dsa = *state;
697
698 /* Depth test setup. The depth write mask is handled separately from the depth test enable (needed for the decompression flush). */
699 if (state->depth.writemask) {
700 z_buffer_control |= R300_Z_WRITE_ENABLE;
701 }
702
703 if (state->depth.enabled) {
704 z_buffer_control |= R300_Z_ENABLE;
705
706 z_stencil_control |=
707 (r300_translate_depth_stencil_function(state->depth.func) <<
708 R300_Z_FUNC_SHIFT);
709 }
710
711 /* Stencil buffer setup. */
712 if (state->stencil[0].enabled) {
713 z_buffer_control |= R300_STENCIL_ENABLE;
714 z_stencil_control |=
715 (r300_translate_depth_stencil_function(state->stencil[0].func) <<
716 R300_S_FRONT_FUNC_SHIFT) |
717 (r300_translate_stencil_op(state->stencil[0].fail_op) <<
718 R300_S_FRONT_SFAIL_OP_SHIFT) |
719 (r300_translate_stencil_op(state->stencil[0].zpass_op) <<
720 R300_S_FRONT_ZPASS_OP_SHIFT) |
721 (r300_translate_stencil_op(state->stencil[0].zfail_op) <<
722 R300_S_FRONT_ZFAIL_OP_SHIFT);
723
724 stencil_ref_mask =
725 (state->stencil[0].valuemask << R300_STENCILMASK_SHIFT) |
726 (state->stencil[0].writemask << R300_STENCILWRITEMASK_SHIFT);
727
728 if (state->stencil[1].enabled) {
729 dsa->two_sided = TRUE;
730
731 z_buffer_control |= R300_STENCIL_FRONT_BACK;
732 z_stencil_control |=
733 (r300_translate_depth_stencil_function(state->stencil[1].func) <<
734 R300_S_BACK_FUNC_SHIFT) |
735 (r300_translate_stencil_op(state->stencil[1].fail_op) <<
736 R300_S_BACK_SFAIL_OP_SHIFT) |
737 (r300_translate_stencil_op(state->stencil[1].zpass_op) <<
738 R300_S_BACK_ZPASS_OP_SHIFT) |
739 (r300_translate_stencil_op(state->stencil[1].zfail_op) <<
740 R300_S_BACK_ZFAIL_OP_SHIFT);
741
742 stencil_ref_bf =
743 (state->stencil[1].valuemask << R300_STENCILMASK_SHIFT) |
744 (state->stencil[1].writemask << R300_STENCILWRITEMASK_SHIFT);
745
746 if (is_r500) {
747 z_buffer_control |= R500_STENCIL_REFMASK_FRONT_BACK;
748 } else {
749 dsa->two_sided_stencil_ref =
750 (state->stencil[0].valuemask != state->stencil[1].valuemask ||
751 state->stencil[0].writemask != state->stencil[1].writemask);
752 }
753 }
754 }
755
756 /* Alpha test setup. */
757 if (state->alpha.enabled) {
758 dsa->alpha_function =
759 r300_translate_alpha_function(state->alpha.func) |
760 R300_FG_ALPHA_FUNC_ENABLE;
761
762 dsa->alpha_function |= float_to_ubyte(state->alpha.ref_value);
763 alpha_value_fp16 = util_float_to_half(state->alpha.ref_value);
764 }
765
766 BEGIN_CB(&dsa->cb_begin, 8);
767 OUT_CB_REG_SEQ(R300_ZB_CNTL, 3);
768 OUT_CB(z_buffer_control);
769 OUT_CB(z_stencil_control);
770 OUT_CB(stencil_ref_mask);
771 OUT_CB_REG(R500_ZB_STENCILREFMASK_BF, stencil_ref_bf);
772 OUT_CB_REG(R500_FG_ALPHA_VALUE, alpha_value_fp16);
773 END_CB;
774
775 BEGIN_CB(dsa->cb_zb_no_readwrite, 8);
776 OUT_CB_REG_SEQ(R300_ZB_CNTL, 3);
777 OUT_CB(0);
778 OUT_CB(0);
779 OUT_CB(0);
780 OUT_CB_REG(R500_ZB_STENCILREFMASK_BF, 0);
781 OUT_CB_REG(R500_FG_ALPHA_VALUE, alpha_value_fp16);
782 END_CB;
783
784 return (void*)dsa;
785 }
786
787 static void r300_dsa_inject_stencilref(struct r300_context *r300)
788 {
789 struct r300_dsa_state *dsa =
790 (struct r300_dsa_state*)r300->dsa_state.state;
791
792 if (!dsa)
793 return;
794
795 dsa->stencil_ref_mask =
796 (dsa->stencil_ref_mask & ~R300_STENCILREF_MASK) |
797 r300->stencil_ref.ref_value[0];
798 dsa->stencil_ref_bf =
799 (dsa->stencil_ref_bf & ~R300_STENCILREF_MASK) |
800 r300->stencil_ref.ref_value[1];
801 }
802
803 /* Bind DSA state. */
804 static void r300_bind_dsa_state(struct pipe_context* pipe,
805 void* state)
806 {
807 struct r300_context* r300 = r300_context(pipe);
808
809 if (!state) {
810 return;
811 }
812
813 UPDATE_STATE(state, r300->dsa_state);
814
815 r300_mark_atom_dirty(r300, &r300->hyperz_state); /* Will be updated before the emission. */
816 r300_dsa_inject_stencilref(r300);
817 }
818
819 /* Free DSA state. */
820 static void r300_delete_dsa_state(struct pipe_context* pipe,
821 void* state)
822 {
823 FREE(state);
824 }
825
826 static void r300_set_stencil_ref(struct pipe_context* pipe,
827 const struct pipe_stencil_ref* sr)
828 {
829 struct r300_context* r300 = r300_context(pipe);
830
831 r300->stencil_ref = *sr;
832
833 r300_dsa_inject_stencilref(r300);
834 r300_mark_atom_dirty(r300, &r300->dsa_state);
835 }
836
837 static void r300_print_fb_surf_info(struct pipe_surface *surf, unsigned index,
838 const char *binding)
839 {
840 struct pipe_resource *tex = surf->texture;
841 struct r300_resource *rtex = r300_resource(tex);
842
843 fprintf(stderr,
844 "r300: %s[%i] Dim: %ix%i, Firstlayer: %i, "
845 "Lastlayer: %i, Level: %i, Format: %s\n"
846
847 "r300: TEX: Macro: %s, Micro: %s, "
848 "Dim: %ix%ix%i, LastLevel: %i, Format: %s\n",
849
850 binding, index, surf->width, surf->height,
851 surf->u.tex.first_layer, surf->u.tex.last_layer, surf->u.tex.level,
852 util_format_short_name(surf->format),
853
854 rtex->tex.macrotile[0] ? "YES" : " NO",
855 rtex->tex.microtile ? "YES" : " NO",
856 tex->width0, tex->height0, tex->depth0,
857 tex->last_level, util_format_short_name(surf->format));
858 }
859
860 void r300_mark_fb_state_dirty(struct r300_context *r300,
861 enum r300_fb_state_change change)
862 {
863 struct pipe_framebuffer_state *state = r300->fb_state.state;
864
865 r300_mark_atom_dirty(r300, &r300->gpu_flush);
866 r300_mark_atom_dirty(r300, &r300->fb_state);
867
868 /* What is marked as dirty depends on the enum r300_fb_state_change. */
869 if (change == R300_CHANGED_FB_STATE) {
870 r300_mark_atom_dirty(r300, &r300->aa_state);
871 r300_mark_atom_dirty(r300, &r300->dsa_state); /* for AlphaRef */
872 r300_set_blend_color(&r300->context, r300->blend_color_state.state);
873 }
874
875 if (change == R300_CHANGED_FB_STATE ||
876 change == R300_CHANGED_HYPERZ_FLAG) {
877 r300_mark_atom_dirty(r300, &r300->hyperz_state);
878 }
879
880 if (change == R300_CHANGED_FB_STATE ||
881 change == R300_CHANGED_MULTIWRITE) {
882 r300_mark_atom_dirty(r300, &r300->fb_state_pipelined);
883 }
884
885 /* Now compute the fb_state atom size. */
886 r300->fb_state.size = 2 + (8 * state->nr_cbufs);
887
888 if (r300->cbzb_clear)
889 r300->fb_state.size += 10;
890 else if (state->zsbuf) {
891 r300->fb_state.size += 10;
892 if (r300->hyperz_enabled)
893 r300->fb_state.size += 8;
894 }
895
896 if (r300->cmask_in_use) {
897 r300->fb_state.size += 6;
898 if (r300->screen->caps.is_r500 && r300->screen->info.drm_minor >= 29) {
899 r300->fb_state.size += 3;
900 }
901 }
902
903 /* The size of the rest of atoms stays the same. */
904 }
905
906 static void
907 r300_set_framebuffer_state(struct pipe_context* pipe,
908 const struct pipe_framebuffer_state* state)
909 {
910 struct r300_context* r300 = r300_context(pipe);
911 struct r300_aa_state *aa = (struct r300_aa_state*)r300->aa_state.state;
912 struct pipe_framebuffer_state *current_state = r300->fb_state.state;
913 unsigned max_width, max_height, i;
914 uint32_t zbuffer_bpp = 0;
915 boolean unlock_zbuffer = FALSE;
916
917 if (r300->screen->caps.is_r500) {
918 max_width = max_height = 4096;
919 } else if (r300->screen->caps.is_r400) {
920 max_width = max_height = 4021;
921 } else {
922 max_width = max_height = 2560;
923 }
924
925 if (state->width > max_width || state->height > max_height) {
926 fprintf(stderr, "r300: Implementation error: Render targets are too "
927 "big in %s, refusing to bind framebuffer state!\n", __FUNCTION__);
928 return;
929 }
930
931 if (current_state->zsbuf && r300->zmask_in_use && !r300->locked_zbuffer) {
932 /* There is a zmask in use, what are we gonna do? */
933 if (state->zsbuf) {
934 if (!pipe_surface_equal(current_state->zsbuf, state->zsbuf)) {
935 /* Decompress the currently bound zbuffer before we bind another one. */
936 r300_decompress_zmask(r300);
937 r300->hiz_in_use = FALSE;
938 }
939 } else {
940 /* We don't bind another zbuffer, so lock the current one. */
941 pipe_surface_reference(&r300->locked_zbuffer, current_state->zsbuf);
942 }
943 } else if (r300->locked_zbuffer) {
944 /* We have a locked zbuffer now, what are we gonna do? */
945 if (state->zsbuf) {
946 if (!pipe_surface_equal(r300->locked_zbuffer, state->zsbuf)) {
947 /* We are binding some other zbuffer, so decompress the locked one,
948 * it gets unlocked automatically. */
949 r300_decompress_zmask_locked_unsafe(r300);
950 r300->hiz_in_use = FALSE;
951 } else {
952 /* We are binding the locked zbuffer again, so unlock it. */
953 unlock_zbuffer = TRUE;
954 }
955 }
956 }
957 assert(state->zsbuf || (r300->locked_zbuffer && !unlock_zbuffer) || !r300->zmask_in_use);
958
959 /* If zsbuf is set from NULL to non-NULL or vice versa.. */
960 if (!!current_state->zsbuf != !!state->zsbuf) {
961 r300_mark_atom_dirty(r300, &r300->dsa_state);
962 }
963
964 util_copy_framebuffer_state(r300->fb_state.state, state);
965
966 /* Remove trailing NULL colorbuffers. */
967 while (current_state->nr_cbufs && !current_state->cbufs[current_state->nr_cbufs-1])
968 current_state->nr_cbufs--;
969
970 /* Set whether CMASK can be used. */
971 r300->cmask_in_use =
972 state->nr_cbufs == 1 && state->cbufs[0] &&
973 r300->screen->cmask_resource == state->cbufs[0]->texture;
974
975 /* Need to reset clamping or colormask. */
976 r300_mark_atom_dirty(r300, &r300->blend_state);
977
978 /* Re-swizzle the blend color. */
979 r300_set_blend_color(pipe, &((struct r300_blend_color_state*)r300->blend_color_state.state)->state);
980
981 if (unlock_zbuffer) {
982 pipe_surface_reference(&r300->locked_zbuffer, NULL);
983 }
984
985 r300_mark_fb_state_dirty(r300, R300_CHANGED_FB_STATE);
986
987 if (state->zsbuf) {
988 switch (util_format_get_blocksize(state->zsbuf->format)) {
989 case 2:
990 zbuffer_bpp = 16;
991 break;
992 case 4:
993 zbuffer_bpp = 24;
994 break;
995 }
996
997 /* Polygon offset depends on the zbuffer bit depth. */
998 if (r300->zbuffer_bpp != zbuffer_bpp) {
999 r300->zbuffer_bpp = zbuffer_bpp;
1000
1001 if (r300->polygon_offset_enabled)
1002 r300_mark_atom_dirty(r300, &r300->rs_state);
1003 }
1004 }
1005
1006 r300->num_samples = util_framebuffer_get_num_samples(state);
1007
1008 /* Set up AA config. */
1009 if (r300->num_samples > 1) {
1010 switch (r300->num_samples) {
1011 case 2:
1012 aa->aa_config = R300_GB_AA_CONFIG_AA_ENABLE |
1013 R300_GB_AA_CONFIG_NUM_AA_SUBSAMPLES_2;
1014 break;
1015 case 4:
1016 aa->aa_config = R300_GB_AA_CONFIG_AA_ENABLE |
1017 R300_GB_AA_CONFIG_NUM_AA_SUBSAMPLES_4;
1018 break;
1019 case 6:
1020 aa->aa_config = R300_GB_AA_CONFIG_AA_ENABLE |
1021 R300_GB_AA_CONFIG_NUM_AA_SUBSAMPLES_6;
1022 break;
1023 }
1024 } else {
1025 aa->aa_config = 0;
1026 }
1027
1028 if (DBG_ON(r300, DBG_FB)) {
1029 fprintf(stderr, "r300: set_framebuffer_state:\n");
1030 for (i = 0; i < state->nr_cbufs; i++) {
1031 if (state->cbufs[i])
1032 r300_print_fb_surf_info(state->cbufs[i], i, "CB");
1033 }
1034 if (state->zsbuf) {
1035 r300_print_fb_surf_info(state->zsbuf, 0, "ZB");
1036 }
1037 }
1038 }
1039
1040 /* Create fragment shader state. */
1041 static void* r300_create_fs_state(struct pipe_context* pipe,
1042 const struct pipe_shader_state* shader)
1043 {
1044 struct r300_fragment_shader* fs = NULL;
1045
1046 fs = (struct r300_fragment_shader*)CALLOC_STRUCT(r300_fragment_shader);
1047
1048 /* Copy state directly into shader. */
1049 fs->state = *shader;
1050 fs->state.tokens = tgsi_dup_tokens(shader->tokens);
1051
1052 return (void*)fs;
1053 }
1054
1055 void r300_mark_fs_code_dirty(struct r300_context *r300)
1056 {
1057 struct r300_fragment_shader* fs = r300_fs(r300);
1058
1059 r300_mark_atom_dirty(r300, &r300->fs);
1060 r300_mark_atom_dirty(r300, &r300->fs_rc_constant_state);
1061 r300_mark_atom_dirty(r300, &r300->fs_constants);
1062 r300->fs.size = fs->shader->cb_code_size;
1063
1064 if (r300->screen->caps.is_r500) {
1065 r300->fs_rc_constant_state.size = fs->shader->rc_state_count * 7;
1066 r300->fs_constants.size = fs->shader->externals_count * 4 + 3;
1067 } else {
1068 r300->fs_rc_constant_state.size = fs->shader->rc_state_count * 5;
1069 r300->fs_constants.size = fs->shader->externals_count * 4 + 1;
1070 }
1071
1072 ((struct r300_constant_buffer*)r300->fs_constants.state)->remap_table =
1073 fs->shader->code.constants_remap_table;
1074 }
1075
1076 /* Bind fragment shader state. */
1077 static void r300_bind_fs_state(struct pipe_context* pipe, void* shader)
1078 {
1079 struct r300_context* r300 = r300_context(pipe);
1080 struct r300_fragment_shader* fs = (struct r300_fragment_shader*)shader;
1081
1082 if (!fs) {
1083 r300->fs.state = NULL;
1084 return;
1085 }
1086
1087 r300->fs.state = fs;
1088 r300->fs_status = FRAGMENT_SHADER_DIRTY;
1089
1090 r300_mark_atom_dirty(r300, &r300->rs_block_state); /* Will be updated before the emission. */
1091 }
1092
1093 /* Delete fragment shader state. */
1094 static void r300_delete_fs_state(struct pipe_context* pipe, void* shader)
1095 {
1096 struct r300_fragment_shader* fs = (struct r300_fragment_shader*)shader;
1097 struct r300_fragment_shader_code *tmp, *ptr = fs->first;
1098
1099 while (ptr) {
1100 tmp = ptr;
1101 ptr = ptr->next;
1102 rc_constants_destroy(&tmp->code.constants);
1103 FREE(tmp->cb_code);
1104 FREE(tmp);
1105 }
1106 FREE((void*)fs->state.tokens);
1107 FREE(shader);
1108 }
1109
1110 static void r300_set_polygon_stipple(struct pipe_context* pipe,
1111 const struct pipe_poly_stipple* state)
1112 {
1113 }
1114
1115 /* Create a new rasterizer state based on the CSO rasterizer state.
1116 *
1117 * This is a very large chunk of state, and covers most of the graphics
1118 * backend (GB), geometry assembly (GA), and setup unit (SU) blocks.
1119 *
1120 * In a not entirely unironic sidenote, this state has nearly nothing to do
1121 * with the actual block on the Radeon called the rasterizer (RS). */
1122 static void* r300_create_rs_state(struct pipe_context* pipe,
1123 const struct pipe_rasterizer_state* state)
1124 {
1125 struct r300_rs_state* rs = CALLOC_STRUCT(r300_rs_state);
1126 uint32_t vap_control_status; /* R300_VAP_CNTL_STATUS: 0x2140 */
1127 uint32_t vap_clip_cntl; /* R300_VAP_CLIP_CNTL: 0x221C */
1128 uint32_t point_size; /* R300_GA_POINT_SIZE: 0x421c */
1129 uint32_t point_minmax; /* R300_GA_POINT_MINMAX: 0x4230 */
1130 uint32_t line_control; /* R300_GA_LINE_CNTL: 0x4234 */
1131 uint32_t polygon_offset_enable; /* R300_SU_POLY_OFFSET_ENABLE: 0x42b4 */
1132 uint32_t cull_mode; /* R300_SU_CULL_MODE: 0x42b8 */
1133 uint32_t line_stipple_config; /* R300_GA_LINE_STIPPLE_CONFIG: 0x4328 */
1134 uint32_t line_stipple_value; /* R300_GA_LINE_STIPPLE_VALUE: 0x4260 */
1135 uint32_t polygon_mode; /* R300_GA_POLY_MODE: 0x4288 */
1136 uint32_t clip_rule; /* R300_SC_CLIP_RULE: 0x43D0 */
1137 uint32_t round_mode; /* R300_GA_ROUND_MODE: 0x428c */
1138
1139 /* Point sprites texture coordinates, 0: lower left, 1: upper right */
1140 float point_texcoord_left = 0; /* R300_GA_POINT_S0: 0x4200 */
1141 float point_texcoord_bottom = 0;/* R300_GA_POINT_T0: 0x4204 */
1142 float point_texcoord_right = 1; /* R300_GA_POINT_S1: 0x4208 */
1143 float point_texcoord_top = 0; /* R300_GA_POINT_T1: 0x420c */
1144 boolean vclamp = !r300_context(pipe)->screen->caps.is_r500;
1145 CB_LOCALS;
1146
1147 /* Copy rasterizer state. */
1148 rs->rs = *state;
1149 rs->rs_draw = *state;
1150
1151 rs->rs.sprite_coord_enable = state->point_quad_rasterization *
1152 state->sprite_coord_enable;
1153
1154 /* Override some states for Draw. */
1155 rs->rs_draw.sprite_coord_enable = 0; /* We can do this in HW. */
1156 rs->rs_draw.offset_point = 0;
1157 rs->rs_draw.offset_line = 0;
1158 rs->rs_draw.offset_tri = 0;
1159 rs->rs_draw.offset_clamp = 0;
1160
1161 #ifdef PIPE_ARCH_LITTLE_ENDIAN
1162 vap_control_status = R300_VC_NO_SWAP;
1163 #else
1164 vap_control_status = R300_VC_32BIT_SWAP;
1165 #endif
1166
1167 /* If no TCL engine is present, turn off the HW TCL. */
1168 if (!r300_screen(pipe->screen)->caps.has_tcl) {
1169 vap_control_status |= R300_VAP_TCL_BYPASS;
1170 }
1171
1172 /* Point size width and height. */
1173 point_size =
1174 pack_float_16_6x(state->point_size) |
1175 (pack_float_16_6x(state->point_size) << R300_POINTSIZE_X_SHIFT);
1176
1177 /* Point size clamping. */
1178 if (state->point_size_per_vertex) {
1179 /* Per-vertex point size.
1180 * Clamp to [0, max FB size] */
1181 float min_psiz = util_get_min_point_size(state);
1182 float max_psiz = pipe->screen->get_paramf(pipe->screen,
1183 PIPE_CAPF_MAX_POINT_WIDTH);
1184 point_minmax =
1185 (pack_float_16_6x(min_psiz) << R300_GA_POINT_MINMAX_MIN_SHIFT) |
1186 (pack_float_16_6x(max_psiz) << R300_GA_POINT_MINMAX_MAX_SHIFT);
1187 } else {
1188 /* We cannot disable the point-size vertex output,
1189 * so clamp it. */
1190 float psiz = state->point_size;
1191 point_minmax =
1192 (pack_float_16_6x(psiz) << R300_GA_POINT_MINMAX_MIN_SHIFT) |
1193 (pack_float_16_6x(psiz) << R300_GA_POINT_MINMAX_MAX_SHIFT);
1194 }
1195
1196 /* Line control. */
1197 line_control = pack_float_16_6x(state->line_width) |
1198 R300_GA_LINE_CNTL_END_TYPE_COMP;
1199
1200 /* Enable polygon mode */
1201 polygon_mode = 0;
1202 if (state->fill_front != PIPE_POLYGON_MODE_FILL ||
1203 state->fill_back != PIPE_POLYGON_MODE_FILL) {
1204 polygon_mode = R300_GA_POLY_MODE_DUAL;
1205 }
1206
1207 /* Front face */
1208 if (state->front_ccw)
1209 cull_mode = R300_FRONT_FACE_CCW;
1210 else
1211 cull_mode = R300_FRONT_FACE_CW;
1212
1213 /* Polygon offset */
1214 polygon_offset_enable = 0;
1215 if (util_get_offset(state, state->fill_front)) {
1216 polygon_offset_enable |= R300_FRONT_ENABLE;
1217 }
1218 if (util_get_offset(state, state->fill_back)) {
1219 polygon_offset_enable |= R300_BACK_ENABLE;
1220 }
1221
1222 rs->polygon_offset_enable = polygon_offset_enable != 0;
1223
1224 /* Polygon mode */
1225 if (polygon_mode) {
1226 polygon_mode |=
1227 r300_translate_polygon_mode_front(state->fill_front);
1228 polygon_mode |=
1229 r300_translate_polygon_mode_back(state->fill_back);
1230 }
1231
1232 if (state->cull_face & PIPE_FACE_FRONT) {
1233 cull_mode |= R300_CULL_FRONT;
1234 }
1235 if (state->cull_face & PIPE_FACE_BACK) {
1236 cull_mode |= R300_CULL_BACK;
1237 }
1238
1239 if (state->line_stipple_enable) {
1240 line_stipple_config =
1241 R300_GA_LINE_STIPPLE_CONFIG_LINE_RESET_LINE |
1242 (fui((float)state->line_stipple_factor) &
1243 R300_GA_LINE_STIPPLE_CONFIG_STIPPLE_SCALE_MASK);
1244 /* XXX this might need to be scaled up */
1245 line_stipple_value = state->line_stipple_pattern;
1246 } else {
1247 line_stipple_config = 0;
1248 line_stipple_value = 0;
1249 }
1250
1251 if (state->flatshade) {
1252 rs->color_control = R300_SHADE_MODEL_FLAT;
1253 } else {
1254 rs->color_control = R300_SHADE_MODEL_SMOOTH;
1255 }
1256
1257 clip_rule = state->scissor ? 0xAAAA : 0xFFFF;
1258
1259 /* Point sprites coord mode */
1260 if (rs->rs.sprite_coord_enable) {
1261 switch (state->sprite_coord_mode) {
1262 case PIPE_SPRITE_COORD_UPPER_LEFT:
1263 point_texcoord_top = 0.0f;
1264 point_texcoord_bottom = 1.0f;
1265 break;
1266 case PIPE_SPRITE_COORD_LOWER_LEFT:
1267 point_texcoord_top = 1.0f;
1268 point_texcoord_bottom = 0.0f;
1269 break;
1270 }
1271 }
1272
1273 if (r300_screen(pipe->screen)->caps.has_tcl) {
1274 vap_clip_cntl = (state->clip_plane_enable & 63) |
1275 R300_PS_UCP_MODE_CLIP_AS_TRIFAN;
1276 } else {
1277 vap_clip_cntl = R300_CLIP_DISABLE;
1278 }
1279
1280 /* Vertex color clamping. FP20 means no clamping. */
1281 round_mode =
1282 R300_GA_ROUND_MODE_GEOMETRY_ROUND_NEAREST |
1283 (!vclamp ? (R300_GA_ROUND_MODE_RGB_CLAMP_FP20 |
1284 R300_GA_ROUND_MODE_ALPHA_CLAMP_FP20) : 0);
1285
1286 /* Build the main command buffer. */
1287 BEGIN_CB(rs->cb_main, RS_STATE_MAIN_SIZE);
1288 OUT_CB_REG(R300_VAP_CNTL_STATUS, vap_control_status);
1289 OUT_CB_REG(R300_VAP_CLIP_CNTL, vap_clip_cntl);
1290 OUT_CB_REG(R300_GA_POINT_SIZE, point_size);
1291 OUT_CB_REG_SEQ(R300_GA_POINT_MINMAX, 2);
1292 OUT_CB(point_minmax);
1293 OUT_CB(line_control);
1294 OUT_CB_REG_SEQ(R300_SU_POLY_OFFSET_ENABLE, 2);
1295 OUT_CB(polygon_offset_enable);
1296 rs->cull_mode_index = 11;
1297 OUT_CB(cull_mode);
1298 OUT_CB_REG(R300_GA_LINE_STIPPLE_CONFIG, line_stipple_config);
1299 OUT_CB_REG(R300_GA_LINE_STIPPLE_VALUE, line_stipple_value);
1300 OUT_CB_REG(R300_GA_POLY_MODE, polygon_mode);
1301 OUT_CB_REG(R300_GA_ROUND_MODE, round_mode);
1302 OUT_CB_REG(R300_SC_CLIP_RULE, clip_rule);
1303 OUT_CB_REG_SEQ(R300_GA_POINT_S0, 4);
1304 OUT_CB_32F(point_texcoord_left);
1305 OUT_CB_32F(point_texcoord_bottom);
1306 OUT_CB_32F(point_texcoord_right);
1307 OUT_CB_32F(point_texcoord_top);
1308 END_CB;
1309
1310 /* Build the two command buffers for polygon offset setup. */
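    /* Two variants are needed because the offset-units scale depends on the
     * Z-buffer bit depth (see the zbuffer_bpp handling in
     * r300_set_framebuffer_state); the matching buffer is emitted for the
     * currently bound zbuffer. */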
1311 if (polygon_offset_enable) {
1312 float scale = state->offset_scale * 12;
1313 float offset = state->offset_units * 4;
1314
1315 BEGIN_CB(rs->cb_poly_offset_zb16, 5);
1316 OUT_CB_REG_SEQ(R300_SU_POLY_OFFSET_FRONT_SCALE, 4);
1317 OUT_CB_32F(scale);
1318 OUT_CB_32F(offset);
1319 OUT_CB_32F(scale);
1320 OUT_CB_32F(offset);
1321 END_CB;
1322
1323 offset = state->offset_units * 2;
1324
1325 BEGIN_CB(rs->cb_poly_offset_zb24, 5);
1326 OUT_CB_REG_SEQ(R300_SU_POLY_OFFSET_FRONT_SCALE, 4);
1327 OUT_CB_32F(scale);
1328 OUT_CB_32F(offset);
1329 OUT_CB_32F(scale);
1330 OUT_CB_32F(offset);
1331 END_CB;
1332 }
1333
1334 return (void*)rs;
1335 }
1336
1337 /* Bind rasterizer state. */
1338 static void r300_bind_rs_state(struct pipe_context* pipe, void* state)
1339 {
1340 struct r300_context* r300 = r300_context(pipe);
1341 struct r300_rs_state* rs = (struct r300_rs_state*)state;
1342 int last_sprite_coord_enable = r300->sprite_coord_enable;
1343 boolean last_two_sided_color = r300->two_sided_color;
1344 boolean last_msaa_enable = r300->msaa_enable;
1345 boolean last_flatshade = r300->flatshade;
1346 boolean last_clip_halfz = r300->clip_halfz;
1347
1348 if (r300->draw && rs) {
1349 draw_set_rasterizer_state(r300->draw, &rs->rs_draw, state);
1350 }
1351
1352 if (rs) {
1353 r300->polygon_offset_enabled = rs->polygon_offset_enable;
1354 r300->sprite_coord_enable = rs->rs.sprite_coord_enable;
1355 r300->two_sided_color = rs->rs.light_twoside;
1356 r300->msaa_enable = rs->rs.multisample;
1357 r300->flatshade = rs->rs.flatshade;
1358 r300->clip_halfz = rs->rs.clip_halfz;
1359 } else {
1360 r300->polygon_offset_enabled = FALSE;
1361 r300->sprite_coord_enable = 0;
1362 r300->two_sided_color = FALSE;
1363 r300->msaa_enable = FALSE;
1364 r300->flatshade = FALSE;
1365 r300->clip_halfz = FALSE;
1366 }
1367
1368 UPDATE_STATE(state, r300->rs_state);
1369 r300->rs_state.size = RS_STATE_MAIN_SIZE + (r300->polygon_offset_enabled ? 5 : 0);
1370
1371 if (last_sprite_coord_enable != r300->sprite_coord_enable ||
1372 last_two_sided_color != r300->two_sided_color ||
1373 last_flatshade != r300->flatshade) {
1374 r300_mark_atom_dirty(r300, &r300->rs_block_state);
1375 }
1376
1377 if (last_msaa_enable != r300->msaa_enable) {
1378 if (r300->alpha_to_coverage) {
1379 r300_mark_atom_dirty(r300, &r300->dsa_state);
1380 }
1381
1382 if (r300->alpha_to_one &&
1383 r300->fs_status == FRAGMENT_SHADER_VALID) {
1384 r300->fs_status = FRAGMENT_SHADER_MAYBE_DIRTY;
1385 }
1386 }
1387
1388 if (r300->screen->caps.has_tcl && last_clip_halfz != r300->clip_halfz) {
1389 r300_mark_atom_dirty(r300, &r300->vs_state);
1390 }
1391 }
1392
1393 /* Free rasterizer state. */
1394 static void r300_delete_rs_state(struct pipe_context* pipe, void* state)
1395 {
1396 FREE(state);
1397 }
1398
1399 static void*
1400 r300_create_sampler_state(struct pipe_context* pipe,
1401 const struct pipe_sampler_state* state)
1402 {
1403 struct r300_context* r300 = r300_context(pipe);
1404 struct r300_sampler_state* sampler = CALLOC_STRUCT(r300_sampler_state);
1405 boolean is_r500 = r300->screen->caps.is_r500;
1406 int lod_bias;
1407
1408 sampler->state = *state;
1409
1410 /* r300 doesn't handle CLAMP and MIRROR_CLAMP correctly when either MAG
1411 * or MIN filter is NEAREST. Since texwrap produces the same results
1412 * for CLAMP and CLAMP_TO_EDGE, we use the latter instead. */
1413 if (sampler->state.min_img_filter == PIPE_TEX_FILTER_NEAREST ||
1414 sampler->state.mag_img_filter == PIPE_TEX_FILTER_NEAREST) {
1415 /* Wrap S. */
1416 if (sampler->state.wrap_s == PIPE_TEX_WRAP_CLAMP)
1417 sampler->state.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
1418 else if (sampler->state.wrap_s == PIPE_TEX_WRAP_MIRROR_CLAMP)
1419 sampler->state.wrap_s = PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE;
1420
1421 /* Wrap T. */
1422 if (sampler->state.wrap_t == PIPE_TEX_WRAP_CLAMP)
1423 sampler->state.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
1424 else if (sampler->state.wrap_t == PIPE_TEX_WRAP_MIRROR_CLAMP)
1425 sampler->state.wrap_t = PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE;
1426
1427 /* Wrap R. */
1428 if (sampler->state.wrap_r == PIPE_TEX_WRAP_CLAMP)
1429 sampler->state.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
1430 else if (sampler->state.wrap_r == PIPE_TEX_WRAP_MIRROR_CLAMP)
1431 sampler->state.wrap_r = PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE;
1432 }
1433
1434 sampler->filter0 |=
1435 (r300_translate_wrap(sampler->state.wrap_s) << R300_TX_WRAP_S_SHIFT) |
1436 (r300_translate_wrap(sampler->state.wrap_t) << R300_TX_WRAP_T_SHIFT) |
1437 (r300_translate_wrap(sampler->state.wrap_r) << R300_TX_WRAP_R_SHIFT);
1438
1439 sampler->filter0 |= r300_translate_tex_filters(state->min_img_filter,
1440 state->mag_img_filter,
1441 state->min_mip_filter,
1442 state->max_anisotropy > 1);
1443
1444 sampler->filter0 |= r300_anisotropy(state->max_anisotropy);
1445
1446 /* Unfortunately, r300-r500 don't support floating-point mipmap lods. */
1447 /* We must pass these to the merge function to clamp them properly. */
1448 sampler->min_lod = (unsigned)MAX2(state->min_lod, 0);
1449 sampler->max_lod = (unsigned)MAX2(ceilf(state->max_lod), 0);
1450
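    /* The LOD bias register takes a signed fixed-point value (hence the
     * scale by 32) clamped to a 10-bit signed range. */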
1451 lod_bias = CLAMP((int)(state->lod_bias * 32 + 1), -(1 << 9), (1 << 9) - 1);
1452
1453 sampler->filter1 |= (lod_bias << R300_LOD_BIAS_SHIFT) & R300_LOD_BIAS_MASK;
1454
1455 /* This is very high quality anisotropic filtering for R5xx.
1456 * It's useful for benchmarking texturing, but it's a serious
1457 * performance killer, so we don't want to slow the driver down by
1458 * enabling it by default. Feel free to play with it. */
1459 if (DBG_ON(r300, DBG_ANISOHQ) && is_r500) {
1460 sampler->filter1 |= r500_anisotropy(state->max_anisotropy);
1461 }
1462
1463 /* R500-specific fixups and optimizations */
1464 if (r300->screen->caps.is_r500) {
1465 sampler->filter1 |= R500_BORDER_FIX;
1466 }
1467
1468 return (void*)sampler;
1469 }
1470
1471 static void r300_bind_sampler_states(struct pipe_context* pipe,
1472 enum pipe_shader_type shader,
1473 unsigned start, unsigned count,
1474 void** states)
1475 {
1476 struct r300_context* r300 = r300_context(pipe);
1477 struct r300_textures_state* state =
1478 (struct r300_textures_state*)r300->textures_state.state;
1479 unsigned tex_units = r300->screen->caps.num_tex_units;
1480
1481 assert(start == 0);
1482
1483 if (shader != PIPE_SHADER_FRAGMENT)
1484 return;
1485
1486 if (count > tex_units)
1487 return;
1488
1489 memcpy(state->sampler_states, states, sizeof(void*) * count);
1490 state->sampler_state_count = count;
1491
1492 r300_mark_atom_dirty(r300, &r300->textures_state);
1493 }
1494
1495 static void r300_delete_sampler_state(struct pipe_context* pipe, void* state)
1496 {
1497 FREE(state);
1498 }
1499
1500 static uint32_t r300_assign_texture_cache_region(unsigned index, unsigned num)
1501 {
1502 /* This looks like a hack, but I believe it's supposed to work like
1503 * that. To illustrate how this works, let's assume you have 5 textures.
1504 * From docs, 5 and the successive numbers are:
1505 *
1506 * FOURTH_1 = 5
1507 * FOURTH_2 = 6
1508 * FOURTH_3 = 7
1509 * EIGHTH_0 = 8
1510 * EIGHTH_1 = 9
1511 *
1512 * The first 3 textures will get 3/4 of the cache, divided evenly
1513 * between them. The last 1/4 of the cache must be divided between
1514 * the last 2 textures, each will therefore get 1/8 of the cache.
1515 * Why not just use "5 + texture_index"?
1516 *
1517 * This simple trick works for all "num" <= 16.
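     * For instance, with num == 5 the "num + index" formula assigns
     * FOURTH_1..FOURTH_3 to textures 0-2 and EIGHTH_0..EIGHTH_1 to
     * textures 3-4, matching the table above.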
1518 */
1519 if (num <= 1)
1520 return R300_TX_CACHE(R300_TX_CACHE_WHOLE);
1521 else
1522 return R300_TX_CACHE(num + index);
1523 }
1524
1525 static void r300_set_sampler_views(struct pipe_context* pipe,
1526 enum pipe_shader_type shader,
1527 unsigned start, unsigned count,
1528 struct pipe_sampler_view** views)
1529 {
1530 struct r300_context* r300 = r300_context(pipe);
1531 struct r300_textures_state* state =
1532 (struct r300_textures_state*)r300->textures_state.state;
1533 struct r300_resource *texture;
1534 unsigned i, real_num_views = 0, view_index = 0;
1535 unsigned tex_units = r300->screen->caps.num_tex_units;
1536 boolean dirty_tex = FALSE;
1537
1538 if (shader != PIPE_SHADER_FRAGMENT)
1539 return;
1540
1541 assert(start == 0); /* non-zero not handled yet */
1542
1543 if (count > tex_units) {
1544 return;
1545 }
1546
1547 /* Calculate the real number of views. */
1548 for (i = 0; i < count; i++) {
1549 if (views[i])
1550 real_num_views++;
1551 }
1552
1553 for (i = 0; i < count; i++) {
1554 pipe_sampler_view_reference(
1555 (struct pipe_sampler_view**)&state->sampler_views[i],
1556 views[i]);
1557
1558 if (!views[i]) {
1559 continue;
1560 }
1561
1562 /* A new sampler view (= texture)... */
1563 dirty_tex = TRUE;
1564
1565 /* Set the texrect factor in the fragment shader.
1566 * Needed for RECT and NPOT fallback. */
1567 texture = r300_resource(views[i]->texture);
1568 if (texture->tex.is_npot) {
1569 r300_mark_atom_dirty(r300, &r300->fs_rc_constant_state);
1570 }
1571
1572 state->sampler_views[i]->texcache_region =
1573 r300_assign_texture_cache_region(view_index, real_num_views);
1574 view_index++;
1575 }
1576
1577 for (i = count; i < tex_units; i++) {
1578 if (state->sampler_views[i]) {
1579 pipe_sampler_view_reference(
1580 (struct pipe_sampler_view**)&state->sampler_views[i],
1581 NULL);
1582 }
1583 }
1584
1585 state->sampler_view_count = count;
1586
1587 r300_mark_atom_dirty(r300, &r300->textures_state);
1588
1589 if (dirty_tex) {
1590 r300_mark_atom_dirty(r300, &r300->texture_cache_inval);
1591 }
1592 }
1593
struct pipe_sampler_view *
r300_create_sampler_view_custom(struct pipe_context *pipe,
                                struct pipe_resource *texture,
                                const struct pipe_sampler_view *templ,
                                unsigned width0_override,
                                unsigned height0_override)
{
    struct r300_sampler_view *view = CALLOC_STRUCT(r300_sampler_view);
    struct r300_resource *tex = r300_resource(texture);
    boolean is_r500 = r300_screen(pipe->screen)->caps.is_r500;
    boolean dxtc_swizzle = r300_screen(pipe->screen)->caps.dxtc_swizzle;

    if (view) {
        unsigned hwformat;

        view->base = *templ;
        view->base.reference.count = 1;
        view->base.context = pipe;
        view->base.texture = NULL;
        pipe_resource_reference(&view->base.texture, texture);

        view->width0_override = width0_override;
        view->height0_override = height0_override;
        view->swizzle[0] = templ->swizzle_r;
        view->swizzle[1] = templ->swizzle_g;
        view->swizzle[2] = templ->swizzle_b;
        view->swizzle[3] = templ->swizzle_a;

        hwformat = r300_translate_texformat(templ->format,
                                            view->swizzle,
                                            is_r500,
                                            dxtc_swizzle);

        if (hwformat == ~0) {
            fprintf(stderr, "r300: Ooops. Got unsupported format %s in %s.\n",
                    util_format_short_name(templ->format), __func__);
        }
        assert(hwformat != ~0);

        r300_texture_setup_format_state(r300_screen(pipe->screen), tex,
                                        templ->format, 0,
                                        width0_override, height0_override,
                                        &view->format);
        view->format.format1 |= hwformat;
        if (is_r500) {
            view->format.format2 |= r500_tx_format_msb_bit(templ->format);
        }
    }

    return (struct pipe_sampler_view*)view;
}

static struct pipe_sampler_view *
r300_create_sampler_view(struct pipe_context *pipe,
                         struct pipe_resource *texture,
                         const struct pipe_sampler_view *templ)
{
    return r300_create_sampler_view_custom(pipe, texture, templ,
                                           r300_resource(texture)->tex.width0,
                                           r300_resource(texture)->tex.height0);
}


static void
r300_sampler_view_destroy(struct pipe_context *pipe,
                          struct pipe_sampler_view *view)
{
    pipe_resource_reference(&view->texture, NULL);
    FREE(view);
}

static void r300_set_sample_mask(struct pipe_context *pipe,
                                 unsigned mask)
{
    struct r300_context* r300 = r300_context(pipe);

    *((unsigned*)r300->sample_mask.state) = mask;

    r300_mark_atom_dirty(r300, &r300->sample_mask);
}

static void r300_set_scissor_states(struct pipe_context* pipe,
                                    unsigned start_slot,
                                    unsigned num_scissors,
                                    const struct pipe_scissor_state* state)
{
    struct r300_context* r300 = r300_context(pipe);

    memcpy(r300->scissor_state.state, state,
           sizeof(struct pipe_scissor_state));

    r300_mark_atom_dirty(r300, &r300->scissor_state);
}

static void r300_set_viewport_states(struct pipe_context* pipe,
                                     unsigned start_slot,
                                     unsigned num_viewports,
                                     const struct pipe_viewport_state* state)
{
    struct r300_context* r300 = r300_context(pipe);
    struct r300_viewport_state* viewport =
        (struct r300_viewport_state*)r300->viewport_state.state;

    r300->viewport = *state;

    if (r300->draw) {
        draw_set_viewport_states(r300->draw, start_slot, num_viewports, state);
        viewport->vte_control = R300_VTX_XY_FMT | R300_VTX_Z_FMT;
        return;
    }

    /* Do the transform in HW. */
    viewport->vte_control = R300_VTX_W0_FMT;

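    /* Enable each VTE scale/offset stage only when it differs from the
     * identity transform; presumably this lets trivial viewports skip
     * redundant per-vertex work. */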
    if (state->scale[0] != 1.0f) {
        viewport->xscale = state->scale[0];
        viewport->vte_control |= R300_VPORT_X_SCALE_ENA;
    }
    if (state->scale[1] != 1.0f) {
        viewport->yscale = state->scale[1];
        viewport->vte_control |= R300_VPORT_Y_SCALE_ENA;
    }
    if (state->scale[2] != 1.0f) {
        viewport->zscale = state->scale[2];
        viewport->vte_control |= R300_VPORT_Z_SCALE_ENA;
    }
    if (state->translate[0] != 0.0f) {
        viewport->xoffset = state->translate[0];
        viewport->vte_control |= R300_VPORT_X_OFFSET_ENA;
    }
    if (state->translate[1] != 0.0f) {
        viewport->yoffset = state->translate[1];
        viewport->vte_control |= R300_VPORT_Y_OFFSET_ENA;
    }
    if (state->translate[2] != 0.0f) {
        viewport->zoffset = state->translate[2];
        viewport->vte_control |= R300_VPORT_Z_OFFSET_ENA;
    }

    r300_mark_atom_dirty(r300, &r300->viewport_state);
    if (r300->fs.state && r300_fs(r300)->shader &&
        r300_fs(r300)->shader->inputs.wpos != ATTR_UNUSED) {
        r300_mark_atom_dirty(r300, &r300->fs_rc_constant_state);
    }
}

static void r300_set_vertex_buffers_hwtcl(struct pipe_context* pipe,
                                          unsigned start_slot, unsigned count,
                                          const struct pipe_vertex_buffer* buffers)
{
    struct r300_context* r300 = r300_context(pipe);

    util_set_vertex_buffers_count(r300->vertex_buffer,
                                  &r300->nr_vertex_buffers,
                                  buffers, start_slot, count);

    /* There must be at least one vertex buffer set, otherwise it locks up. */
    if (!r300->nr_vertex_buffers) {
        util_set_vertex_buffers_count(r300->vertex_buffer,
                                      &r300->nr_vertex_buffers,
                                      &r300->dummy_vb, 0, 1);
    }

    r300->vertex_arrays_dirty = TRUE;
}

static void r300_set_vertex_buffers_swtcl(struct pipe_context* pipe,
                                          unsigned start_slot, unsigned count,
                                          const struct pipe_vertex_buffer* buffers)
{
    struct r300_context* r300 = r300_context(pipe);
    unsigned i;

    util_set_vertex_buffers_count(r300->vertex_buffer,
                                  &r300->nr_vertex_buffers,
                                  buffers, start_slot, count);
    draw_set_vertex_buffers(r300->draw, start_slot, count, buffers);

    if (!buffers)
        return;

    for (i = 0; i < count; i++) {
        if (buffers[i].user_buffer) {
            draw_set_mapped_vertex_buffer(r300->draw, start_slot + i,
                                          buffers[i].user_buffer, ~0);
        } else if (buffers[i].buffer) {
            draw_set_mapped_vertex_buffer(r300->draw, start_slot + i,
                                          r300_resource(buffers[i].buffer)->malloced_buffer, ~0);
        }
    }
}

static void r300_set_index_buffer_hwtcl(struct pipe_context* pipe,
                                        const struct pipe_index_buffer *ib)
{
    struct r300_context* r300 = r300_context(pipe);

    if (ib) {
        pipe_resource_reference(&r300->index_buffer.buffer, ib->buffer);
        memcpy(&r300->index_buffer, ib, sizeof(*ib));
    } else {
        pipe_resource_reference(&r300->index_buffer.buffer, NULL);
    }
}

static void r300_set_index_buffer_swtcl(struct pipe_context* pipe,
                                        const struct pipe_index_buffer *ib)
{
    struct r300_context* r300 = r300_context(pipe);

    if (ib) {
        const void *buf = NULL;
        if (ib->user_buffer) {
            buf = ib->user_buffer;
        } else if (ib->buffer) {
            buf = r300_resource(ib->buffer)->malloced_buffer;
        }
        draw_set_indexes(r300->draw,
                         (const ubyte *) buf + ib->offset,
                         ib->index_size, ~0);
    }
}

/* Initialize the PSC tables. */
static void r300_vertex_psc(struct r300_vertex_element_state *velems)
{
    struct r300_vertex_stream_state *vstream = &velems->vertex_stream;
    uint16_t type, swizzle;
    enum pipe_format format;
    unsigned i;

    /* Vertex shaders have no semantics on their inputs,
     * so PSC should just route stuff based on the vertex elements,
     * and not on attrib information. */
    for (i = 0; i < velems->count; i++) {
        format = velems->velem[i].src_format;

        type = r300_translate_vertex_data_type(format);
        if (type == R300_INVALID_FORMAT) {
            fprintf(stderr, "r300: Bad vertex format %s.\n",
                    util_format_short_name(format));
            assert(0);
            abort();
        }

        type |= i << R300_DST_VEC_LOC_SHIFT;
        swizzle = r300_translate_vertex_data_swizzle(format);

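        /* Two vertex elements share each 32-bit CNTL dword: even-numbered
         * elements occupy the low 16 bits, odd-numbered ones the high 16. */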
        if (i & 1) {
            vstream->vap_prog_stream_cntl[i >> 1] |= type << 16;
            vstream->vap_prog_stream_cntl_ext[i >> 1] |= swizzle << 16;
        } else {
            vstream->vap_prog_stream_cntl[i >> 1] |= type;
            vstream->vap_prog_stream_cntl_ext[i >> 1] |= swizzle;
        }
    }

    /* Set the last vector in the PSC. */
    if (i) {
        i -= 1;
    }
    vstream->vap_prog_stream_cntl[i >> 1] |=
        (R300_LAST_VEC << (i & 1 ? 16 : 0));

    vstream->count = (i >> 1) + 1;
}

static void* r300_create_vertex_elements_state(struct pipe_context* pipe,
                                               unsigned count,
                                               const struct pipe_vertex_element* attribs)
{
    struct r300_vertex_element_state *velems;
    unsigned i;
    struct pipe_vertex_element dummy_attrib = {0};

    /* R300 Programmable Stream Control (PSC) doesn't support 0 vertex elements. */
    if (!count) {
        dummy_attrib.src_format = PIPE_FORMAT_R8G8B8A8_UNORM;
        attribs = &dummy_attrib;
        count = 1;
    } else if (count > 16) {
        fprintf(stderr, "r300: More than 16 vertex elements are not supported,"
                " requested %i, using 16.\n", count);
        count = 16;
    }

    velems = CALLOC_STRUCT(r300_vertex_element_state);
    if (!velems)
        return NULL;

    velems->count = count;
    memcpy(velems->velem, attribs, sizeof(struct pipe_vertex_element) * count);

    if (r300_screen(pipe->screen)->caps.has_tcl) {
        /* Setup PSC.
         * The unused components will be replaced by (..., 0, 1). */
        r300_vertex_psc(velems);

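        /* Pad each attribute to a whole number of dwords and accumulate
         * the total per-vertex size in dwords. */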
        for (i = 0; i < count; i++) {
            velems->format_size[i] =
                align(util_format_get_blocksize(velems->velem[i].src_format), 4);
            velems->vertex_size_dwords += velems->format_size[i] / 4;
        }
    }

    return velems;
}

static void r300_bind_vertex_elements_state(struct pipe_context *pipe,
                                            void *state)
{
    struct r300_context *r300 = r300_context(pipe);
    struct r300_vertex_element_state *velems = state;

    if (!velems) {
        return;
    }

    r300->velems = velems;

    if (r300->draw) {
        draw_set_vertex_elements(r300->draw, velems->count, velems->velem);
        return;
    }

    UPDATE_STATE(&velems->vertex_stream, r300->vertex_stream_state);
    r300->vertex_stream_state.size = (1 + velems->vertex_stream.count) * 2;
    r300->vertex_arrays_dirty = TRUE;
}

static void r300_delete_vertex_elements_state(struct pipe_context *pipe, void *state)
{
    FREE(state);
}

static void* r300_create_vs_state(struct pipe_context* pipe,
                                  const struct pipe_shader_state* shader)
{
    struct r300_context* r300 = r300_context(pipe);
    struct r300_vertex_shader* vs = CALLOC_STRUCT(r300_vertex_shader);

    /* Copy state directly into shader. */
    vs->state = *shader;
    vs->state.tokens = tgsi_dup_tokens(shader->tokens);

    if (r300->screen->caps.has_tcl) {
        r300_init_vs_outputs(r300, vs);
        r300_translate_vertex_shader(r300, vs);
    } else {
        r300_draw_init_vertex_shader(r300, vs);
    }

    return vs;
}

static void r300_bind_vs_state(struct pipe_context* pipe, void* shader)
{
    struct r300_context* r300 = r300_context(pipe);
    struct r300_vertex_shader* vs = (struct r300_vertex_shader*)shader;

    if (!vs) {
        r300->vs_state.state = NULL;
        return;
    }
    if (vs == r300->vs_state.state) {
        return;
    }
    r300->vs_state.state = vs;

    /* The majority of the RS block bits depends on the vertex shader. */
    r300_mark_atom_dirty(r300, &r300->rs_block_state); /* Will be updated before the emission. */

    if (r300->screen->caps.has_tcl) {
        unsigned fc_op_dwords = r300->screen->caps.is_r500 ? 3 : 2;
        r300_mark_atom_dirty(r300, &r300->vs_state);
        r300->vs_state.size = vs->code.length + 9 +
            (R300_VS_MAX_FC_OPS * fc_op_dwords + 4);

        r300_mark_atom_dirty(r300, &r300->vs_constants);
        r300->vs_constants.size =
            2 +
            (vs->externals_count ? vs->externals_count * 4 + 3 : 0) +
            (vs->immediates_count ? vs->immediates_count * 4 + 3 : 0);

        ((struct r300_constant_buffer*)r300->vs_constants.state)->remap_table =
            vs->code.constants_remap_table;

        r300_mark_atom_dirty(r300, &r300->pvs_flush);
    } else {
        draw_bind_vertex_shader(r300->draw,
                                (struct draw_vertex_shader*)vs->draw_vs);
    }
}

static void r300_delete_vs_state(struct pipe_context* pipe, void* shader)
{
    struct r300_context* r300 = r300_context(pipe);
    struct r300_vertex_shader* vs = (struct r300_vertex_shader*)shader;

    if (r300->screen->caps.has_tcl) {
        rc_constants_destroy(&vs->code.constants);
        FREE(vs->code.constants_remap_table);
    } else {
        draw_delete_vertex_shader(r300->draw,
                                  (struct draw_vertex_shader*)vs->draw_vs);
    }

    FREE((void*)vs->state.tokens);
    FREE(shader);
}

static void r300_set_constant_buffer(struct pipe_context *pipe,
                                     uint shader, uint index,
                                     const struct pipe_constant_buffer *cb)
{
    struct r300_context* r300 = r300_context(pipe);
    struct r300_constant_buffer *cbuf;
    uint32_t *mapped;

    if (!cb || (!cb->buffer && !cb->user_buffer))
        return;

    switch (shader) {
        case PIPE_SHADER_VERTEX:
            cbuf = (struct r300_constant_buffer*)r300->vs_constants.state;
            break;
        case PIPE_SHADER_FRAGMENT:
            cbuf = (struct r300_constant_buffer*)r300->fs_constants.state;
            break;
        default:
            return;
    }

    if (cb->user_buffer)
        mapped = (uint32_t*)cb->user_buffer;
    else {
        struct r300_resource *rbuf = r300_resource(cb->buffer);

        if (rbuf && rbuf->malloced_buffer)
            mapped = (uint32_t*)rbuf->malloced_buffer;
        else
            return;
    }

    if (shader == PIPE_SHADER_FRAGMENT ||
        (shader == PIPE_SHADER_VERTEX && r300->screen->caps.has_tcl)) {
        cbuf->ptr = mapped;
    }

    if (shader == PIPE_SHADER_VERTEX) {
        if (r300->screen->caps.has_tcl) {
            struct r300_vertex_shader *vs =
                    (struct r300_vertex_shader*)r300->vs_state.state;

            if (!vs) {
                cbuf->buffer_base = 0;
                return;
            }

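            /* Consecutively bound vertex shaders stack their constants in
             * the PVS constant file; once the running base exceeds
             * R500_MAX_PVS_CONST_VECS, start over at 0 and flush the PVS. */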
            cbuf->buffer_base = r300->vs_const_base;
            r300->vs_const_base += vs->code.constants.Count;
            if (r300->vs_const_base > R500_MAX_PVS_CONST_VECS) {
                r300->vs_const_base = vs->code.constants.Count;
                cbuf->buffer_base = 0;
                r300_mark_atom_dirty(r300, &r300->pvs_flush);
            }
            r300_mark_atom_dirty(r300, &r300->vs_constants);
        } else if (r300->draw) {
            draw_set_mapped_constant_buffer(r300->draw, PIPE_SHADER_VERTEX,
                                            0, mapped, cb->buffer_size);
        }
    } else if (shader == PIPE_SHADER_FRAGMENT) {
        r300_mark_atom_dirty(r300, &r300->fs_constants);
    }
}

static void r300_texture_barrier(struct pipe_context *pipe, unsigned flags)
{
    struct r300_context *r300 = r300_context(pipe);

    r300_mark_atom_dirty(r300, &r300->gpu_flush);
    r300_mark_atom_dirty(r300, &r300->texture_cache_inval);
}

static void r300_memory_barrier(struct pipe_context *pipe, unsigned flags)
{
}

void r300_init_state_functions(struct r300_context* r300)
{
    r300->context.create_blend_state = r300_create_blend_state;
    r300->context.bind_blend_state = r300_bind_blend_state;
    r300->context.delete_blend_state = r300_delete_blend_state;

    r300->context.set_blend_color = r300_set_blend_color;

    r300->context.set_clip_state = r300_set_clip_state;
    r300->context.set_sample_mask = r300_set_sample_mask;

    r300->context.set_constant_buffer = r300_set_constant_buffer;

    r300->context.create_depth_stencil_alpha_state = r300_create_dsa_state;
    r300->context.bind_depth_stencil_alpha_state = r300_bind_dsa_state;
    r300->context.delete_depth_stencil_alpha_state = r300_delete_dsa_state;

    r300->context.set_stencil_ref = r300_set_stencil_ref;

    r300->context.set_framebuffer_state = r300_set_framebuffer_state;

    r300->context.create_fs_state = r300_create_fs_state;
    r300->context.bind_fs_state = r300_bind_fs_state;
    r300->context.delete_fs_state = r300_delete_fs_state;

    r300->context.set_polygon_stipple = r300_set_polygon_stipple;

    r300->context.create_rasterizer_state = r300_create_rs_state;
    r300->context.bind_rasterizer_state = r300_bind_rs_state;
    r300->context.delete_rasterizer_state = r300_delete_rs_state;

    r300->context.create_sampler_state = r300_create_sampler_state;
    r300->context.bind_sampler_states = r300_bind_sampler_states;
    r300->context.delete_sampler_state = r300_delete_sampler_state;

    r300->context.set_sampler_views = r300_set_sampler_views;
    r300->context.create_sampler_view = r300_create_sampler_view;
    r300->context.sampler_view_destroy = r300_sampler_view_destroy;

    r300->context.set_scissor_states = r300_set_scissor_states;

    r300->context.set_viewport_states = r300_set_viewport_states;

    if (r300->screen->caps.has_tcl) {
        r300->context.set_vertex_buffers = r300_set_vertex_buffers_hwtcl;
        r300->context.set_index_buffer = r300_set_index_buffer_hwtcl;
    } else {
        r300->context.set_vertex_buffers = r300_set_vertex_buffers_swtcl;
        r300->context.set_index_buffer = r300_set_index_buffer_swtcl;
    }

    r300->context.create_vertex_elements_state = r300_create_vertex_elements_state;
    r300->context.bind_vertex_elements_state = r300_bind_vertex_elements_state;
    r300->context.delete_vertex_elements_state = r300_delete_vertex_elements_state;

    r300->context.create_vs_state = r300_create_vs_state;
    r300->context.bind_vs_state = r300_bind_vs_state;
    r300->context.delete_vs_state = r300_delete_vs_state;

    r300->context.texture_barrier = r300_texture_barrier;
    r300->context.memory_barrier = r300_memory_barrier;
}