1 /*
2 * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
3 * Copyright 2009 Marek Olšák <maraeo@gmail.com>
4 * SPDX-License-Identifier: MIT
5 */
6
7 #include "draw/draw_context.h"
8
9 #include "util/u_framebuffer.h"
10 #include "util/half_float.h"
11 #include "util/u_helpers.h"
12 #include "util/u_math.h"
13 #include "util/u_memory.h"
14 #include "util/u_pack_color.h"
15 #include "util/u_transfer.h"
16 #include "util/u_blend.h"
17
18 #include "tgsi/tgsi_parse.h"
19
20 #include "util/detect.h"
21
22 #include "r300_cb.h"
23 #include "r300_context.h"
24 #include "r300_emit.h"
25 #include "r300_reg.h"
26 #include "r300_screen.h"
27 #include "r300_screen_buffer.h"
28 #include "r300_state_inlines.h"
29 #include "r300_fs.h"
30 #include "r300_texture.h"
31 #include "r300_vs.h"
32 #include "compiler/r300_nir.h"
33 #include "compiler/nir_to_rc.h"
34
35 /* r300_state: Functions used to initialize the state context by translating
36 * Gallium state objects into semi-native r300 state objects. */
37
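/* Helper: bind a new CSO to the given state atom and mark the atom dirty,
 * skipping the work when the same CSO is already bound. */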
38 #define UPDATE_STATE(cso, atom) \
39 if (cso != atom.state) { \
40 atom.state = cso; \
41 r300_mark_atom_dirty(r300, &(atom)); \
42 }
43
44 static bool blend_discard_if_src_alpha_0(unsigned srcRGB, unsigned srcA,
45 unsigned dstRGB, unsigned dstA)
46 {
47 /* If the blend equation is ADD or REVERSE_SUBTRACT,
48 * SRC_ALPHA == 0, and the following state is set, the colorbuffer
49 * will not be changed.
50 * Notice that the dst factors are the src factors inverted. */
51 return (srcRGB == PIPE_BLENDFACTOR_SRC_ALPHA ||
52 srcRGB == PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE ||
53 srcRGB == PIPE_BLENDFACTOR_ZERO) &&
54 (srcA == PIPE_BLENDFACTOR_SRC_COLOR ||
55 srcA == PIPE_BLENDFACTOR_SRC_ALPHA ||
56 srcA == PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE ||
57 srcA == PIPE_BLENDFACTOR_ZERO) &&
58 (dstRGB == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
59 dstRGB == PIPE_BLENDFACTOR_ONE) &&
60 (dstA == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
61 dstA == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
62 dstA == PIPE_BLENDFACTOR_ONE);
63 }
64
65 static bool blend_discard_if_src_alpha_1(unsigned srcRGB, unsigned srcA,
66 unsigned dstRGB, unsigned dstA)
67 {
68 /* If the blend equation is ADD or REVERSE_SUBTRACT,
69 * SRC_ALPHA == 1, and the following state is set, the colorbuffer
70 * will not be changed.
71 * Notice that the dst factors are the src factors inverted. */
72 return (srcRGB == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
73 srcRGB == PIPE_BLENDFACTOR_ZERO) &&
74 (srcA == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
75 srcA == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
76 srcA == PIPE_BLENDFACTOR_ZERO) &&
77 (dstRGB == PIPE_BLENDFACTOR_SRC_ALPHA ||
78 dstRGB == PIPE_BLENDFACTOR_ONE) &&
79 (dstA == PIPE_BLENDFACTOR_SRC_COLOR ||
80 dstA == PIPE_BLENDFACTOR_SRC_ALPHA ||
81 dstA == PIPE_BLENDFACTOR_ONE);
82 }
83
84 static bool blend_discard_if_src_color_0(unsigned srcRGB, unsigned srcA,
85 unsigned dstRGB, unsigned dstA)
86 {
87 /* If the blend equation is ADD or REVERSE_SUBTRACT,
88 * SRC_COLOR == (0,0,0), and the following state is set, the colorbuffer
89 * will not be changed.
90 * Notice that the dst factors are the src factors inverted. */
91 return (srcRGB == PIPE_BLENDFACTOR_SRC_COLOR ||
92 srcRGB == PIPE_BLENDFACTOR_ZERO) &&
93 (srcA == PIPE_BLENDFACTOR_ZERO) &&
94 (dstRGB == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
95 dstRGB == PIPE_BLENDFACTOR_ONE) &&
96 (dstA == PIPE_BLENDFACTOR_ONE);
97 }
98
99 static bool blend_discard_if_src_color_1(unsigned srcRGB, unsigned srcA,
100 unsigned dstRGB, unsigned dstA)
101 {
102 /* If the blend equation is ADD or REVERSE_SUBTRACT,
103 * SRC_COLOR == (1,1,1), and the following state is set, the colorbuffer
104 * will not be changed.
105 * Notice that the dst factors are the src factors inverted. */
106 return (srcRGB == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
107 srcRGB == PIPE_BLENDFACTOR_ZERO) &&
108 (srcA == PIPE_BLENDFACTOR_ZERO) &&
109 (dstRGB == PIPE_BLENDFACTOR_SRC_COLOR ||
110 dstRGB == PIPE_BLENDFACTOR_ONE) &&
111 (dstA == PIPE_BLENDFACTOR_ONE);
112 }
113
114 static bool blend_discard_if_src_alpha_color_0(unsigned srcRGB, unsigned srcA,
115 unsigned dstRGB, unsigned dstA)
116 {
117 /* If the blend equation is ADD or REVERSE_SUBTRACT,
118 * SRC_ALPHA_COLOR == (0,0,0,0), and the following state is set,
119 * the colorbuffer will not be changed.
120 * Notice that the dst factors are the src factors inverted. */
121 return (srcRGB == PIPE_BLENDFACTOR_SRC_COLOR ||
122 srcRGB == PIPE_BLENDFACTOR_SRC_ALPHA ||
123 srcRGB == PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE ||
124 srcRGB == PIPE_BLENDFACTOR_ZERO) &&
125 (srcA == PIPE_BLENDFACTOR_SRC_COLOR ||
126 srcA == PIPE_BLENDFACTOR_SRC_ALPHA ||
127 srcA == PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE ||
128 srcA == PIPE_BLENDFACTOR_ZERO) &&
129 (dstRGB == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
130 dstRGB == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
131 dstRGB == PIPE_BLENDFACTOR_ONE) &&
132 (dstA == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
133 dstA == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
134 dstA == PIPE_BLENDFACTOR_ONE);
135 }
136
137 static bool blend_discard_if_src_alpha_color_1(unsigned srcRGB, unsigned srcA,
138 unsigned dstRGB, unsigned dstA)
139 {
140 /* If the blend equation is ADD or REVERSE_SUBTRACT,
141 * SRC_ALPHA_COLOR == (1,1,1,1), and the following state is set,
142 * the colorbuffer will not be changed.
143 * Notice that the dst factors are the src factors inverted. */
144 return (srcRGB == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
145 srcRGB == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
146 srcRGB == PIPE_BLENDFACTOR_ZERO) &&
147 (srcA == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
148 srcA == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
149 srcA == PIPE_BLENDFACTOR_ZERO) &&
150 (dstRGB == PIPE_BLENDFACTOR_SRC_COLOR ||
151 dstRGB == PIPE_BLENDFACTOR_SRC_ALPHA ||
152 dstRGB == PIPE_BLENDFACTOR_ONE) &&
153 (dstA == PIPE_BLENDFACTOR_SRC_COLOR ||
154 dstA == PIPE_BLENDFACTOR_SRC_ALPHA ||
155 dstA == PIPE_BLENDFACTOR_ONE);
156 }
157
158 static unsigned blend_discard_conditionally(unsigned eqRGB, unsigned eqA,
159 unsigned dstRGB, unsigned dstA,
160 unsigned srcRGB, unsigned srcA)
161 {
162 unsigned blend_control = 0;
163
164 /* Optimization: discard pixels which don't change the colorbuffer.
165 *
166 * The code below is non-trivial and some math is involved.
167 *
168 * Discarding pixels must be disabled when FP16 AA is enabled.
169 * This is a hardware bug. Also, this implementation wouldn't work
170 * with FP blending enabled and equation clamping disabled.
171 *
172 * Equations other than ADD are rarely used and therefore won't be
173 * optimized. */
174 if ((eqRGB == PIPE_BLEND_ADD || eqRGB == PIPE_BLEND_REVERSE_SUBTRACT) &&
175 (eqA == PIPE_BLEND_ADD || eqA == PIPE_BLEND_REVERSE_SUBTRACT)) {
176 /* ADD: X+Y
177 * REVERSE_SUBTRACT: Y-X
178 *
179 * The idea is:
180 * If X = src*srcFactor = 0 and Y = dst*dstFactor = 1,
181 * then CB will not be changed.
182 *
183 * Given the srcFactor and dstFactor variables, we can derive
184 * what src and dst should be equal to and discard appropriate
185 * pixels.
186 */
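/* Worked example (classic alpha blending): with srcRGB/srcA = SRC_ALPHA and
 * dstRGB/dstA = INV_SRC_ALPHA, a fragment whose source alpha is 0 produces
 * dst*1 + src*0 = dst, so it can be discarded; this is the SRC_ALPHA_0 case
 * matched first below. */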
187 if (blend_discard_if_src_alpha_0(srcRGB, srcA, dstRGB, dstA)) {
188 blend_control |= R300_DISCARD_SRC_PIXELS_SRC_ALPHA_0;
189 } else if (blend_discard_if_src_alpha_1(srcRGB, srcA,
190 dstRGB, dstA)) {
191 blend_control |= R300_DISCARD_SRC_PIXELS_SRC_ALPHA_1;
192 } else if (blend_discard_if_src_color_0(srcRGB, srcA,
193 dstRGB, dstA)) {
194 blend_control |= R300_DISCARD_SRC_PIXELS_SRC_COLOR_0;
195 } else if (blend_discard_if_src_color_1(srcRGB, srcA,
196 dstRGB, dstA)) {
197 blend_control |= R300_DISCARD_SRC_PIXELS_SRC_COLOR_1;
198 } else if (blend_discard_if_src_alpha_color_0(srcRGB, srcA,
199 dstRGB, dstA)) {
200 blend_control |=
201 R300_DISCARD_SRC_PIXELS_SRC_ALPHA_COLOR_0;
202 } else if (blend_discard_if_src_alpha_color_1(srcRGB, srcA,
203 dstRGB, dstA)) {
204 blend_control |=
205 R300_DISCARD_SRC_PIXELS_SRC_ALPHA_COLOR_1;
206 }
207 }
208 return blend_control;
209 }
210
211 /* The hardware colormask is clunky and must be swizzled depending on the format.
212 * This was figured out by trial-and-error. */
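/* Illustration (assuming the usual Gallium mask bits R=0x1, G=0x2, B=0x4,
 * A=0x8): bgra_cmask() below swaps the R and B bits for BGRA colorbuffers,
 * so e.g. PIPE_MASK_R | PIPE_MASK_A (0x9) becomes 0xC in hardware terms. */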
213 static unsigned bgra_cmask(unsigned mask)
214 {
215 return ((mask & PIPE_MASK_R) << 2) |
216 ((mask & PIPE_MASK_B) >> 2) |
217 (mask & (PIPE_MASK_G | PIPE_MASK_A));
218 }
219
220 static unsigned rgba_cmask(unsigned mask)
221 {
222 return mask & PIPE_MASK_RGBA;
223 }
224
225 static unsigned rrrr_cmask(unsigned mask)
226 {
227 return (mask & PIPE_MASK_R) |
228 ((mask & PIPE_MASK_R) << 1) |
229 ((mask & PIPE_MASK_R) << 2) |
230 ((mask & PIPE_MASK_R) << 3);
231 }
232
233 static unsigned aaaa_cmask(unsigned mask)
234 {
235 return ((mask & PIPE_MASK_A) >> 3) |
236 ((mask & PIPE_MASK_A) >> 2) |
237 ((mask & PIPE_MASK_A) >> 1) |
238 (mask & PIPE_MASK_A);
239 }
240
241 static unsigned grrg_cmask(unsigned mask)
242 {
243 return ((mask & PIPE_MASK_R) << 1) |
244 ((mask & PIPE_MASK_R) << 2) |
245 ((mask & PIPE_MASK_G) >> 1) |
246 ((mask & PIPE_MASK_G) << 2);
247 }
248
249 static unsigned arra_cmask(unsigned mask)
250 {
251 return ((mask & PIPE_MASK_R) << 1) |
252 ((mask & PIPE_MASK_R) << 2) |
253 ((mask & PIPE_MASK_A) >> 3) |
254 (mask & PIPE_MASK_A);
255 }
256
257 static unsigned blend_read_enable(unsigned eqRGB, unsigned eqA,
258 unsigned dstRGB, unsigned dstA,
259 unsigned srcRGB, unsigned srcA,
260 bool src_alpha_optz)
261 {
262 unsigned blend_control = 0;
263
264 /* Optimization: some operations do not require the destination color.
265 *
266 * When SRC_ALPHA_SATURATE is used, colorbuffer reads must be enabled,
267 * otherwise blending gives incorrect results. It seems to be
268 * a hardware bug. */
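/* For example, plain replace blending (src = ONE, dst = ZERO, eq = ADD)
 * never needs the destination color, so no read is enabled for it. */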
269 if (eqRGB == PIPE_BLEND_MIN || eqA == PIPE_BLEND_MIN ||
270 eqRGB == PIPE_BLEND_MAX || eqA == PIPE_BLEND_MAX ||
271 dstRGB != PIPE_BLENDFACTOR_ZERO ||
272 dstA != PIPE_BLENDFACTOR_ZERO ||
273 util_blend_factor_uses_dest(srcRGB, false) ||
274 util_blend_factor_uses_dest(srcA, true)) {
275 /* Enable reading from the colorbuffer. */
276 blend_control |= R300_READ_ENABLE;
277
278 if (src_alpha_optz) {
279 /* Optimization: Depending on incoming pixels, we can
280 * conditionally disable the reading in hardware... */
281 if (eqRGB != PIPE_BLEND_MIN && eqA != PIPE_BLEND_MIN &&
282 eqRGB != PIPE_BLEND_MAX && eqA != PIPE_BLEND_MAX) {
283 /* Disable reading if SRC_ALPHA == 0. */
284 if ((dstRGB == PIPE_BLENDFACTOR_SRC_ALPHA ||
285 dstRGB == PIPE_BLENDFACTOR_ZERO) &&
286 (dstA == PIPE_BLENDFACTOR_SRC_COLOR ||
287 dstA == PIPE_BLENDFACTOR_SRC_ALPHA ||
288 dstA == PIPE_BLENDFACTOR_ZERO) &&
289 (srcRGB != PIPE_BLENDFACTOR_DST_COLOR &&
290 srcRGB != PIPE_BLENDFACTOR_DST_ALPHA &&
291 srcRGB != PIPE_BLENDFACTOR_INV_DST_COLOR &&
292 srcRGB != PIPE_BLENDFACTOR_INV_DST_ALPHA)) {
293 blend_control |= R500_SRC_ALPHA_0_NO_READ;
294 }
295
296 /* Disable reading if SRC_ALPHA == 1. */
297 if ((dstRGB == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
298 dstRGB == PIPE_BLENDFACTOR_ZERO) &&
299 (dstA == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
300 dstA == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
301 dstA == PIPE_BLENDFACTOR_ZERO) &&
302 (srcRGB != PIPE_BLENDFACTOR_DST_COLOR &&
303 srcRGB != PIPE_BLENDFACTOR_DST_ALPHA &&
304 srcRGB != PIPE_BLENDFACTOR_INV_DST_COLOR &&
305 srcRGB != PIPE_BLENDFACTOR_INV_DST_ALPHA)) {
306 blend_control |= R500_SRC_ALPHA_1_NO_READ;
307 }
308 }
309 }
310 }
311 return blend_control;
312 }
313
314 /* Create a new blend state based on the CSO blend state.
315 *
316 * This encompasses alpha blending, logic/raster ops, and blend dithering. */
317 static void* r300_create_blend_state(struct pipe_context* pipe,
318 const struct pipe_blend_state* state)
319 {
320 struct r300_screen* r300screen = r300_screen(pipe->screen);
321 struct r300_blend_state* blend = CALLOC_STRUCT(r300_blend_state);
322 uint32_t blend_control = 0; /* R300_RB3D_CBLEND: 0x4e04 */
323 uint32_t blend_control_noclamp = 0; /* R300_RB3D_CBLEND: 0x4e04 */
324 uint32_t blend_control_noalpha = 0; /* R300_RB3D_CBLEND: 0x4e04 */
325 uint32_t blend_control_noalpha_noclamp = 0; /* R300_RB3D_CBLEND: 0x4e04 */
326 uint32_t alpha_blend_control = 0; /* R300_RB3D_ABLEND: 0x4e08 */
327 uint32_t alpha_blend_control_noclamp = 0; /* R300_RB3D_ABLEND: 0x4e08 */
328 uint32_t alpha_blend_control_noalpha = 0; /* R300_RB3D_ABLEND: 0x4e08 */
329 uint32_t alpha_blend_control_noalpha_noclamp = 0; /* R300_RB3D_ABLEND: 0x4e08 */
330 uint32_t rop = 0; /* R300_RB3D_ROPCNTL: 0x4e18 */
331 uint32_t dither = 0; /* R300_RB3D_DITHER_CTL: 0x4e50 */
332 int i;
333
334 const unsigned eqRGB = state->rt[0].rgb_func;
335 const unsigned srcRGB = state->rt[0].rgb_src_factor;
336 const unsigned dstRGB = state->rt[0].rgb_dst_factor;
337
338 const unsigned eqA = state->rt[0].alpha_func;
339 const unsigned srcA = state->rt[0].alpha_src_factor;
340 const unsigned dstA = state->rt[0].alpha_dst_factor;
341
342 unsigned srcRGBX = srcRGB;
343 unsigned dstRGBX = dstRGB;
344 CB_LOCALS;
345
346 blend->state = *state;
347
348 /* force DST_ALPHA to ONE where we can */
349 switch (srcRGBX) {
350 case PIPE_BLENDFACTOR_DST_ALPHA:
351 srcRGBX = PIPE_BLENDFACTOR_ONE;
352 break;
353 case PIPE_BLENDFACTOR_INV_DST_ALPHA:
354 srcRGBX = PIPE_BLENDFACTOR_ZERO;
355 break;
356 }
357
358 switch (dstRGBX) {
359 case PIPE_BLENDFACTOR_DST_ALPHA:
360 dstRGBX = PIPE_BLENDFACTOR_ONE;
361 break;
362 case PIPE_BLENDFACTOR_INV_DST_ALPHA:
363 dstRGBX = PIPE_BLENDFACTOR_ZERO;
364 break;
365 }
366
367 /* Get blending register values. */
368 if (state->rt[0].blend_enable) {
369 unsigned blend_eq, blend_eq_noclamp;
370
371 /* despite the name, ALPHA_BLEND_ENABLE has nothing to do with alpha,
372 * this is just the crappy D3D naming */
373 blend_control = blend_control_noclamp =
374 R300_ALPHA_BLEND_ENABLE |
375 ( r300_translate_blend_factor(srcRGB) << R300_SRC_BLEND_SHIFT) |
376 ( r300_translate_blend_factor(dstRGB) << R300_DST_BLEND_SHIFT);
377
378 blend_control_noalpha = blend_control_noalpha_noclamp =
379 R300_ALPHA_BLEND_ENABLE |
380 ( r300_translate_blend_factor(srcRGBX) << R300_SRC_BLEND_SHIFT) |
381 ( r300_translate_blend_factor(dstRGBX) << R300_DST_BLEND_SHIFT);
382
383 blend_eq = r300_translate_blend_function(eqRGB, true);
384 blend_eq_noclamp = r300_translate_blend_function(eqRGB, false);
385
386 blend_control |= blend_eq;
387 blend_control_noalpha |= blend_eq;
388 blend_control_noclamp |= blend_eq_noclamp;
389 blend_control_noalpha_noclamp |= blend_eq_noclamp;
390
391 /* Optimization: some operations do not require the destination color. */
392 blend_control |= blend_read_enable(eqRGB, eqA, dstRGB, dstA,
393 srcRGB, srcA, r300screen->caps.is_r500);
394 blend_control_noclamp |= blend_read_enable(eqRGB, eqA, dstRGB, dstA,
395 srcRGB, srcA, false);
396 blend_control_noalpha |= blend_read_enable(eqRGB, eqA, dstRGBX, dstA,
397 srcRGBX, srcA, r300screen->caps.is_r500);
398 blend_control_noalpha_noclamp |= blend_read_enable(eqRGB, eqA, dstRGBX, dstA,
399 srcRGBX, srcA, false);
400
401 /* Optimization: discard pixels which don't change the colorbuffer.
402 * It cannot be used with FP16 AA. */
403 blend_control |= blend_discard_conditionally(eqRGB, eqA, dstRGB, dstA,
404 srcRGB, srcA);
405 blend_control_noalpha |= blend_discard_conditionally(eqRGB, eqA, dstRGBX, dstA,
406 srcRGBX, srcA);
407
408 /* separate alpha */
409 if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB) {
410 blend_control |= R300_SEPARATE_ALPHA_ENABLE;
411 blend_control_noclamp |= R300_SEPARATE_ALPHA_ENABLE;
412
413 alpha_blend_control = alpha_blend_control_noclamp =
414 (r300_translate_blend_factor(srcA) << R300_SRC_BLEND_SHIFT) |
415 (r300_translate_blend_factor(dstA) << R300_DST_BLEND_SHIFT);
416 alpha_blend_control |= r300_translate_blend_function(eqA, true);
417 alpha_blend_control_noclamp |= r300_translate_blend_function(eqA, false);
418 }
419 if (srcA != srcRGBX || dstA != dstRGBX || eqA != eqRGB) {
420 blend_control_noalpha |= R300_SEPARATE_ALPHA_ENABLE;
421 blend_control_noalpha_noclamp |= R300_SEPARATE_ALPHA_ENABLE;
422
423 alpha_blend_control_noalpha = alpha_blend_control_noalpha_noclamp =
424 (r300_translate_blend_factor(srcA) << R300_SRC_BLEND_SHIFT) |
425 (r300_translate_blend_factor(dstA) << R300_DST_BLEND_SHIFT);
426 alpha_blend_control_noalpha |= r300_translate_blend_function(eqA, true);
427 alpha_blend_control_noalpha_noclamp |= r300_translate_blend_function(eqA, false);
428 }
429 }
430
431 /* PIPE_LOGICOP_* don't need to be translated, fortunately. */
432 if (state->logicop_enable) {
433 rop = R300_RB3D_ROPCNTL_ROP_ENABLE |
434 (state->logicop_func) << R300_RB3D_ROPCNTL_ROP_SHIFT;
435 }
436
437 /* Neither fglrx nor classic r300 ever set this, regardless of dithering
438 * state. Since it's an optional implementation detail, we can leave it
439 * out and never dither.
440 *
441 * This could be revisited if we ever get quality or conformance hints.
442 *
443 if (state->dither) {
444 dither = R300_RB3D_DITHER_CTL_DITHER_MODE_LUT |
445 R300_RB3D_DITHER_CTL_ALPHA_DITHER_MODE_LUT;
446 }
447 */
448
449 /* Build a command buffer. */
450 {
451 unsigned (*func[COLORMASK_NUM_SWIZZLES])(unsigned) = {
452 bgra_cmask,
453 rgba_cmask,
454 rrrr_cmask,
455 aaaa_cmask,
456 grrg_cmask,
457 arra_cmask,
458 bgra_cmask,
459 rgba_cmask
460 };
461
462 for (i = 0; i < COLORMASK_NUM_SWIZZLES; i++) {
463 bool has_alpha = i != COLORMASK_RGBX && i != COLORMASK_BGRX;
464
465 BEGIN_CB(blend->cb_clamp[i], 8);
466 OUT_CB_REG(R300_RB3D_ROPCNTL, rop);
467 OUT_CB_REG_SEQ(R300_RB3D_CBLEND, 3);
468 OUT_CB(has_alpha ? blend_control : blend_control_noalpha);
469 OUT_CB(has_alpha ? alpha_blend_control : alpha_blend_control_noalpha);
470 OUT_CB(func[i](state->rt[0].colormask));
471 OUT_CB_REG(R300_RB3D_DITHER_CTL, dither);
472 END_CB;
473 }
474 }
475
476 /* Build a command buffer (for RGBA16F). */
477 BEGIN_CB(blend->cb_noclamp, 8);
478 OUT_CB_REG(R300_RB3D_ROPCNTL, rop);
479 OUT_CB_REG_SEQ(R300_RB3D_CBLEND, 3);
480 OUT_CB(blend_control_noclamp);
481 OUT_CB(alpha_blend_control_noclamp);
482 OUT_CB(rgba_cmask(state->rt[0].colormask));
483 OUT_CB_REG(R300_RB3D_DITHER_CTL, dither);
484 END_CB;
485
486 /* Build a command buffer (for RGB16F). */
487 BEGIN_CB(blend->cb_noclamp_noalpha, 8);
488 OUT_CB_REG(R300_RB3D_ROPCNTL, rop);
489 OUT_CB_REG_SEQ(R300_RB3D_CBLEND, 3);
490 OUT_CB(blend_control_noalpha_noclamp);
491 OUT_CB(alpha_blend_control_noalpha_noclamp);
492 OUT_CB(rgba_cmask(state->rt[0].colormask));
493 OUT_CB_REG(R300_RB3D_DITHER_CTL, dither);
494 END_CB;
495
496 /* The same as above, but with no colorbuffer reads and writes. */
497 BEGIN_CB(blend->cb_no_readwrite, 8);
498 OUT_CB_REG(R300_RB3D_ROPCNTL, rop);
499 OUT_CB_REG_SEQ(R300_RB3D_CBLEND, 3);
500 OUT_CB(0);
501 OUT_CB(0);
502 OUT_CB(0);
503 OUT_CB_REG(R300_RB3D_DITHER_CTL, dither);
504 END_CB;
505
506 return (void*)blend;
507 }
508
509 /* Bind blend state. */
510 static void r300_bind_blend_state(struct pipe_context* pipe,
511 void* state)
512 {
513 struct r300_context* r300 = r300_context(pipe);
514 struct r300_blend_state *blend = (struct r300_blend_state*)state;
515 bool last_alpha_to_one = r300->alpha_to_one;
516 bool last_alpha_to_coverage = r300->alpha_to_coverage;
517
518 UPDATE_STATE(state, r300->blend_state);
519
520 if (!blend)
521 return;
522
523 r300->alpha_to_one = blend->state.alpha_to_one;
524 r300->alpha_to_coverage = blend->state.alpha_to_coverage;
525
526 if (r300->alpha_to_one != last_alpha_to_one && r300->msaa_enable &&
527 r300->fs_status == FRAGMENT_SHADER_VALID) {
528 r300->fs_status = FRAGMENT_SHADER_MAYBE_DIRTY;
529 }
530
531 if (r300->alpha_to_coverage != last_alpha_to_coverage &&
532 r300->msaa_enable) {
533 r300_mark_atom_dirty(r300, &r300->dsa_state);
534 }
535 }
536
537 /* Free blend state. */
538 static void r300_delete_blend_state(struct pipe_context* pipe,
539 void* state)
540 {
541 FREE(state);
542 }
543
544 /* Convert a float to a 10-bit integer. */
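/* e.g. 0.0f -> 0, 0.5f -> 511, 1.0f -> 1023; the 1023.9 scale is presumably
 * there so values at or near 1.0f still truncate to 1023, and the CLAMP
 * guards against out-of-range input. */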
545 static unsigned float_to_fixed10(float f)
546 {
547 return CLAMP((unsigned)(f * 1023.9f), 0, 1023);
548 }
549
550 /* Set blend color.
551 * Setup both R300 and R500 registers, figure out later which one to write. */
552 static void r300_set_blend_color(struct pipe_context* pipe,
553 const struct pipe_blend_color* color)
554 {
555 struct r300_context* r300 = r300_context(pipe);
556 struct pipe_framebuffer_state *fb = r300->fb_state.state;
557 struct r300_blend_color_state *state =
558 (struct r300_blend_color_state*)r300->blend_color_state.state;
559 struct pipe_blend_color c;
560 struct pipe_surface *cb;
561 float tmp;
562 CB_LOCALS;
563
564 state->state = *color; /* Save it, so that we can reuse it in set_fb_state */
565 c = *color;
566 cb = fb->nr_cbufs ? r300_get_nonnull_cb(fb, 0) : NULL;
567
568 /* The blend color is dependent on the colorbuffer format. */
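/* e.g. the R8G8B8A8 cases at the end of the switch below swap red and blue,
 * presumably so the fixed packing done afterwards lands in the channels the
 * hardware expects for those formats. */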
569 if (cb) {
570 switch (cb->format) {
571 case PIPE_FORMAT_R8_UNORM:
572 case PIPE_FORMAT_L8_UNORM:
573 case PIPE_FORMAT_I8_UNORM:
574 c.color[1] = c.color[0];
575 break;
576
577 case PIPE_FORMAT_A8_UNORM:
578 c.color[1] = c.color[3];
579 break;
580
581 case PIPE_FORMAT_R8G8_UNORM:
582 c.color[2] = c.color[1];
583 break;
584
585 case PIPE_FORMAT_L8A8_UNORM:
586 case PIPE_FORMAT_R8A8_UNORM:
587 c.color[2] = c.color[3];
588 break;
589
590 case PIPE_FORMAT_R8G8B8A8_UNORM:
591 case PIPE_FORMAT_R8G8B8X8_UNORM:
592 case PIPE_FORMAT_R10G10B10A2_UNORM:
593 tmp = c.color[0];
594 c.color[0] = c.color[2];
595 c.color[2] = tmp;
596 break;
597
598 default:;
599 }
600 }
601
602 if (r300->screen->caps.is_r500) {
603 BEGIN_CB(state->cb, 3);
604 OUT_CB_REG_SEQ(R500_RB3D_CONSTANT_COLOR_AR, 2);
605
606 switch (cb ? cb->format : 0) {
607 case PIPE_FORMAT_R16G16B16A16_FLOAT:
608 case PIPE_FORMAT_R16G16B16X16_FLOAT:
609 OUT_CB(_mesa_float_to_half(c.color[2]) |
610 (_mesa_float_to_half(c.color[3]) << 16));
611 OUT_CB(_mesa_float_to_half(c.color[0]) |
612 (_mesa_float_to_half(c.color[1]) << 16));
613 break;
614
615 default:
616 OUT_CB(float_to_fixed10(c.color[0]) |
617 (float_to_fixed10(c.color[3]) << 16));
618 OUT_CB(float_to_fixed10(c.color[2]) |
619 (float_to_fixed10(c.color[1]) << 16));
620 }
621
622 END_CB;
623 } else {
624 union util_color uc;
625 util_pack_color(c.color, PIPE_FORMAT_B8G8R8A8_UNORM, &uc);
626
627 BEGIN_CB(state->cb, 2);
628 OUT_CB_REG(R300_RB3D_BLEND_COLOR, uc.ui[0]);
629 END_CB;
630 }
631
632 r300_mark_atom_dirty(r300, &r300->blend_color_state);
633 }
634
635 static void r300_set_clip_state(struct pipe_context* pipe,
636 const struct pipe_clip_state* state)
637 {
638 struct r300_context* r300 = r300_context(pipe);
639 struct r300_clip_state *clip =
640 (struct r300_clip_state*)r300->clip_state.state;
641 CB_LOCALS;
642
643 if (r300->screen->caps.has_tcl) {
644 BEGIN_CB(clip->cb, r300->clip_state.size);
645 OUT_CB_REG(R300_VAP_PVS_VECTOR_INDX_REG,
646 (r300->screen->caps.is_r500 ?
647 R500_PVS_UCP_START : R300_PVS_UCP_START));
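/* Upload the 6 user clip planes (4 floats each) into PVS vector memory,
 * starting at the UCP slot selected above. */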
648 OUT_CB_ONE_REG(R300_VAP_PVS_UPLOAD_DATA, 6 * 4);
649 OUT_CB_TABLE(state->ucp, 6 * 4);
650 END_CB;
651
652 r300_mark_atom_dirty(r300, &r300->clip_state);
653 } else {
654 draw_set_clip_state(r300->draw, state);
655 }
656 }
657
658 /* Create a new depth, stencil, and alpha state based on the CSO dsa state.
659 *
660 * This contains the depth buffer, stencil buffer, alpha test, and such.
661 * On the Radeon, depth and stencil buffer setup are intertwined, which is
662 * the reason for some of the strange-looking assignments across registers. */
663 static void* r300_create_dsa_state(struct pipe_context* pipe,
664 const struct pipe_depth_stencil_alpha_state* state)
665 {
666 bool is_r500 = r300_screen(pipe->screen)->caps.is_r500;
667 struct r300_dsa_state* dsa = CALLOC_STRUCT(r300_dsa_state);
668 CB_LOCALS;
669 uint32_t alpha_value_fp16 = 0;
670 uint32_t z_buffer_control = 0;
671 uint32_t z_stencil_control = 0;
672 uint32_t stencil_ref_mask = 0;
673 uint32_t stencil_ref_bf = 0;
674
675 dsa->dsa = *state;
676
677 /* Depth test setup. The depth write mask is handled separately from the depth test for the decompression flush. */
678 if (state->depth_writemask) {
679 z_buffer_control |= R300_Z_WRITE_ENABLE;
680 }
681
682 if (state->depth_enabled) {
683 z_buffer_control |= R300_Z_ENABLE;
684
685 z_stencil_control |=
686 (r300_translate_depth_stencil_function(state->depth_func) <<
687 R300_Z_FUNC_SHIFT);
688 }
689
690 /* Stencil buffer setup. */
691 if (state->stencil[0].enabled) {
692 z_buffer_control |= R300_STENCIL_ENABLE;
693 z_stencil_control |=
694 (r300_translate_depth_stencil_function(state->stencil[0].func) <<
695 R300_S_FRONT_FUNC_SHIFT) |
696 (r300_translate_stencil_op(state->stencil[0].fail_op) <<
697 R300_S_FRONT_SFAIL_OP_SHIFT) |
698 (r300_translate_stencil_op(state->stencil[0].zpass_op) <<
699 R300_S_FRONT_ZPASS_OP_SHIFT) |
700 (r300_translate_stencil_op(state->stencil[0].zfail_op) <<
701 R300_S_FRONT_ZFAIL_OP_SHIFT);
702
703 stencil_ref_mask =
704 (state->stencil[0].valuemask << R300_STENCILMASK_SHIFT) |
705 (state->stencil[0].writemask << R300_STENCILWRITEMASK_SHIFT);
706
707 if (state->stencil[1].enabled) {
708 dsa->two_sided = true;
709
710 z_buffer_control |= R300_STENCIL_FRONT_BACK;
711 z_stencil_control |=
712 (r300_translate_depth_stencil_function(state->stencil[1].func) <<
713 R300_S_BACK_FUNC_SHIFT) |
714 (r300_translate_stencil_op(state->stencil[1].fail_op) <<
715 R300_S_BACK_SFAIL_OP_SHIFT) |
716 (r300_translate_stencil_op(state->stencil[1].zpass_op) <<
717 R300_S_BACK_ZPASS_OP_SHIFT) |
718 (r300_translate_stencil_op(state->stencil[1].zfail_op) <<
719 R300_S_BACK_ZFAIL_OP_SHIFT);
720
721 stencil_ref_bf =
722 (state->stencil[1].valuemask << R300_STENCILMASK_SHIFT) |
723 (state->stencil[1].writemask << R300_STENCILWRITEMASK_SHIFT);
724
725 if (is_r500) {
726 z_buffer_control |= R500_STENCIL_REFMASK_FRONT_BACK;
727 } else {
728 dsa->two_sided_stencil_ref =
729 (state->stencil[0].valuemask != state->stencil[1].valuemask ||
730 state->stencil[0].writemask != state->stencil[1].writemask);
731 }
732 }
733 }
734
735 /* Alpha test setup. */
736 if (state->alpha_enabled) {
737 dsa->alpha_function =
738 r300_translate_alpha_function(state->alpha_func) |
739 R300_FG_ALPHA_FUNC_ENABLE;
740
741 dsa->alpha_function |= float_to_ubyte(state->alpha_ref_value);
742 alpha_value_fp16 = _mesa_float_to_half(state->alpha_ref_value);
743 }
744
745 BEGIN_CB(&dsa->cb_begin, 8);
746 OUT_CB_REG_SEQ(R300_ZB_CNTL, 3);
747 OUT_CB(z_buffer_control);
748 OUT_CB(z_stencil_control);
749 OUT_CB(stencil_ref_mask);
750 OUT_CB_REG(R500_ZB_STENCILREFMASK_BF, stencil_ref_bf);
751 OUT_CB_REG(R500_FG_ALPHA_VALUE, alpha_value_fp16);
752 END_CB;
753
754 BEGIN_CB(dsa->cb_zb_no_readwrite, 8);
755 OUT_CB_REG_SEQ(R300_ZB_CNTL, 3);
756 OUT_CB(0);
757 OUT_CB(0);
758 OUT_CB(0);
759 OUT_CB_REG(R500_ZB_STENCILREFMASK_BF, 0);
760 OUT_CB_REG(R500_FG_ALPHA_VALUE, alpha_value_fp16);
761 END_CB;
762
763 return (void*)dsa;
764 }
765
766 static void r300_dsa_inject_stencilref(struct r300_context *r300)
767 {
768 struct r300_dsa_state *dsa =
769 (struct r300_dsa_state*)r300->dsa_state.state;
770
771 if (!dsa)
772 return;
773
774 dsa->stencil_ref_mask =
775 (dsa->stencil_ref_mask & ~R300_STENCILREF_MASK) |
776 r300->stencil_ref.ref_value[0];
777 dsa->stencil_ref_bf =
778 (dsa->stencil_ref_bf & ~R300_STENCILREF_MASK) |
779 r300->stencil_ref.ref_value[1];
780 }
781
782 /* Bind DSA state. */
783 static void r300_bind_dsa_state(struct pipe_context* pipe,
784 void* state)
785 {
786 struct r300_context* r300 = r300_context(pipe);
787
788 if (!state) {
789 return;
790 }
791
792 UPDATE_STATE(state, r300->dsa_state);
793
794 r300_mark_atom_dirty(r300, &r300->hyperz_state); /* Will be updated before the emission. */
795 r300_dsa_inject_stencilref(r300);
796 }
797
798 /* Free DSA state. */
799 static void r300_delete_dsa_state(struct pipe_context* pipe,
800 void* state)
801 {
802 FREE(state);
803 }
804
805 static void r300_set_stencil_ref(struct pipe_context* pipe,
806 const struct pipe_stencil_ref sr)
807 {
808 struct r300_context* r300 = r300_context(pipe);
809
810 r300->stencil_ref = sr;
811
812 r300_dsa_inject_stencilref(r300);
813 r300_mark_atom_dirty(r300, &r300->dsa_state);
814 }
815
816 static void r300_print_fb_surf_info(struct pipe_surface *surf, unsigned index,
817 const char *binding)
818 {
819 struct pipe_resource *tex = surf->texture;
820 struct r300_resource *rtex = r300_resource(tex);
821
822 fprintf(stderr,
823 "r300: %s[%i] Dim: %ix%i, Firstlayer: %i, "
824 "Lastlayer: %i, Level: %i, Format: %s\n"
825
826 "r300: TEX: Macro: %s, Micro: %s, "
827 "Dim: %ix%ix%i, LastLevel: %i, Format: %s\n",
828
829 binding, index, surf->width, surf->height,
830 surf->u.tex.first_layer, surf->u.tex.last_layer, surf->u.tex.level,
831 util_format_short_name(surf->format),
832
833 rtex->tex.macrotile[0] ? "YES" : " NO",
834 rtex->tex.microtile ? "YES" : " NO",
835 tex->width0, tex->height0, tex->depth0,
836 tex->last_level, util_format_short_name(surf->format));
837 }
838
839 void r300_mark_fb_state_dirty(struct r300_context *r300,
840 enum r300_fb_state_change change)
841 {
842 struct pipe_framebuffer_state *state = r300->fb_state.state;
843
844 r300_mark_atom_dirty(r300, &r300->gpu_flush);
845 r300_mark_atom_dirty(r300, &r300->fb_state);
846
847 /* What is marked as dirty depends on the enum r300_fb_state_change. */
848 if (change == R300_CHANGED_FB_STATE) {
849 r300_mark_atom_dirty(r300, &r300->aa_state);
850 r300_mark_atom_dirty(r300, &r300->dsa_state); /* for AlphaRef */
851 r300_set_blend_color(&r300->context, r300->blend_color_state.state);
852 }
853
854 if (change == R300_CHANGED_FB_STATE ||
855 change == R300_CHANGED_HYPERZ_FLAG) {
856 r300_mark_atom_dirty(r300, &r300->hyperz_state);
857 }
858
859 if (change == R300_CHANGED_FB_STATE ||
860 change == R300_CHANGED_MULTIWRITE) {
861 r300_mark_atom_dirty(r300, &r300->fb_state_pipelined);
862 }
863
864 /* Now compute the fb_state atom size. */
865 r300->fb_state.size = 2 + (8 * state->nr_cbufs);
866
867 if (r300->cbzb_clear)
868 r300->fb_state.size += 10;
869 else if (state->zsbuf) {
870 r300->fb_state.size += 10;
871 if (r300->hyperz_enabled)
872 r300->fb_state.size += 8;
873 }
874
875 if (r300->cmask_in_use) {
876 r300->fb_state.size += 6;
877 if (r300->screen->caps.is_r500) {
878 r300->fb_state.size += 3;
879 }
880 }
881
882 /* The size of the rest of the atoms stays the same. */
883 }
884
885 static void
886 r300_set_framebuffer_state(struct pipe_context* pipe,
887 const struct pipe_framebuffer_state* state)
888 {
889 struct r300_context* r300 = r300_context(pipe);
890 struct r300_aa_state *aa = (struct r300_aa_state*)r300->aa_state.state;
891 struct pipe_framebuffer_state *current_state = r300->fb_state.state;
892 unsigned max_width, max_height, i;
893 uint32_t zbuffer_bpp = 0;
894 bool unlock_zbuffer = false;
895
896 if (r300->screen->caps.is_r500) {
897 max_width = max_height = 4096;
898 } else if (r300->screen->caps.is_r400) {
899 max_width = max_height = 4021;
900 } else {
901 max_width = max_height = 2560;
902 }
903
904 if (state->width > max_width || state->height > max_height) {
905 fprintf(stderr, "r300: Implementation error: Render targets are too "
906 "big in %s, refusing to bind framebuffer state!\n", __func__);
907 return;
908 }
909
910 if (current_state->zsbuf && r300->zmask_in_use && !r300->locked_zbuffer) {
911 /* There is a zmask in use, what are we gonna do? */
912 if (state->zsbuf) {
913 if (!pipe_surface_equal(current_state->zsbuf, state->zsbuf)) {
914 /* Decompress the currently bound zbuffer before we bind another one. */
915 r300_decompress_zmask(r300);
916 r300->hiz_in_use = false;
917 }
918 } else {
919 /* We don't bind another zbuffer, so lock the current one. */
920 pipe_surface_reference(&r300->locked_zbuffer, current_state->zsbuf);
921 }
922 } else if (r300->locked_zbuffer) {
923 /* We have a locked zbuffer now, what are we gonna do? */
924 if (state->zsbuf) {
925 if (!pipe_surface_equal(r300->locked_zbuffer, state->zsbuf)) {
926 /* We are binding some other zbuffer, so decompress the locked one,
927 * it gets unlocked automatically. */
928 r300_decompress_zmask_locked_unsafe(r300);
929 r300->hiz_in_use = false;
930 } else {
931 /* We are binding the locked zbuffer again, so unlock it. */
932 unlock_zbuffer = true;
933 }
934 }
935 }
936 assert(state->zsbuf || (r300->locked_zbuffer && !unlock_zbuffer) || !r300->zmask_in_use);
937
938 /* If zsbuf is set from NULL to non-NULL or vice versa.. */
939 if (!!current_state->zsbuf != !!state->zsbuf) {
940 r300_mark_atom_dirty(r300, &r300->dsa_state);
941 }
942
943 util_copy_framebuffer_state(r300->fb_state.state, state);
944
945 /* Remove trailing NULL colorbuffers. */
946 while (current_state->nr_cbufs && !current_state->cbufs[current_state->nr_cbufs-1])
947 current_state->nr_cbufs--;
948
949 /* Set whether CMASK can be used. */
950 r300->cmask_in_use =
951 state->nr_cbufs == 1 && state->cbufs[0] &&
952 r300->screen->cmask_resource == state->cbufs[0]->texture;
953
954 /* Need to reset clamping or colormask. */
955 r300_mark_atom_dirty(r300, &r300->blend_state);
956
957 /* Re-swizzle the blend color. */
958 r300_set_blend_color(pipe, &((struct r300_blend_color_state*)r300->blend_color_state.state)->state);
959
960 if (unlock_zbuffer) {
961 pipe_surface_reference(&r300->locked_zbuffer, NULL);
962 }
963
964 r300_mark_fb_state_dirty(r300, R300_CHANGED_FB_STATE);
965
966 if (state->zsbuf) {
967 switch (util_format_get_blocksize(state->zsbuf->format)) {
968 case 2:
969 zbuffer_bpp = 16;
970 break;
971 case 4:
972 zbuffer_bpp = 24;
973 break;
974 }
975
976 /* Polygon offset depends on the zbuffer bit depth. */
977 if (r300->zbuffer_bpp != zbuffer_bpp) {
978 r300->zbuffer_bpp = zbuffer_bpp;
979
980 if (r300->polygon_offset_enabled)
981 r300_mark_atom_dirty(r300, &r300->rs_state);
982 }
983 }
984
985 r300->num_samples = util_framebuffer_get_num_samples(state);
986
987 /* Set up AA config. */
988 if (r300->num_samples > 1) {
989 switch (r300->num_samples) {
990 case 2:
991 aa->aa_config = R300_GB_AA_CONFIG_AA_ENABLE |
992 R300_GB_AA_CONFIG_NUM_AA_SUBSAMPLES_2;
993 break;
994 case 4:
995 aa->aa_config = R300_GB_AA_CONFIG_AA_ENABLE |
996 R300_GB_AA_CONFIG_NUM_AA_SUBSAMPLES_4;
997 break;
998 case 6:
999 aa->aa_config = R300_GB_AA_CONFIG_AA_ENABLE |
1000 R300_GB_AA_CONFIG_NUM_AA_SUBSAMPLES_6;
1001 break;
1002 }
1003 } else {
1004 aa->aa_config = 0;
1005 }
1006
1007 if (DBG_ON(r300, DBG_FB)) {
1008 fprintf(stderr, "r300: set_framebuffer_state:\n");
1009 for (i = 0; i < state->nr_cbufs; i++) {
1010 if (state->cbufs[i])
1011 r300_print_fb_surf_info(state->cbufs[i], i, "CB");
1012 }
1013 if (state->zsbuf) {
1014 r300_print_fb_surf_info(state->zsbuf, 0, "ZB");
1015 }
1016 }
1017 }
1018
1019 /* Create fragment shader state. */
1020 static void* r300_create_fs_state(struct pipe_context* pipe,
1021 const struct pipe_shader_state* shader)
1022 {
1023 struct r300_context* r300 = r300_context(pipe);
1024 struct r300_fragment_shader* fs = NULL;
1025
1026 fs = (struct r300_fragment_shader*)CALLOC_STRUCT(r300_fragment_shader);
1027
1028 /* Copy state directly into shader. */
1029 fs->state = *shader;
1030
1031 if (fs->state.type == PIPE_SHADER_IR_NIR) {
1032 //fs->state.tokens = nir_to_rc(shader->ir.nir, pipe->screen);
1033 } else {
1034 assert(fs->state.type == PIPE_SHADER_IR_TGSI);
1035 /* we need to keep a local copy of the tokens */
1036 fs->state.tokens = tgsi_dup_tokens(fs->state.tokens);
1037 }
1038
1039 /* Precompile the fragment shader at creation time to avoid jank at runtime.
1040 * In most cases we won't have anything in the key at draw time.
1041 */
1042 struct r300_fragment_program_external_state precompile_state;
1043 memset(&precompile_state, 0, sizeof(precompile_state));
1044
1045 if (fs->state.type == PIPE_SHADER_IR_NIR) {
1046 /* Pick something for the shadow samplers so that we have somewhat reliable shader stats later. */
1047 nir_foreach_function_impl(impl, shader->ir.nir) {
1048 nir_foreach_block_safe(block, impl) {
1049 nir_foreach_instr_safe(instr, block) {
1050 if (instr->type != nir_instr_type_tex)
1051 continue;
1052 nir_tex_instr *tex = nir_instr_as_tex(instr);
1053
1054 if (tex->is_shadow) {
1055 precompile_state.unit[tex->sampler_index].compare_mode_enabled = true;
1056 precompile_state.unit[tex->sampler_index].texture_compare_func = PIPE_FUNC_LESS;
1057 }
1058 precompile_state.sampler_state_count = MAX2(tex->sampler_index + 1,
1059 precompile_state.sampler_state_count);
1060 }
1061 }
1062 }
1063 }
1064 r300_pick_fragment_shader(r300, fs, &precompile_state);
1065
1066 return (void *)fs;
1067 }
1068
1069 void r300_mark_fs_code_dirty(struct r300_context *r300)
1070 {
1071 struct r300_fragment_shader* fs = r300_fs(r300);
1072
1073 r300_mark_atom_dirty(r300, &r300->fs);
1074 r300_mark_atom_dirty(r300, &r300->fs_rc_constant_state);
1075 r300_mark_atom_dirty(r300, &r300->fs_constants);
1076 r300->fs.size = fs->shader->cb_code_size;
1077
1078 if (r300->screen->caps.is_r500) {
1079 r300->fs_rc_constant_state.size = fs->shader->rc_state_count * 7;
1080 r300->fs_constants.size = fs->shader->externals_count * 4 + 3;
1081 } else {
1082 r300->fs_rc_constant_state.size = fs->shader->rc_state_count * 5;
1083 r300->fs_constants.size = fs->shader->externals_count * 4 + 1;
1084 }
1085
1086 ((struct r300_constant_buffer*)r300->fs_constants.state)->remap_table =
1087 fs->shader->code.constants_remap_table;
1088 }
1089
1090 /* Bind fragment shader state. */
1091 static void r300_bind_fs_state(struct pipe_context* pipe, void* shader)
1092 {
1093 struct r300_context* r300 = r300_context(pipe);
1094 struct r300_fragment_shader* fs = (struct r300_fragment_shader*)shader;
1095
1096 if (!fs) {
1097 r300->fs.state = NULL;
1098 return;
1099 }
1100
1101 r300->fs.state = fs;
1102 r300->fs_status = FRAGMENT_SHADER_DIRTY;
1103
1104 r300_mark_atom_dirty(r300, &r300->rs_block_state); /* Will be updated before the emission. */
1105 }
1106
1107 /* Delete fragment shader state. */
1108 static void r300_delete_fs_state(struct pipe_context* pipe, void* shader)
1109 {
1110 struct r300_fragment_shader* fs = (struct r300_fragment_shader*)shader;
1111 struct r300_fragment_shader_code *tmp, *ptr = fs->first;
1112
1113 free(fs->shader->code.constants_remap_table);
1114
1115 while (ptr) {
1116 tmp = ptr;
1117 ptr = ptr->next;
1118 rc_constants_destroy(&tmp->code.constants);
1119 FREE(tmp->cb_code);
1120 FREE(tmp);
1121 }
1122 if (fs->state.type == PIPE_SHADER_IR_NIR) {
1123 ralloc_free(fs->state.ir.nir);
1124 } else {
1125 FREE((void*)fs->state.tokens);
1126 }
1127 FREE(shader);
1128 }
1129
1130 static void r300_set_polygon_stipple(struct pipe_context* pipe,
1131 const struct pipe_poly_stipple* state)
1132 {
1133 }
1134
1135 /* Create a new rasterizer state based on the CSO rasterizer state.
1136 *
1137 * This is a very large chunk of state, and covers most of the graphics
1138 * backend (GB), geometry assembly (GA), and setup unit (SU) blocks.
1139 *
1140 * In a not entirely unironic sidenote, this state has nearly nothing to do
1141 * with the actual block on the Radeon called the rasterizer (RS). */
1142 static void* r300_create_rs_state(struct pipe_context* pipe,
1143 const struct pipe_rasterizer_state* state)
1144 {
1145 struct r300_rs_state* rs = CALLOC_STRUCT(r300_rs_state);
1146 uint32_t vap_control_status; /* R300_VAP_CNTL_STATUS: 0x2140 */
1147 uint32_t vap_clip_cntl; /* R300_VAP_CLIP_CNTL: 0x221C */
1148 uint32_t point_size; /* R300_GA_POINT_SIZE: 0x421c */
1149 uint32_t point_minmax; /* R300_GA_POINT_MINMAX: 0x4230 */
1150 uint32_t line_control; /* R300_GA_LINE_CNTL: 0x4234 */
1151 uint32_t polygon_offset_enable; /* R300_SU_POLY_OFFSET_ENABLE: 0x42b4 */
1152 uint32_t cull_mode; /* R300_SU_CULL_MODE: 0x42b8 */
1153 uint32_t line_stipple_config; /* R300_GA_LINE_STIPPLE_CONFIG: 0x4328 */
1154 uint32_t line_stipple_value; /* R300_GA_LINE_STIPPLE_VALUE: 0x4260 */
1155 uint32_t polygon_mode; /* R300_GA_POLY_MODE: 0x4288 */
1156 uint32_t clip_rule; /* R300_SC_CLIP_RULE: 0x43D0 */
1157 uint32_t round_mode; /* R300_GA_ROUND_MODE: 0x428c */
1158
1159 /* Point sprites texture coordinates, 0: lower left, 1: upper right */
1160 float point_texcoord_left = 0; /* R300_GA_POINT_S0: 0x4200 */
1161 float point_texcoord_bottom = 0;/* R300_GA_POINT_T0: 0x4204 */
1162 float point_texcoord_right = 1; /* R300_GA_POINT_S1: 0x4208 */
1163 float point_texcoord_top = 0; /* R300_GA_POINT_T1: 0x420c */
1164 bool vclamp = !r300_context(pipe)->screen->caps.is_r500;
1165 CB_LOCALS;
1166
1167 /* Copy rasterizer state. */
1168 rs->rs = *state;
1169 rs->rs_draw = *state;
1170
1171 rs->rs.sprite_coord_enable = state->point_quad_rasterization *
1172 state->sprite_coord_enable;
1173 r300_context(pipe)->is_point = false;
1174
1175 /* Override some states for Draw. */
1176 rs->rs_draw.sprite_coord_enable = 0; /* We can do this in HW. */
1177 rs->rs_draw.offset_point = 0;
1178 rs->rs_draw.offset_line = 0;
1179 rs->rs_draw.offset_tri = 0;
1180 rs->rs_draw.offset_clamp = 0;
1181
1182 #if UTIL_ARCH_LITTLE_ENDIAN
1183 vap_control_status = R300_VC_NO_SWAP;
1184 #else
1185 vap_control_status = R300_VC_32BIT_SWAP;
1186 #endif
1187
1188 /* If no TCL engine is present, turn off the HW TCL. */
1189 if (!r300_screen(pipe->screen)->caps.has_tcl) {
1190 vap_control_status |= R300_VAP_TCL_BYPASS;
1191 }
1192
1193 /* Point size width and height. */
1194 point_size =
1195 pack_float_16_6x(state->point_size) |
1196 (pack_float_16_6x(state->point_size) << R300_POINTSIZE_X_SHIFT);
1197
1198 /* Point size clamping. */
1199 if (state->point_size_per_vertex) {
1200 /* Per-vertex point size.
1201 * Clamp to [0, max FB size] */
1202 float min_psiz = util_get_min_point_size(state);
1203 float max_psiz = pipe->screen->caps.max_point_size;
1204 point_minmax =
1205 (pack_float_16_6x(min_psiz) << R300_GA_POINT_MINMAX_MIN_SHIFT) |
1206 (pack_float_16_6x(max_psiz) << R300_GA_POINT_MINMAX_MAX_SHIFT);
1207 } else {
1208 /* We cannot disable the point-size vertex output,
1209 * so clamp it. */
1210 float psiz = state->point_size;
1211 point_minmax =
1212 (pack_float_16_6x(psiz) << R300_GA_POINT_MINMAX_MIN_SHIFT) |
1213 (pack_float_16_6x(psiz) << R300_GA_POINT_MINMAX_MAX_SHIFT);
1214 }
1215
1216 /* Line control. */
1217 line_control = pack_float_16_6x(state->line_width) |
1218 (state->line_smooth ? R300_GA_LINE_CNTL_END_TYPE_COMP : R300_GA_LINE_CNTL_END_TYPE_SQR);
1219
1220 /* Enable polygon mode */
1221 polygon_mode = 0;
1222 if (state->fill_front != PIPE_POLYGON_MODE_FILL ||
1223 state->fill_back != PIPE_POLYGON_MODE_FILL) {
1224 polygon_mode = R300_GA_POLY_MODE_DUAL;
1225 }
1226
1227 /* Front face */
1228 if (state->front_ccw)
1229 cull_mode = R300_FRONT_FACE_CCW;
1230 else
1231 cull_mode = R300_FRONT_FACE_CW;
1232
1233 /* Polygon offset */
1234 polygon_offset_enable = 0;
1235 if (util_get_offset(state, state->fill_front)) {
1236 polygon_offset_enable |= R300_FRONT_ENABLE;
1237 }
1238 if (util_get_offset(state, state->fill_back)) {
1239 polygon_offset_enable |= R300_BACK_ENABLE;
1240 }
1241
1242 rs->polygon_offset_enable = polygon_offset_enable != 0;
1243
1244 /* Polygon mode */
1245 if (polygon_mode) {
1246 polygon_mode |=
1247 r300_translate_polygon_mode_front(state->fill_front);
1248 polygon_mode |=
1249 r300_translate_polygon_mode_back(state->fill_back);
1250 }
1251
1252 if (state->cull_face & PIPE_FACE_FRONT) {
1253 cull_mode |= R300_CULL_FRONT;
1254 }
1255 if (state->cull_face & PIPE_FACE_BACK) {
1256 cull_mode |= R300_CULL_BACK;
1257 }
1258
1259 if (state->line_stipple_enable) {
1260 line_stipple_config =
1261 R300_GA_LINE_STIPPLE_CONFIG_LINE_RESET_LINE |
1262 (fui((float)state->line_stipple_factor) &
1263 R300_GA_LINE_STIPPLE_CONFIG_STIPPLE_SCALE_MASK);
1264 /* XXX this might need to be scaled up */
1265 line_stipple_value = state->line_stipple_pattern;
1266 } else {
1267 line_stipple_config = 0;
1268 line_stipple_value = 0;
1269 }
1270
1271 if (state->flatshade) {
1272 rs->color_control = R300_SHADE_MODEL_FLAT;
1273 } else {
1274 rs->color_control = R300_SHADE_MODEL_SMOOTH;
1275 }
1276
1277 clip_rule = state->scissor ? 0xAAAA : 0xFFFF;
1278
1279 /* Point sprites coord mode */
1280 switch (state->sprite_coord_mode) {
1281 case PIPE_SPRITE_COORD_UPPER_LEFT:
1282 point_texcoord_top = 0.0f;
1283 point_texcoord_bottom = 1.0f;
1284 break;
1285 case PIPE_SPRITE_COORD_LOWER_LEFT:
1286 point_texcoord_top = 1.0f;
1287 point_texcoord_bottom = 0.0f;
1288 break;
1289 }
1290
1291 if (r300_screen(pipe->screen)->caps.has_tcl) {
1292 vap_clip_cntl = (state->clip_plane_enable & 63) |
1293 R300_PS_UCP_MODE_CLIP_AS_TRIFAN;
1294 } else {
1295 vap_clip_cntl = R300_CLIP_DISABLE;
1296 }
1297
1298 /* Vertex color clamping. FP20 means no clamping. */
1299 round_mode =
1300 R300_GA_ROUND_MODE_GEOMETRY_ROUND_NEAREST |
1301 (!vclamp ? (R300_GA_ROUND_MODE_RGB_CLAMP_FP20 |
1302 R300_GA_ROUND_MODE_ALPHA_CLAMP_FP20) : 0);
1303
1304 /* Build the main command buffer. */
1305 BEGIN_CB(rs->cb_main, RS_STATE_MAIN_SIZE);
1306 OUT_CB_REG(R300_VAP_CNTL_STATUS, vap_control_status);
1307 OUT_CB_REG(R300_VAP_CLIP_CNTL, vap_clip_cntl);
1308 OUT_CB_REG(R300_GA_POINT_SIZE, point_size);
1309 OUT_CB_REG_SEQ(R300_GA_POINT_MINMAX, 2);
1310 OUT_CB(point_minmax);
1311 OUT_CB(line_control);
1312 OUT_CB_REG_SEQ(R300_SU_POLY_OFFSET_ENABLE, 2);
1313 OUT_CB(polygon_offset_enable);
1314 rs->cull_mode_index = 11;
1315 OUT_CB(cull_mode);
1316 OUT_CB_REG(R300_GA_LINE_STIPPLE_CONFIG, line_stipple_config);
1317 OUT_CB_REG(R300_GA_LINE_STIPPLE_VALUE, line_stipple_value);
1318 OUT_CB_REG(R300_GA_POLY_MODE, polygon_mode);
1319 OUT_CB_REG(R300_GA_ROUND_MODE, round_mode);
1320 OUT_CB_REG(R300_SC_CLIP_RULE, clip_rule);
1321 OUT_CB_REG_SEQ(R300_GA_POINT_S0, 4);
1322 OUT_CB_32F(point_texcoord_left);
1323 OUT_CB_32F(point_texcoord_bottom);
1324 OUT_CB_32F(point_texcoord_right);
1325 OUT_CB_32F(point_texcoord_top);
1326 END_CB;
1327
1328 /* Build the two command buffers for polygon offset setup. */
1329 if (polygon_offset_enable) {
1330 float scale = state->offset_scale * 12;
1331 float offset = state->offset_units * 4;
1332
1333 BEGIN_CB(rs->cb_poly_offset_zb16, 5);
1334 OUT_CB_REG_SEQ(R300_SU_POLY_OFFSET_FRONT_SCALE, 4);
1335 OUT_CB_32F(scale);
1336 OUT_CB_32F(offset);
1337 OUT_CB_32F(scale);
1338 OUT_CB_32F(offset);
1339 END_CB;
1340
1341 offset = state->offset_units * 2;
1342
1343 BEGIN_CB(rs->cb_poly_offset_zb24, 5);
1344 OUT_CB_REG_SEQ(R300_SU_POLY_OFFSET_FRONT_SCALE, 4);
1345 OUT_CB_32F(scale);
1346 OUT_CB_32F(offset);
1347 OUT_CB_32F(scale);
1348 OUT_CB_32F(offset);
1349 END_CB;
1350 }
1351
1352 return (void*)rs;
1353 }
1354
1355 /* Bind rasterizer state. */
1356 static void r300_bind_rs_state(struct pipe_context* pipe, void* state)
1357 {
1358 struct r300_context* r300 = r300_context(pipe);
1359 struct r300_rs_state* rs = (struct r300_rs_state*)state;
1360 int last_sprite_coord_enable = r300->sprite_coord_enable;
1361 bool last_two_sided_color = r300->two_sided_color;
1362 bool last_msaa_enable = r300->msaa_enable;
1363 bool last_flatshade = r300->flatshade;
1364 bool last_clip_halfz = r300->clip_halfz;
1365
1366 if (r300->draw && rs) {
1367 draw_set_rasterizer_state(r300->draw, &rs->rs_draw, state);
1368 }
1369
1370 if (rs) {
1371 r300->polygon_offset_enabled = rs->polygon_offset_enable;
1372 r300->sprite_coord_enable = rs->rs.sprite_coord_enable;
1373 r300->two_sided_color = rs->rs.light_twoside;
1374 r300->msaa_enable = rs->rs.multisample;
1375 r300->flatshade = rs->rs.flatshade;
1376 r300->clip_halfz = rs->rs.clip_halfz;
1377 } else {
1378 r300->polygon_offset_enabled = false;
1379 r300->sprite_coord_enable = 0;
1380 r300->two_sided_color = false;
1381 r300->msaa_enable = false;
1382 r300->flatshade = false;
1383 r300->clip_halfz = false;
1384 }
1385
1386 UPDATE_STATE(state, r300->rs_state);
1387 r300->rs_state.size = RS_STATE_MAIN_SIZE + (r300->polygon_offset_enabled ? 5 : 0);
1388
1389 if (last_sprite_coord_enable != r300->sprite_coord_enable ||
1390 last_two_sided_color != r300->two_sided_color ||
1391 last_flatshade != r300->flatshade) {
1392 r300_mark_atom_dirty(r300, &r300->rs_block_state);
1393 }
1394
1395 if (last_msaa_enable != r300->msaa_enable) {
1396 if (r300->alpha_to_coverage) {
1397 r300_mark_atom_dirty(r300, &r300->dsa_state);
1398 }
1399
1400 if (r300->alpha_to_one &&
1401 r300->fs_status == FRAGMENT_SHADER_VALID) {
1402 r300->fs_status = FRAGMENT_SHADER_MAYBE_DIRTY;
1403 }
1404 }
1405
1406 if (r300->screen->caps.has_tcl && last_clip_halfz != r300->clip_halfz) {
1407 r300_mark_atom_dirty(r300, &r300->vs_state);
1408 }
1409 }
1410
1411 /* Free rasterizer state. */
1412 static void r300_delete_rs_state(struct pipe_context* pipe, void* state)
1413 {
1414 FREE(state);
1415 }
1416
1417 static void*
1418 r300_create_sampler_state(struct pipe_context* pipe,
1419 const struct pipe_sampler_state* state)
1420 {
1421 struct r300_context* r300 = r300_context(pipe);
1422 struct r300_sampler_state* sampler = CALLOC_STRUCT(r300_sampler_state);
1423 bool is_r500 = r300->screen->caps.is_r500;
1424 int lod_bias;
1425
1426 sampler->state = *state;
1427
1428 /* r300 doesn't handle CLAMP and MIRROR_CLAMP correctly when either MAG
1429 * or MIN filter is NEAREST. Since texwrap produces the same results
1430 * for CLAMP and CLAMP_TO_EDGE, we use them instead. */
1431 if (sampler->state.min_img_filter == PIPE_TEX_FILTER_NEAREST ||
1432 sampler->state.mag_img_filter == PIPE_TEX_FILTER_NEAREST) {
1433 /* Wrap S. */
1434 if (sampler->state.wrap_s == PIPE_TEX_WRAP_CLAMP)
1435 sampler->state.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
1436 else if (sampler->state.wrap_s == PIPE_TEX_WRAP_MIRROR_CLAMP)
1437 sampler->state.wrap_s = PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE;
1438
1439 /* Wrap T. */
1440 if (sampler->state.wrap_t == PIPE_TEX_WRAP_CLAMP)
1441 sampler->state.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
1442 else if (sampler->state.wrap_t == PIPE_TEX_WRAP_MIRROR_CLAMP)
1443 sampler->state.wrap_t = PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE;
1444
1445 /* Wrap R. */
1446 if (sampler->state.wrap_r == PIPE_TEX_WRAP_CLAMP)
1447 sampler->state.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
1448 else if (sampler->state.wrap_r == PIPE_TEX_WRAP_MIRROR_CLAMP)
1449 sampler->state.wrap_r = PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE;
1450 }
1451
1452 sampler->filter0 |=
1453 (r300_translate_wrap(sampler->state.wrap_s) << R300_TX_WRAP_S_SHIFT) |
1454 (r300_translate_wrap(sampler->state.wrap_t) << R300_TX_WRAP_T_SHIFT) |
1455 (r300_translate_wrap(sampler->state.wrap_r) << R300_TX_WRAP_R_SHIFT);
1456
1457 sampler->filter0 |= r300_translate_tex_filters(state->min_img_filter,
1458 state->mag_img_filter,
1459 state->min_mip_filter,
1460 state->max_anisotropy > 1);
1461
1462 sampler->filter0 |= r300_anisotropy(state->max_anisotropy);
1463
1464 /* Unfortunately, r300-r500 don't support floating-point mipmap lods. */
1465 /* We must pass these to the merge function to clamp them properly. */
1466 sampler->min_lod = (unsigned)MAX2(state->min_lod, 0);
1467 sampler->max_lod = (unsigned)MAX2(ceilf(state->max_lod), 0);
1468
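/* The hardware LOD bias appears to be a signed fixed-point value with 5
 * fractional bits (hence the * 32, and the +-(1 << 9) clamp giving roughly
 * +-16 LODs); the + 1 looks like a rounding nudge. This reading of the
 * magic constants is an assumption, not documented behavior. */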
1469 lod_bias = CLAMP((int)(state->lod_bias * 32 + 1), -(1 << 9), (1 << 9) - 1);
1470
1471 sampler->filter1 |= (lod_bias << R300_LOD_BIAS_SHIFT) & R300_LOD_BIAS_MASK;
1472
1473 /* This is very high quality anisotropic filtering for R5xx.
1474 * It's good for benchmarking the performance of texturing but
1475 * in practice we don't want to slow down the driver because it's
1476 * a pretty good performance killer. Feel free to play with it. */
1477 if (DBG_ON(r300, DBG_ANISOHQ) && is_r500) {
1478 sampler->filter1 |= r500_anisotropy(state->max_anisotropy);
1479 }
1480
1481 /* R500-specific fixups and optimizations */
1482 if (r300->screen->caps.is_r500) {
1483 sampler->filter1 |= R500_BORDER_FIX;
1484 }
1485
1486 return (void*)sampler;
1487 }
1488
1489 static void r300_bind_sampler_states(struct pipe_context* pipe,
1490 enum pipe_shader_type shader,
1491 unsigned start, unsigned count,
1492 void** states)
1493 {
1494 struct r300_context* r300 = r300_context(pipe);
1495 struct r300_textures_state* state =
1496 (struct r300_textures_state*)r300->textures_state.state;
1497 unsigned tex_units = r300->screen->caps.num_tex_units;
1498
1499 assert(start == 0);
1500
1501 if (shader != PIPE_SHADER_FRAGMENT)
1502 return;
1503
1504 if (count > tex_units)
1505 return;
1506
1507 memcpy(state->sampler_states, states, sizeof(void*) * count);
1508 state->sampler_state_count = count;
1509
1510 r300_mark_atom_dirty(r300, &r300->textures_state);
1511 }
1512
1513 static void r300_delete_sampler_state(struct pipe_context* pipe, void* state)
1514 {
1515 FREE(state);
1516 }
1517
1518 static uint32_t r300_assign_texture_cache_region(unsigned index, unsigned num)
1519 {
1520 /* This looks like a hack, but I believe it's supposed to work like
1521 * that. To illustrate how this works, let's assume you have 5 textures.
1522 * From docs, 5 and the successive numbers are:
1523 *
1524 * FOURTH_1 = 5
1525 * FOURTH_2 = 6
1526 * FOURTH_3 = 7
1527 * EIGHTH_0 = 8
1528 * EIGHTH_1 = 9
1529 *
1530 * The first 3 textures will get 3/4 of the cache size, divided evenly
1531 * between them. The last 1/4 of the cache must be divided between
1532 * the last 2 textures, each will therefore get 1/8 of the cache.
1533 * Why not just use "5 + texture_index"?
1534 *
1535 * This simple trick works for all "num" <= 16.
1536 */
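/* Concretely: r300_set_sampler_views() further below passes num == 5 and
 * index 0..4 when five textures are bound, yielding the values 5..9
 * (FOURTH_1 .. EIGHTH_1) listed above. */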
1537 if (num <= 1)
1538 return R300_TX_CACHE(R300_TX_CACHE_WHOLE);
1539 else
1540 return R300_TX_CACHE(num + index);
1541 }
1542
1543 static void r300_set_sampler_views(struct pipe_context* pipe,
1544 enum pipe_shader_type shader,
1545 unsigned start, unsigned count,
1546 unsigned unbind_num_trailing_slots,
1547 bool take_ownership,
1548 struct pipe_sampler_view** views)
1549 {
1550 struct r300_context* r300 = r300_context(pipe);
1551 struct r300_textures_state* state =
1552 (struct r300_textures_state*)r300->textures_state.state;
1553 struct r300_resource *texture;
1554 unsigned i, real_num_views = 0, view_index = 0;
1555 unsigned tex_units = r300->screen->caps.num_tex_units;
1556 bool dirty_tex = false;
1557
1558 assert(start == 0); /* non-zero not handled yet */
1559
1560 if (shader != PIPE_SHADER_FRAGMENT || count > tex_units) {
1561 if (take_ownership) {
1562 for (unsigned i = 0; i < count; i++) {
1563 struct pipe_sampler_view *view = views[i];
1564 pipe_sampler_view_reference(&view, NULL);
1565 }
1566 }
1567 return;
1568 }
1569
1570 /* Calculate the real number of views. */
1571 for (i = 0; i < count; i++) {
1572 if (views[i])
1573 real_num_views++;
1574 }
1575
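    /* For example (illustration only): if views == {A, NULL, B}, then
     * real_num_views == 2, and the loop below assigns cache regions to
     * A and B with view_index 0 and 1 respectively, so the two live
     * textures split the texture cache between them (per the numbering
     * scheme in r300_assign_texture_cache_region) while the NULL slot
     * is skipped. */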
    for (i = 0; i < count; i++) {
        if (take_ownership) {
            pipe_sampler_view_reference(
                (struct pipe_sampler_view**)&state->sampler_views[i], NULL);
            state->sampler_views[i] = (struct r300_sampler_view*)views[i];
        } else {
            pipe_sampler_view_reference(
                (struct pipe_sampler_view**)&state->sampler_views[i],
                views[i]);
        }

        if (!views[i]) {
            continue;
        }

        /* A new sampler view (= texture)... */
        dirty_tex = true;

        /* Set the texrect factor in the fragment shader.
         * Needed for RECT and NPOT fallback. */
        texture = r300_resource(views[i]->texture);
        if (texture->tex.is_npot) {
            r300_mark_atom_dirty(r300, &r300->fs_rc_constant_state);
        }

        state->sampler_views[i]->texcache_region =
            r300_assign_texture_cache_region(view_index, real_num_views);
        view_index++;
    }

    for (i = count; i < tex_units; i++) {
        if (state->sampler_views[i]) {
            pipe_sampler_view_reference(
                (struct pipe_sampler_view**)&state->sampler_views[i],
                NULL);
        }
    }

    state->sampler_view_count = count;

    r300_mark_atom_dirty(r300, &r300->textures_state);

    if (dirty_tex) {
        r300_mark_atom_dirty(r300, &r300->texture_cache_inval);
    }
}

struct pipe_sampler_view *
r300_create_sampler_view_custom(struct pipe_context *pipe,
                                struct pipe_resource *texture,
                                const struct pipe_sampler_view *templ,
                                unsigned width0_override,
                                unsigned height0_override)
{
    struct r300_sampler_view *view = CALLOC_STRUCT(r300_sampler_view);
    struct r300_resource *tex = r300_resource(texture);
    bool is_r500 = r300_screen(pipe->screen)->caps.is_r500;
    bool dxtc_swizzle = r300_screen(pipe->screen)->caps.dxtc_swizzle;

    if (view) {
        unsigned hwformat;

        view->base = *templ;
        view->base.reference.count = 1;
        view->base.context = pipe;
        view->base.texture = NULL;
        pipe_resource_reference(&view->base.texture, texture);

        view->width0_override = width0_override;
        view->height0_override = height0_override;
        view->swizzle[0] = templ->swizzle_r;
        view->swizzle[1] = templ->swizzle_g;
        view->swizzle[2] = templ->swizzle_b;
        view->swizzle[3] = templ->swizzle_a;

        hwformat = r300_translate_texformat(templ->format,
                                            view->swizzle,
                                            is_r500,
                                            dxtc_swizzle);

        if (hwformat == ~0) {
            fprintf(stderr, "r300: Oops. Got unsupported format %s in %s.\n",
                    util_format_short_name(templ->format), __func__);
        }
        assert(hwformat != ~0);

        r300_texture_setup_format_state(r300_screen(pipe->screen), tex,
                                        templ->format, 0,
                                        width0_override, height0_override,
                                        &view->format);
        view->format.format1 |= hwformat;
        if (is_r500) {
            view->format.format2 |= r500_tx_format_msb_bit(templ->format);
        }
    }

    return (struct pipe_sampler_view*)view;
}

static struct pipe_sampler_view *
r300_create_sampler_view(struct pipe_context *pipe,
                         struct pipe_resource *texture,
                         const struct pipe_sampler_view *templ)
{
    return r300_create_sampler_view_custom(pipe, texture, templ,
                                           r300_resource(texture)->tex.width0,
                                           r300_resource(texture)->tex.height0);
}


static void
r300_sampler_view_destroy(struct pipe_context *pipe,
                          struct pipe_sampler_view *view)
{
    pipe_resource_reference(&view->texture, NULL);
    FREE(view);
}

static void r300_set_sample_mask(struct pipe_context *pipe,
                                 unsigned mask)
{
    struct r300_context* r300 = r300_context(pipe);

    *((unsigned*)r300->sample_mask.state) = mask;

    r300_mark_atom_dirty(r300, &r300->sample_mask);
}

static void r300_set_scissor_states(struct pipe_context* pipe,
                                    unsigned start_slot,
                                    unsigned num_scissors,
                                    const struct pipe_scissor_state* state)
{
    struct r300_context* r300 = r300_context(pipe);

    memcpy(r300->scissor_state.state, state,
           sizeof(struct pipe_scissor_state));

    r300_mark_atom_dirty(r300, &r300->scissor_state);
}

static void r300_set_viewport_states(struct pipe_context* pipe,
                                     unsigned start_slot,
                                     unsigned num_viewports,
                                     const struct pipe_viewport_state* state)
{
    struct r300_context* r300 = r300_context(pipe);
    struct r300_viewport_state* viewport =
        (struct r300_viewport_state*)r300->viewport_state.state;

    r300->viewport = *state;

    if (r300->draw) {
        draw_set_viewport_states(r300->draw, start_slot, num_viewports, state);
        viewport->vte_control = R300_VTX_XY_FMT | R300_VTX_Z_FMT;
        return;
    }

    /* Do the transform in HW. */
    viewport->vte_control = R300_VTX_W0_FMT;

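    /* Each scale/offset component is only enabled below when it differs
     * from the identity transform.  As an illustration (typical GL-style
     * viewport values, an assumption for the example only, not something
     * this function relies on): an X-by-Y viewport usually arrives as
     * scale = (X/2, -Y/2, 0.5) and translate = (X/2, Y/2, 0.5), which
     * would set all six ENA bits. */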
    if (state->scale[0] != 1.0f) {
        viewport->xscale = state->scale[0];
        viewport->vte_control |= R300_VPORT_X_SCALE_ENA;
    }
    if (state->scale[1] != 1.0f) {
        viewport->yscale = state->scale[1];
        viewport->vte_control |= R300_VPORT_Y_SCALE_ENA;
    }
    if (state->scale[2] != 1.0f) {
        viewport->zscale = state->scale[2];
        viewport->vte_control |= R300_VPORT_Z_SCALE_ENA;
    }
    if (state->translate[0] != 0.0f) {
        viewport->xoffset = state->translate[0];
        viewport->vte_control |= R300_VPORT_X_OFFSET_ENA;
    }
    if (state->translate[1] != 0.0f) {
        viewport->yoffset = state->translate[1];
        viewport->vte_control |= R300_VPORT_Y_OFFSET_ENA;
    }
    if (state->translate[2] != 0.0f) {
        viewport->zoffset = state->translate[2];
        viewport->vte_control |= R300_VPORT_Z_OFFSET_ENA;
    }

    r300_mark_atom_dirty(r300, &r300->viewport_state);
    if (r300->fs.state && r300_fs(r300)->shader &&
        r300_fs(r300)->shader->inputs.wpos != ATTR_UNUSED) {
        r300_mark_atom_dirty(r300, &r300->fs_rc_constant_state);
    }
}

static void r300_set_vertex_buffers_hwtcl(struct pipe_context* pipe,
                                          unsigned count,
                                          const struct pipe_vertex_buffer* buffers)
{
    struct r300_context* r300 = r300_context(pipe);

    util_set_vertex_buffers_count(r300->vertex_buffer,
                                  &r300->nr_vertex_buffers, buffers, count,
                                  true);

    /* There must be at least one vertex buffer set, otherwise it locks up. */
    if (!r300->nr_vertex_buffers) {
        util_set_vertex_buffers_count(r300->vertex_buffer,
                                      &r300->nr_vertex_buffers,
                                      &r300->dummy_vb, 1, false);
    }

    r300->vertex_arrays_dirty = true;
}

static void r300_set_vertex_buffers_swtcl(struct pipe_context* pipe,
                                          unsigned count,
                                          const struct pipe_vertex_buffer* buffers)
{
    struct r300_context* r300 = r300_context(pipe);
    unsigned i;

    util_set_vertex_buffers_count(r300->vertex_buffer,
                                  &r300->nr_vertex_buffers, buffers, count,
                                  true);
    draw_set_vertex_buffers(r300->draw, count, buffers);

    if (!buffers)
        return;

    for (i = 0; i < count; i++) {
        if (buffers[i].is_user_buffer) {
            draw_set_mapped_vertex_buffer(r300->draw, i,
                                          buffers[i].buffer.user, ~0);
        } else if (buffers[i].buffer.resource) {
            draw_set_mapped_vertex_buffer(r300->draw, i,
                                          r300_resource(buffers[i].buffer.resource)->malloced_buffer, ~0);
        }
    }
}

/* Initialize the PSC tables. */
static void r300_vertex_psc(struct r300_vertex_element_state *velems)
{
    struct r300_vertex_stream_state *vstream = &velems->vertex_stream;
    uint16_t type, swizzle;
    enum pipe_format format;
    unsigned i;

    /* Vertex shaders have no semantics on their inputs,
     * so PSC should just route stuff based on the vertex elements,
     * and not on attrib information. */
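    /* A quick sketch of the packing done below (derived from the code,
     * for reference only): two elements share one 32-bit PSC dword, with
     * even elements in the low half and odd elements in the high half.
     * E.g. with three elements, elements 0 and 1 land in
     * vap_prog_stream_cntl[0] / _ext[0], element 2 lands in the low half
     * of [1] and also carries the R300_LAST_VEC bit, and vstream->count
     * ends up as 2. */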
    for (i = 0; i < velems->count; i++) {
        format = velems->velem[i].src_format;

        type = r300_translate_vertex_data_type(format);
        if (type == R300_INVALID_FORMAT) {
            fprintf(stderr, "r300: Bad vertex format %s.\n",
                    util_format_short_name(format));
            assert(0);
            abort();
        }

        type |= i << R300_DST_VEC_LOC_SHIFT;
        swizzle = r300_translate_vertex_data_swizzle(format);

        if (i & 1) {
            vstream->vap_prog_stream_cntl[i >> 1] |= type << 16;
            vstream->vap_prog_stream_cntl_ext[i >> 1] |= (uint32_t)swizzle << 16;
        } else {
            vstream->vap_prog_stream_cntl[i >> 1] |= type;
            vstream->vap_prog_stream_cntl_ext[i >> 1] |= swizzle;
        }
    }

    /* Set the last vector in the PSC. */
    if (i) {
        i -= 1;
    }
    vstream->vap_prog_stream_cntl[i >> 1] |=
        (R300_LAST_VEC << (i & 1 ? 16 : 0));

    vstream->count = (i >> 1) + 1;
}

static void* r300_create_vertex_elements_state(struct pipe_context* pipe,
                                               unsigned count,
                                               const struct pipe_vertex_element* attribs)
{
    struct r300_vertex_element_state *velems;
    unsigned i;
    struct pipe_vertex_element dummy_attrib = {0};

    /* R300 Programmable Stream Control (PSC) doesn't support 0 vertex elements. */
    if (!count) {
        dummy_attrib.src_format = PIPE_FORMAT_R8G8B8A8_UNORM;
        attribs = &dummy_attrib;
        count = 1;
    } else if (count > 16) {
        fprintf(stderr, "r300: More than 16 vertex elements are not supported,"
                " requested %i, using 16.\n", count);
        count = 16;
    }

    velems = CALLOC_STRUCT(r300_vertex_element_state);
    if (!velems)
        return NULL;

    velems->count = count;
    memcpy(velems->velem, attribs, sizeof(struct pipe_vertex_element) * count);

    if (r300_screen(pipe->screen)->caps.has_tcl) {
        /* Setup PSC.
         * The unused components will be replaced by (..., 0, 1). */
        r300_vertex_psc(velems);

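        /* The loop below keeps per-element sizes aligned to dwords.  As an
         * illustrative example (not tied to any particular state tracker):
         * PIPE_FORMAT_R16G16B16_FLOAT has a 6-byte block, align(6, 4) == 8,
         * so it contributes 2 dwords to vertex_size_dwords. */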
        for (i = 0; i < count; i++) {
            velems->format_size[i] =
                align(util_format_get_blocksize(velems->velem[i].src_format), 4);
            velems->vertex_size_dwords += velems->format_size[i] / 4;
        }
    }

    return velems;
}

static void r300_bind_vertex_elements_state(struct pipe_context *pipe,
                                            void *state)
{
    struct r300_context *r300 = r300_context(pipe);
    struct r300_vertex_element_state *velems = state;

    if (!velems) {
        return;
    }

    r300->velems = velems;

    if (r300->draw) {
        draw_set_vertex_elements(r300->draw, velems->count, velems->velem);
        return;
    }

    UPDATE_STATE(&velems->vertex_stream, r300->vertex_stream_state);
    r300->vertex_stream_state.size = (1 + velems->vertex_stream.count) * 2;
    r300->vertex_arrays_dirty = true;
}

static void r300_delete_vertex_elements_state(struct pipe_context *pipe, void *state)
{
    FREE(state);
}

static void* r300_create_vs_state(struct pipe_context* pipe,
                                  const struct pipe_shader_state* shader)
{
    struct r300_context* r300 = r300_context(pipe);
    struct r300_vertex_shader* vs = CALLOC_STRUCT(r300_vertex_shader);

    /* Copy state directly into shader. */
    vs->state = *shader;

    if (vs->state.type == PIPE_SHADER_IR_NIR) {
        struct r300_fragment_program_external_state state = {};
        vs->state.tokens = nir_to_rc(shader->ir.nir, pipe->screen, state);
    } else {
        assert(vs->state.type == PIPE_SHADER_IR_TGSI);
        /* we need to keep a local copy of the tokens */
        vs->state.tokens = tgsi_dup_tokens(vs->state.tokens);
    }

    if (!vs->first)
        vs->first = vs->shader = CALLOC_STRUCT(r300_vertex_shader_code);
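    /* Note (summarizing behavior visible elsewhere in this file, not new
     * logic): vs->first is the head of the list of compiled code for this
     * shader; further entries may be chained via ->next, and
     * r300_delete_vs_state() below walks and frees the whole chain. */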
    if (r300->screen->caps.has_tcl) {
        r300_translate_vertex_shader(r300, vs);
    } else {
        r300_draw_init_vertex_shader(r300, vs);
    }

    return vs;
}

static void r300_bind_vs_state(struct pipe_context* pipe, void* shader)
{
    struct r300_context* r300 = r300_context(pipe);
    struct r300_vertex_shader* vs = (struct r300_vertex_shader*)shader;

    if (!vs) {
        r300->vs_state.state = NULL;
        return;
    }
    if (vs == r300->vs_state.state) {
        return;
    }
    r300->vs_state.state = vs;

    /* Most of the RS block bits depend on the vertex shader. */
    r300_mark_atom_dirty(r300, &r300->rs_block_state); /* Will be updated before emission. */

    if (r300->screen->caps.has_tcl) {
        unsigned fc_op_dwords = r300->screen->caps.is_r500 ? 3 : 2;
        r300_mark_atom_dirty(r300, &r300->vs_state);
        r300->vs_state.size = vs->shader->code.length + 9 +
            (R300_VS_MAX_FC_OPS * fc_op_dwords + 4);

        r300_mark_atom_dirty(r300, &r300->vs_constants);
        r300->vs_constants.size =
            2 +
            (vs->shader->externals_count ? vs->shader->externals_count * 4 + 3 : 0) +
            (vs->shader->immediates_count ? vs->shader->immediates_count * 4 + 3 : 0);

        ((struct r300_constant_buffer*)r300->vs_constants.state)->remap_table =
            vs->shader->code.constants_remap_table;

        r300_mark_atom_dirty(r300, &r300->pvs_flush);
    } else {
        draw_bind_vertex_shader(r300->draw,
                                (struct draw_vertex_shader*)vs->draw_vs);
    }
}

static void r300_delete_vs_state(struct pipe_context* pipe, void* shader)
{
    struct r300_context* r300 = r300_context(pipe);
    struct r300_vertex_shader* vs = (struct r300_vertex_shader*)shader;

    if (r300->screen->caps.has_tcl) {
        while (vs->shader) {
            rc_constants_destroy(&vs->shader->code.constants);
            FREE(vs->shader->code.constants_remap_table);
            vs->shader = vs->shader->next;
            FREE(vs->first);
            vs->first = vs->shader;
        }
    } else {
        draw_delete_vertex_shader(r300->draw,
                                  (struct draw_vertex_shader*)vs->draw_vs);
    }

    FREE((void*)vs->state.tokens);
    FREE(shader);
}

static void r300_set_constant_buffer(struct pipe_context *pipe,
                                     enum pipe_shader_type shader, uint index,
                                     bool take_ownership,
                                     const struct pipe_constant_buffer *cb)
{
    struct r300_context* r300 = r300_context(pipe);
    struct r300_constant_buffer *cbuf;
    uint32_t *mapped;

    if (!cb || (!cb->buffer && !cb->user_buffer))
        return;

    switch (shader) {
        case PIPE_SHADER_VERTEX:
            cbuf = (struct r300_constant_buffer*)r300->vs_constants.state;
            break;
        case PIPE_SHADER_FRAGMENT:
            cbuf = (struct r300_constant_buffer*)r300->fs_constants.state;
            break;
        default:
            return;
    }

    if (cb->user_buffer)
        mapped = (uint32_t*)cb->user_buffer;
    else {
        struct r300_resource *rbuf = r300_resource(cb->buffer);

        if (rbuf && rbuf->malloced_buffer)
            mapped = (uint32_t*)(rbuf->malloced_buffer + cb->buffer_offset);
        else
            return;
    }

    if (shader == PIPE_SHADER_FRAGMENT ||
        (shader == PIPE_SHADER_VERTEX && r300->screen->caps.has_tcl)) {
        cbuf->ptr = mapped;
    }

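    /* Vertex constants live in the PVS constant file at a per-shader base
     * offset: the code below hands out bases by bumping vs_const_base by
     * the shader's constant count, and restarts at base 0 (scheduling a
     * PVS flush) once the running total exceeds R500_MAX_PVS_CONST_VECS. */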
    if (shader == PIPE_SHADER_VERTEX) {
        if (r300->screen->caps.has_tcl) {
            struct r300_vertex_shader *vs = r300_vs(r300);

            if (!vs) {
                cbuf->buffer_base = 0;
                return;
            }

            cbuf->buffer_base = r300->vs_const_base;
            r300->vs_const_base += vs->shader->code.constants.Count;
            if (r300->vs_const_base > R500_MAX_PVS_CONST_VECS) {
                r300->vs_const_base = vs->shader->code.constants.Count;
                cbuf->buffer_base = 0;
                r300_mark_atom_dirty(r300, &r300->pvs_flush);
            }
            r300_mark_atom_dirty(r300, &r300->vs_constants);
        } else if (r300->draw) {
            draw_set_mapped_constant_buffer(r300->draw, PIPE_SHADER_VERTEX,
                                            0, mapped, cb->buffer_size);
        }
    } else if (shader == PIPE_SHADER_FRAGMENT) {
        r300_mark_atom_dirty(r300, &r300->fs_constants);
    }
}

static void r300_texture_barrier(struct pipe_context *pipe, unsigned flags)
{
    struct r300_context *r300 = r300_context(pipe);

    r300_mark_atom_dirty(r300, &r300->gpu_flush);
    r300_mark_atom_dirty(r300, &r300->texture_cache_inval);
}

static void r300_memory_barrier(struct pipe_context *pipe, unsigned flags)
{
}

void r300_init_state_functions(struct r300_context* r300)
{
    r300->context.create_blend_state = r300_create_blend_state;
    r300->context.bind_blend_state = r300_bind_blend_state;
    r300->context.delete_blend_state = r300_delete_blend_state;

    r300->context.set_blend_color = r300_set_blend_color;

    r300->context.set_clip_state = r300_set_clip_state;
    r300->context.set_sample_mask = r300_set_sample_mask;

    r300->context.set_constant_buffer = r300_set_constant_buffer;

    r300->context.create_depth_stencil_alpha_state = r300_create_dsa_state;
    r300->context.bind_depth_stencil_alpha_state = r300_bind_dsa_state;
    r300->context.delete_depth_stencil_alpha_state = r300_delete_dsa_state;

    r300->context.set_stencil_ref = r300_set_stencil_ref;

    r300->context.set_framebuffer_state = r300_set_framebuffer_state;

    r300->context.create_fs_state = r300_create_fs_state;
    r300->context.bind_fs_state = r300_bind_fs_state;
    r300->context.delete_fs_state = r300_delete_fs_state;

    r300->context.set_polygon_stipple = r300_set_polygon_stipple;

    r300->context.create_rasterizer_state = r300_create_rs_state;
    r300->context.bind_rasterizer_state = r300_bind_rs_state;
    r300->context.delete_rasterizer_state = r300_delete_rs_state;

    r300->context.create_sampler_state = r300_create_sampler_state;
    r300->context.bind_sampler_states = r300_bind_sampler_states;
    r300->context.delete_sampler_state = r300_delete_sampler_state;

    r300->context.set_sampler_views = r300_set_sampler_views;
    r300->context.create_sampler_view = r300_create_sampler_view;
    r300->context.sampler_view_destroy = r300_sampler_view_destroy;

    r300->context.set_scissor_states = r300_set_scissor_states;

    r300->context.set_viewport_states = r300_set_viewport_states;

    if (r300->screen->caps.has_tcl) {
        r300->context.set_vertex_buffers = r300_set_vertex_buffers_hwtcl;
    } else {
        r300->context.set_vertex_buffers = r300_set_vertex_buffers_swtcl;
    }

    r300->context.create_vertex_elements_state = r300_create_vertex_elements_state;
    r300->context.bind_vertex_elements_state = r300_bind_vertex_elements_state;
    r300->context.delete_vertex_elements_state = r300_delete_vertex_elements_state;

    r300->context.create_vs_state = r300_create_vs_state;
    r300->context.bind_vs_state = r300_bind_vs_state;
    r300->context.delete_vs_state = r300_delete_vs_state;

    r300->context.texture_barrier = r300_texture_barrier;
    r300->context.memory_barrier = r300_memory_barrier;
}