/**************************************************************************
 *
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "i915_reg.h"
#include "i915_context.h"
#include "i915_batch.h"
#include "i915_debug.h"
#include "i915_fpc.h"
#include "i915_resource.h"

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_format.h"

#include "util/format/u_format.h"
#include "util/u_math.h"
#include "util/u_memory.h"

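/* Hardware state is grouped into "atoms": each one pairs a validate() hook,
 * which reports how many batch dwords it will emit and registers any buffers
 * that need validation, with an emit() hook that writes exactly those dwords.
 * This descriptor ties the two together with the hardware-dirty flag they
 * serve.
 */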
struct i915_tracked_hw_state {
   const char *name;
   void (*validate)(struct i915_context *, unsigned *batch_space);
   void (*emit)(struct i915_context *);
   unsigned dirty, batch_space;
};


static void
validate_flush(struct i915_context *i915, unsigned *batch_space)
{
   *batch_space = i915->flush_dirty ? 1 : 0;
}

static void
emit_flush(struct i915_context *i915)
{
   /* Cache handling is very cheap atm. State handling can request two flushes:
    * - I915_FLUSH_CACHE, which is a flush-everything request, and
    * - I915_PIPELINE_FLUSH, which is specifically for the draw_offset flush.
    * Because the cache handling is so dumb, there is no explicit "invalidate
    * map cache". Also, the first is a strict superset of the latter, so the
    * following logic works. */
   if (i915->flush_dirty & I915_FLUSH_CACHE)
      OUT_BATCH(MI_FLUSH | FLUSH_MAP_CACHE);
   else if (i915->flush_dirty & I915_PIPELINE_FLUSH)
      OUT_BATCH(MI_FLUSH | INHIBIT_FLUSH_RENDER_CACHE);
}

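/* State that does not change over the life of the context: anti-aliased line
 * defaults, default diffuse/specular/Z values, texture coordinate set
 * bindings, rasterization rules, the depth subrectangle disable and the
 * indirect-state disable. emit_invariant() copies this block verbatim into
 * the batch whenever I915_HW_INVARIANT is dirty.
 */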
uint32_t invariant_state[] = {
   _3DSTATE_AA_CMD | AA_LINE_ECAAR_WIDTH_ENABLE | AA_LINE_ECAAR_WIDTH_1_0 |
      AA_LINE_REGION_WIDTH_ENABLE | AA_LINE_REGION_WIDTH_1_0,

   _3DSTATE_DFLT_DIFFUSE_CMD, 0,

   _3DSTATE_DFLT_SPEC_CMD, 0,

   _3DSTATE_DFLT_Z_CMD, 0,

   _3DSTATE_COORD_SET_BINDINGS |
      CSB_TCB(0, 0) |
      CSB_TCB(1, 1) |
      CSB_TCB(2, 2) |
      CSB_TCB(3, 3) |
      CSB_TCB(4, 4) |
      CSB_TCB(5, 5) |
      CSB_TCB(6, 6) |
      CSB_TCB(7, 7),

   _3DSTATE_RASTER_RULES_CMD |
      ENABLE_POINT_RASTER_RULE |
      OGL_POINT_RASTER_RULE |
      ENABLE_LINE_STRIP_PROVOKE_VRTX |
      ENABLE_TRI_FAN_PROVOKE_VRTX |
      LINE_STRIP_PROVOKE_VRTX(1) |
      TRI_FAN_PROVOKE_VRTX(2) |
      ENABLE_TEXKILL_3D_4D |
      TEXKILL_4D,

   _3DSTATE_DEPTH_SUBRECT_DISABLE,

   /* disable indirect state for now
    */
   _3DSTATE_LOAD_INDIRECT | 0, 0};

static void
emit_invariant(struct i915_context *i915)
{
   i915_winsys_batchbuffer_write(i915->batch, invariant_state,
                                 ARRAY_SIZE(invariant_state) * sizeof(uint32_t));
}

static void
validate_immediate(struct i915_context *i915, unsigned *batch_space)
{
   unsigned dirty = (1 << I915_IMMEDIATE_S0 | 1 << I915_IMMEDIATE_S1 |
                     1 << I915_IMMEDIATE_S2 | 1 << I915_IMMEDIATE_S3 |
                     1 << I915_IMMEDIATE_S4 | 1 << I915_IMMEDIATE_S5 |
                     1 << I915_IMMEDIATE_S6) &
                    i915->immediate_dirty;

   if (i915->immediate_dirty & (1 << I915_IMMEDIATE_S0) && i915->vbo)
      i915->validation_buffers[i915->num_validation_buffers++] = i915->vbo;

   *batch_space = 1 + util_bitcount(dirty);
}

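/* For some color formats the channel layout the hardware writes does not
 * match the Gallium format (RGBA is handled with a swizzle in the shader,
 * and L8/I8/A8 live in other hardware channels), so the per-channel
 * write-disable bits in S5 must be remapped accordingly. This table gives
 * the remapping per format; the last entry is the identity default used for
 * everything else.
 */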
static uint target_fixup(struct pipe_surface *p, int component)
{
   const struct {
      enum pipe_format format;
      uint hw_mask[4];
   } fixup_mask[] = {
      { PIPE_FORMAT_R8G8B8A8_UNORM, { S5_WRITEDISABLE_BLUE, S5_WRITEDISABLE_GREEN,
                                      S5_WRITEDISABLE_RED, S5_WRITEDISABLE_ALPHA }},
      { PIPE_FORMAT_R8G8B8X8_UNORM, { S5_WRITEDISABLE_BLUE, S5_WRITEDISABLE_GREEN,
                                      S5_WRITEDISABLE_RED, S5_WRITEDISABLE_ALPHA }},
      { PIPE_FORMAT_L8_UNORM, { S5_WRITEDISABLE_RED | S5_WRITEDISABLE_GREEN |
                                S5_WRITEDISABLE_BLUE, 0, 0, S5_WRITEDISABLE_ALPHA }},
      { PIPE_FORMAT_I8_UNORM, { S5_WRITEDISABLE_RED | S5_WRITEDISABLE_GREEN |
                                S5_WRITEDISABLE_BLUE, 0, 0, S5_WRITEDISABLE_ALPHA }},
      { PIPE_FORMAT_A8_UNORM, { 0, 0, 0, S5_WRITEDISABLE_RED | S5_WRITEDISABLE_GREEN |
                                         S5_WRITEDISABLE_BLUE | S5_WRITEDISABLE_ALPHA }},
      { 0, { S5_WRITEDISABLE_RED, S5_WRITEDISABLE_GREEN,
             S5_WRITEDISABLE_BLUE, S5_WRITEDISABLE_ALPHA }}
   };
   int i = sizeof(fixup_mask) / sizeof(*fixup_mask) - 1;

   if (p)
      for (i = 0; fixup_mask[i].format != 0; i++)
         if (p->format == fixup_mask[i].format)
            return fixup_mask[i].hw_mask[component];

   /* Just return default masks */
   return fixup_mask[i].hw_mask[component];
}

static void emit_immediate_s5(struct i915_context *i915, uint imm)
{
   /* Fixup write mask for non-BGRA render targets */
   uint fixup_imm = imm & ~(S5_WRITEDISABLE_RED | S5_WRITEDISABLE_GREEN |
                            S5_WRITEDISABLE_BLUE | S5_WRITEDISABLE_ALPHA);
   struct pipe_surface *surf = i915->framebuffer.cbufs[0];

   if (imm & S5_WRITEDISABLE_RED)
      fixup_imm |= target_fixup(surf, 0);
   if (imm & S5_WRITEDISABLE_GREEN)
      fixup_imm |= target_fixup(surf, 1);
   if (imm & S5_WRITEDISABLE_BLUE)
      fixup_imm |= target_fixup(surf, 2);
   if (imm & S5_WRITEDISABLE_ALPHA)
      fixup_imm |= target_fixup(surf, 3);

   OUT_BATCH(fixup_imm);
}

static void emit_immediate_s6(struct i915_context *i915, uint imm)
{
   /* Fixup blend function for A8 dst buffers.
    * When we blend to an A8 buffer, the GPU thinks it's a G8 buffer,
    * and therefore we need to use the color factor for alphas. */
   uint srcRGB;

   if (i915->current.target_fixup_format == PIPE_FORMAT_A8_UNORM) {
      srcRGB = (imm >> S6_CBUF_SRC_BLEND_FACT_SHIFT) & BLENDFACT_MASK;
      if (srcRGB == BLENDFACT_DST_ALPHA)
         srcRGB = BLENDFACT_DST_COLR;
      else if (srcRGB == BLENDFACT_INV_DST_ALPHA)
         srcRGB = BLENDFACT_INV_DST_COLR;
      imm &= ~SRC_BLND_FACT(BLENDFACT_MASK);
      imm |= SRC_BLND_FACT(srcRGB);
   }

   OUT_BATCH(imm);
}

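/* The immediate atom is emitted as a single _3DSTATE_LOAD_STATE_IMMEDIATE_1
 * packet: a header dword selecting which of S0-S6 follow plus their count,
 * then one dword per dirty word. S0 holds the vertex buffer address and so
 * goes out as a relocation; S5 and S6 may need fixups for emulated color
 * formats (see above).
 */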
static void
emit_immediate(struct i915_context *i915)
{
   /* remove unwanted bits and S7 */
   unsigned dirty = (1 << I915_IMMEDIATE_S0 | 1 << I915_IMMEDIATE_S1 |
                     1 << I915_IMMEDIATE_S2 | 1 << I915_IMMEDIATE_S3 |
                     1 << I915_IMMEDIATE_S4 | 1 << I915_IMMEDIATE_S5 |
                     1 << I915_IMMEDIATE_S6) &
                    i915->immediate_dirty;
   int i, num = util_bitcount(dirty);
   assert(num && num <= I915_MAX_IMMEDIATE);

   OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 |
             dirty << 4 | (num - 1));

   if (i915->immediate_dirty & (1 << I915_IMMEDIATE_S0)) {
      if (i915->vbo)
         OUT_RELOC(i915->vbo, I915_USAGE_VERTEX,
                   i915->current.immediate[I915_IMMEDIATE_S0]);
      else
         OUT_BATCH(0);
   }

   for (i = 1; i < I915_MAX_IMMEDIATE; i++) {
      if (dirty & (1 << i)) {
         if (i == I915_IMMEDIATE_S5)
            emit_immediate_s5(i915, i915->current.immediate[i]);
         else if (i == I915_IMMEDIATE_S6)
            emit_immediate_s6(i915, i915->current.immediate[i]);
         else
            OUT_BATCH(i915->current.immediate[i]);
      }
   }
}

static void
validate_dynamic(struct i915_context *i915, unsigned *batch_space)
{
   *batch_space =
      util_bitcount(i915->dynamic_dirty & ((1 << I915_MAX_DYNAMIC) - 1));
}

static void
emit_dynamic(struct i915_context *i915)
{
   int i;
   for (i = 0; i < I915_MAX_DYNAMIC; i++) {
      if (i915->dynamic_dirty & (1 << i))
         OUT_BATCH(i915->current.dynamic[i]);
   }
}

static void
validate_static(struct i915_context *i915, unsigned *batch_space)
{
   *batch_space = 0;

   if (i915->current.cbuf_bo && (i915->static_dirty & I915_DST_BUF_COLOR)) {
      i915->validation_buffers[i915->num_validation_buffers++] =
         i915->current.cbuf_bo;
      *batch_space += 3;
   }

   if (i915->current.depth_bo && (i915->static_dirty & I915_DST_BUF_DEPTH)) {
      i915->validation_buffers[i915->num_validation_buffers++] =
         i915->current.depth_bo;
      *batch_space += 3;
   }

   if (i915->static_dirty & I915_DST_VARS)
      *batch_space += 2;

   if (i915->static_dirty & I915_DST_RECT)
      *batch_space += 5;
}

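/* Each _3DSTATE_BUF_INFO_CMD packet below is three dwords: the header, the
 * buffer flags word, and a relocation for the buffer itself. That is where
 * the "+= 3" accounting in validate_static() comes from; the DST_VARS and
 * DST_RECT packets account for the remaining 2 and 5 dwords.
 */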
static void
emit_static(struct i915_context *i915)
{
   if (i915->current.cbuf_bo && (i915->static_dirty & I915_DST_BUF_COLOR)) {
      OUT_BATCH(_3DSTATE_BUF_INFO_CMD);
      OUT_BATCH(i915->current.cbuf_flags);
      OUT_RELOC(i915->current.cbuf_bo,
                I915_USAGE_RENDER,
                0);
   }

   /* What happens if no zbuf??
    */
   if (i915->current.depth_bo && (i915->static_dirty & I915_DST_BUF_DEPTH)) {
      OUT_BATCH(_3DSTATE_BUF_INFO_CMD);
      OUT_BATCH(i915->current.depth_flags);
      OUT_RELOC(i915->current.depth_bo,
                I915_USAGE_RENDER,
                0);
   }

   if (i915->static_dirty & I915_DST_VARS) {
      OUT_BATCH(_3DSTATE_DST_BUF_VARS_CMD);
      OUT_BATCH(i915->current.dst_buf_vars);
   }
}

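/* The map atom describes the texture images: a _3DSTATE_MAP_STATE header, an
 * enable mask, then three dwords per enabled unit (a relocation for the
 * texture buffer plus the precomputed MS3/MS4 words). validate_map() must
 * also register every referenced texture buffer on the validation list.
 */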
static void
validate_map(struct i915_context *i915, unsigned *batch_space)
{
   const uint enabled = i915->current.sampler_enable_flags;
   uint unit;
   struct i915_texture *tex;

   *batch_space = i915->current.sampler_enable_nr ?
                  2 + 3 * i915->current.sampler_enable_nr : 0;

   for (unit = 0; unit < I915_TEX_UNITS; unit++) {
      if (enabled & (1 << unit)) {
         tex = i915_texture(i915->fragment_sampler_views[unit]->texture);
         i915->validation_buffers[i915->num_validation_buffers++] = tex->buffer;
      }
   }
}

static void
emit_map(struct i915_context *i915)
{
   const uint nr = i915->current.sampler_enable_nr;
   if (nr) {
      const uint enabled = i915->current.sampler_enable_flags;
      uint unit;
      uint count = 0;
      OUT_BATCH(_3DSTATE_MAP_STATE | (3 * nr));
      OUT_BATCH(enabled);
      for (unit = 0; unit < I915_TEX_UNITS; unit++) {
         if (enabled & (1 << unit)) {
            struct i915_texture *texture =
               i915_texture(i915->fragment_sampler_views[unit]->texture);
            struct i915_winsys_buffer *buf = texture->buffer;
            unsigned offset = i915->current.texbuffer[unit][2];

            assert(buf);

            count++;

            OUT_RELOC(buf, I915_USAGE_SAMPLER, offset);
            OUT_BATCH(i915->current.texbuffer[unit][0]); /* MS3 */
            OUT_BATCH(i915->current.texbuffer[unit][1]); /* MS4 */
         }
      }
      assert(count == nr);
   }
}

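/* The sampler atom mirrors the map atom: a _3DSTATE_SAMPLER_STATE header, an
 * enable mask, and three precomputed state dwords per enabled sampler. It
 * references no buffers, so there is nothing to add to the validation list.
 */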
static void
validate_sampler(struct i915_context *i915, unsigned *batch_space)
{
   *batch_space = i915->current.sampler_enable_nr ?
                  2 + 3 * i915->current.sampler_enable_nr : 0;
}

static void
emit_sampler(struct i915_context *i915)
{
   if (i915->current.sampler_enable_nr) {
      int i;

      OUT_BATCH( _3DSTATE_SAMPLER_STATE |
                 (3 * i915->current.sampler_enable_nr) );

      OUT_BATCH( i915->current.sampler_enable_flags );

      for (i = 0; i < I915_TEX_UNITS; i++) {
         if (i915->current.sampler_enable_flags & (1 << i)) {
            OUT_BATCH( i915->current.sampler[i][0] );
            OUT_BATCH( i915->current.sampler[i][1] );
            OUT_BATCH( i915->current.sampler[i][2] );
         }
      }
   }
}

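/* Fragment shader constants go out as one _3DSTATE_PIXEL_SHADER_CONSTANTS
 * packet: the header, a mask of the constant slots being loaded, then four
 * dwords (one vec4) per constant; hence the 2 + 4*n batch-space estimate.
 */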
static void
validate_constants(struct i915_context *i915, unsigned *batch_space)
{
   int nr = i915->fs->num_constants ?
            2 + 4 * i915->fs->num_constants : 0;

   *batch_space = nr;
}

static void
emit_constants(struct i915_context *i915)
{
   /* Collate the user-defined constants with the fragment shader's
    * immediates according to the constant_flags[] array.
    */
   const uint nr = i915->fs->num_constants;

   assert(nr < I915_MAX_CONSTANT);
   if (nr) {
      uint i;

      OUT_BATCH(_3DSTATE_PIXEL_SHADER_CONSTANTS | (nr * 4));
      OUT_BATCH((1 << nr) - 1);

      for (i = 0; i < nr; i++) {
         const uint *c;
         if (i915->fs->constant_flags[i] == I915_CONSTFLAG_USER) {
            /* grab user-defined constant */
            c = (uint *) i915_buffer(i915->constants[PIPE_SHADER_FRAGMENT])->data;
            c += 4 * i;
         }
         else {
            /* emit program constant */
            c = (uint *) i915->fs->constants[i];
         }
#if 0 /* debug */
         {
            float *f = (float *) c;
            printf("Const %2d: %f %f %f %f %s\n", i, f[0], f[1], f[2], f[3],
                   (i915->fs->constant_flags[i] == I915_CONSTFLAG_USER
                    ? "user" : "immediate"));
         }
#endif
         OUT_BATCH(*c++);
         OUT_BATCH(*c++);
         OUT_BATCH(*c++);
         OUT_BATCH(*c++);
      }
   }
}

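/* The program atom covers the fragment shader declarations and instructions.
 * When the bound color buffer uses an emulated (swizzled) format, an extra
 * mov instruction is appended in emit_program(), so three more dwords are
 * reserved here and the size field in the first declaration dword is patched
 * to match.
 */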
static void
validate_program(struct i915_context *i915, unsigned *batch_space)
{
   uint additional_size = 0;

   additional_size += i915->current.target_fixup_format ? 3 : 0;

   /* we need more batch space if we want to emulate rgba framebuffers */
   *batch_space = i915->fs->decl_len + i915->fs->program_len + additional_size;
}

static void
emit_program(struct i915_context *i915)
{
   uint additional_size = 0;
   uint i;

   /* count how much additional space we'll need */
   validate_program(i915, &additional_size);
   additional_size -= i915->fs->decl_len + i915->fs->program_len;

   /* we should always have, at least, a pass-through program */
   assert(i915->fs->program_len > 0);

   /* output the declarations */
   {
      /* first word has the size, we have to adjust that */
      uint size = (i915->fs->decl[0]);
      size += additional_size;
      OUT_BATCH(size);
   }

   for (i = 1; i < i915->fs->decl_len; i++)
      OUT_BATCH(i915->fs->decl[i]);

   /* output the program */
   assert(i915->fs->program_len % 3 == 0);
   for (i = 0; i < i915->fs->program_len; i += 3) {
      OUT_BATCH(i915->fs->program[i]);
      OUT_BATCH(i915->fs->program[i + 1]);
      OUT_BATCH(i915->fs->program[i + 2]);
   }

   /* we emit an additional mov with swizzle to fake RGBA framebuffers */
   if (i915->current.target_fixup_format) {
      /* mov out_color, out_color.zyxw */
      OUT_BATCH(A0_MOV |
                (REG_TYPE_OC << A0_DEST_TYPE_SHIFT) |
                A0_DEST_CHANNEL_ALL |
                (REG_TYPE_OC << A0_SRC0_TYPE_SHIFT) |
                (T_DIFFUSE << A0_SRC0_NR_SHIFT));
      OUT_BATCH(i915->current.fixup_swizzle);
      OUT_BATCH(0);
   }
}

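/* The drawing rectangle defines the screen-space origin and size that
 * rendering is restricted to; it only needs to be re-emitted when the static
 * destination state (I915_DST_RECT) is dirty.
 */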
static void
emit_draw_rect(struct i915_context *i915)
{
   if (i915->static_dirty & I915_DST_RECT) {
      OUT_BATCH(_3DSTATE_DRAW_RECT_CMD);
      OUT_BATCH(DRAW_RECT_DIS_DEPTH_OFS);
      OUT_BATCH(i915->current.draw_offset);
      OUT_BATCH(i915->current.draw_size);
      OUT_BATCH(i915->current.draw_offset);
   }
}

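/* Walk all dirty atoms, summing the dwords they will emit and collecting the
 * buffers they reference, then ask the winsys whether those buffers fit in
 * the current batch. Returning FALSE tells the caller to flush and retry
 * with an empty batch.
 */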
static boolean
i915_validate_state(struct i915_context *i915, unsigned *batch_space)
{
   unsigned tmp;

   i915->num_validation_buffers = 0;
   if (i915->hardware_dirty & I915_HW_INVARIANT)
      *batch_space = ARRAY_SIZE(invariant_state);
   else
      *batch_space = 0;

#if 0
   static int counter_total = 0;
#define VALIDATE_ATOM(atom, hw_dirty) \
   if (i915->hardware_dirty & hw_dirty) { \
      static int counter_##atom = 0; \
      validate_##atom(i915, &tmp); \
      *batch_space += tmp; \
      counter_##atom += tmp; \
      counter_total += tmp; \
      printf("%s: \t%d/%d \t%2.2f\n", #atom, counter_##atom, counter_total, counter_##atom * 100.f / counter_total); }
#else
#define VALIDATE_ATOM(atom, hw_dirty) \
   if (i915->hardware_dirty & hw_dirty) { \
      validate_##atom(i915, &tmp); \
      *batch_space += tmp; }
#endif
   VALIDATE_ATOM(flush, I915_HW_FLUSH);
   VALIDATE_ATOM(immediate, I915_HW_IMMEDIATE);
   VALIDATE_ATOM(dynamic, I915_HW_DYNAMIC);
   VALIDATE_ATOM(static, I915_HW_STATIC);
   VALIDATE_ATOM(map, I915_HW_MAP);
   VALIDATE_ATOM(sampler, I915_HW_SAMPLER);
   VALIDATE_ATOM(constants, I915_HW_CONSTANTS);
   VALIDATE_ATOM(program, I915_HW_PROGRAM);
#undef VALIDATE_ATOM

   if (i915->num_validation_buffers == 0)
      return TRUE;

   if (!i915_winsys_validate_buffers(i915->batch, i915->validation_buffers,
                                     i915->num_validation_buffers))
      return FALSE;

   return TRUE;
}

/* Push the state into the sarea and/or texture memory.
 */
void
i915_emit_hardware_state(struct i915_context *i915)
{
   unsigned batch_space;
   uintptr_t save_ptr;

   assert(i915->dirty == 0);

   if (I915_DBG_ON(DBG_ATOMS))
      i915_dump_hardware_dirty(i915, __FUNCTION__);

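   /* Make room for the state: if buffer validation fails or the batch cannot
    * hold batch_space more dwords, flush it and retry. The calls wrapped in
    * assert() below are relied on for their side effects (re-validating the
    * buffers and reserving space in the fresh batch), so this path assumes a
    * build with asserts enabled.
    */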
   if (!i915_validate_state(i915, &batch_space)) {
      FLUSH_BATCH(NULL, I915_FLUSH_ASYNC);
      assert(i915_validate_state(i915, &batch_space));
   }

   if (!BEGIN_BATCH(batch_space)) {
      FLUSH_BATCH(NULL, I915_FLUSH_ASYNC);
      assert(i915_validate_state(i915, &batch_space));
      assert(BEGIN_BATCH(batch_space));
   }

   save_ptr = (uintptr_t)i915->batch->ptr;

#define EMIT_ATOM(atom, hw_dirty) \
   if (i915->hardware_dirty & hw_dirty) \
      emit_##atom(i915);
   EMIT_ATOM(flush, I915_HW_FLUSH);
   EMIT_ATOM(invariant, I915_HW_INVARIANT);
   EMIT_ATOM(immediate, I915_HW_IMMEDIATE);
   EMIT_ATOM(dynamic, I915_HW_DYNAMIC);
   EMIT_ATOM(static, I915_HW_STATIC);
   EMIT_ATOM(map, I915_HW_MAP);
   EMIT_ATOM(sampler, I915_HW_SAMPLER);
   EMIT_ATOM(constants, I915_HW_CONSTANTS);
   EMIT_ATOM(program, I915_HW_PROGRAM);
   EMIT_ATOM(draw_rect, I915_HW_STATIC);
#undef EMIT_ATOM

   I915_DBG(DBG_EMIT, "%s: used %d dwords, %d dwords reserved\n", __FUNCTION__,
            ((uintptr_t)i915->batch->ptr - save_ptr) / 4,
            batch_space);
   assert(((uintptr_t)i915->batch->ptr - save_ptr) / 4 == batch_space);

   i915->hardware_dirty = 0;
   i915->immediate_dirty = 0;
   i915->dynamic_dirty = 0;
   i915->static_dirty = 0;
   i915->flush_dirty = 0;
}