#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H

#include "main/mtypes.h"

#include "intel_context.h"
#include "intel_bufmgr.h"
#include "intel_reg.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Number of bytes to reserve for commands necessary to complete a batch.
 *
 * This includes:
 * - MI_BATCH_BUFFER_END (4 bytes)
 * - Optional MI_NOOP for ensuring the batch length is qword aligned (4 bytes)
 * - Any state emitted by vtbl->finish_batch()
 * - On 965+, this means ending occlusion queries (on Gen6, which has the
 *   most workaround flushes, this can be as much as (4+4+5)*4 = 52 bytes)
 */
#define BATCH_RESERVED 60

struct intel_batchbuffer;

void intel_batchbuffer_init(struct intel_context *intel);
void intel_batchbuffer_reset(struct intel_context *intel);
void intel_batchbuffer_free(struct intel_context *intel);
void intel_batchbuffer_save_state(struct intel_context *intel);
void intel_batchbuffer_reset_to_saved(struct intel_context *intel);

int _intel_batchbuffer_flush(struct intel_context *intel,
                             const char *file, int line);

#define intel_batchbuffer_flush(intel) \
   _intel_batchbuffer_flush(intel, __FILE__, __LINE__)

/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
 * intel_batchbuffer_emit_dword() calls.
 */
void intel_batchbuffer_data(struct intel_context *intel,
                            const void *data, GLuint bytes, bool is_blit);
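
/* A rough sketch of the equivalence described above (illustrative only;
 * assumes `data` holds dword-aligned command words):
 *
 *    intel_batchbuffer_require_space(intel, bytes, is_blit);
 *    for (GLuint i = 0; i < bytes / 4; i++)
 *       intel_batchbuffer_emit_dword(intel, ((const uint32_t *) data)[i]);
 */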

bool intel_batchbuffer_emit_reloc(struct intel_context *intel,
                                  drm_intel_bo *buffer,
                                  uint32_t read_domains,
                                  uint32_t write_domain,
                                  uint32_t offset);
bool intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
                                         drm_intel_bo *buffer,
                                         uint32_t read_domains,
                                         uint32_t write_domain,
                                         uint32_t offset);
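
/* Illustrative usage (with a hypothetical buffer object `bo`): emit a
 * relocation to a buffer the GPU will read through the render cache,
 * with no write domain, using libdrm's I915_GEM_DOMAIN_* flags:
 *
 *    intel_batchbuffer_emit_reloc(intel, bo,
 *                                 I915_GEM_DOMAIN_RENDER, 0, 0);
 */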
void intel_batchbuffer_emit_mi_flush(struct intel_context *intel);
void intel_emit_post_sync_nonzero_flush(struct intel_context *intel);
void intel_emit_depth_stall_flushes(struct intel_context *intel);
void gen7_emit_vs_workaround_flush(struct intel_context *intel);

/* Reinterpret a float's bit pattern as a dword.  Type-punning through a
 * union avoids the strict-aliasing problems of a pointer cast.
 */
static INLINE uint32_t float_as_int(float f)
{
   union {
      float f;
      uint32_t d;
   } fi;

   fi.f = f;
   return fi.d;
}

/* Inline functions - we might actually be better off with these
 * non-inlined.  We would certainly be better off switching all command
 * packets to be passed as structs rather than dwords, but that's a
 * little bit of work...
 */
static INLINE unsigned
intel_batchbuffer_space(struct intel_context *intel)
{
   /* Free space is what lies between the dwords emitted so far (used is
    * counted in dwords, hence the *4) and the state data allocated down
    * from the top of the buffer, minus the reserved completion space.
    */
   return (intel->batch.state_batch_offset - intel->batch.reserved_space)
      - intel->batch.used * 4;
}

static INLINE void
intel_batchbuffer_emit_dword(struct intel_context *intel, GLuint dword)
{
#ifdef DEBUG
   assert(intel_batchbuffer_space(intel) >= 4);
#endif
   intel->batch.map[intel->batch.used++] = dword;
}

static INLINE void
intel_batchbuffer_emit_float(struct intel_context *intel, float f)
{
   intel_batchbuffer_emit_dword(intel, float_as_int(f));
}

static INLINE void
intel_batchbuffer_require_space(struct intel_context *intel,
                                GLuint sz, int is_blit)
{
   /* On Gen6+, blit commands and render commands go to separate rings, so
    * a batch may only contain one kind; switching kinds flushes first.
    */
   if (intel->gen >= 6 &&
       intel->batch.is_blit != is_blit && intel->batch.used) {
      intel_batchbuffer_flush(intel);
   }

   intel->batch.is_blit = is_blit;

#ifdef DEBUG
   assert(sz < sizeof(intel->batch.map) - BATCH_RESERVED);
#endif
   if (intel_batchbuffer_space(intel) < sz)
      intel_batchbuffer_flush(intel);
}
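
/* Illustrative contract (the size is hypothetical): before building a
 * 6-dword render packet, guarantee room on the render path; on Gen6+
 * this may flush a batch that currently holds blit commands:
 *
 *    intel_batchbuffer_require_space(intel, 6 * 4, false);
 */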

static INLINE void
intel_batchbuffer_begin(struct intel_context *intel, int n, bool is_blit)
{
   intel_batchbuffer_require_space(intel, n * 4, is_blit);

   intel->batch.emit = intel->batch.used;
#ifdef DEBUG
   intel->batch.total = n;
#endif
}

static INLINE void
intel_batchbuffer_advance(struct intel_context *intel)
{
#ifdef DEBUG
   struct intel_batchbuffer *batch = &intel->batch;
   unsigned int _n = batch->used - batch->emit;
   assert(batch->total != 0);
   if (_n != batch->total) {
      fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",
              _n, batch->total);
      abort();
   }
   batch->total = 0;
#endif
}

void intel_batchbuffer_cached_advance(struct intel_context *intel);

/* Here are the crusty old macros, to be removed:
 */
#define BATCH_LOCALS

#define BEGIN_BATCH(n) intel_batchbuffer_begin(intel, n, false)
#define BEGIN_BATCH_BLT(n) intel_batchbuffer_begin(intel, n, true)
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel, d)
#define OUT_BATCH_F(f) intel_batchbuffer_emit_float(intel, f)
#define OUT_RELOC(buf, read_domains, write_domain, delta) do {          \
   intel_batchbuffer_emit_reloc(intel, buf,                             \
                                read_domains, write_domain, delta);     \
} while (0)
#define OUT_RELOC_FENCED(buf, read_domains, write_domain, delta) do {   \
   intel_batchbuffer_emit_reloc_fenced(intel, buf,                      \
                                       read_domains, write_domain, delta); \
} while (0)

#define ADVANCE_BATCH() intel_batchbuffer_advance(intel);
#define CACHED_BATCH() intel_batchbuffer_cached_advance(intel);
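
/* A minimal usage sketch of the macros above (the packet chosen is just
 * illustrative; MI_FLUSH and MI_NOOP come from intel_reg.h):
 *
 *    BEGIN_BATCH(2);
 *    OUT_BATCH(MI_FLUSH);
 *    OUT_BATCH(MI_NOOP);
 *    ADVANCE_BATCH();
 *
 * In DEBUG builds, ADVANCE_BATCH() verifies that exactly the two dwords
 * promised to BEGIN_BATCH() were emitted.
 */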

#ifdef __cplusplus
}
#endif

#endif