/**************************************************************************
 *
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef INTELCONTEXT_INC
#define INTELCONTEXT_INC


#include <stdbool.h>
#include <string.h>
#include "main/mtypes.h"
#include "main/mm.h"

#ifdef __cplusplus
extern "C" {
/* Evil hack for using libdrm in a c++ compiler. */
#define virtual virt
#endif

#include <drm.h>
#include <intel_bufmgr.h>
#include <i915_drm.h>
#ifdef __cplusplus
#undef virtual
#endif

#include "intel_screen.h"
#include "intel_tex_obj.h"

#include "tnl/t_vertex.h"

#define TAG(x) intel##x
#include "tnl_dd/t_dd_vertex.h"
#undef TAG
/* Destination buffer ("DV" = destination variables) pixel format codes:
 * hardware encodings for the color buffer format.
 */
#define DV_PF_555  (1<<8)
#define DV_PF_565  (2<<8)
#define DV_PF_8888 (3<<8)
#define DV_PF_4444 (8<<8)
#define DV_PF_1555 (9<<8)

struct intel_region;
struct intel_context;

typedef void (*intel_tri_func) (struct intel_context *, intelVertex *,
                                intelVertex *, intelVertex *);
typedef void (*intel_line_func) (struct intel_context *, intelVertex *,
                                 intelVertex *);
typedef void (*intel_point_func) (struct intel_context *, intelVertex *);
/**
 * Bits for intel->Fallback field
 */
/*@{*/
#define INTEL_FALLBACK_DRAW_BUFFER       0x1
#define INTEL_FALLBACK_READ_BUFFER       0x2
#define INTEL_FALLBACK_DEPTH_BUFFER      0x4
#define INTEL_FALLBACK_STENCIL_BUFFER    0x8
#define INTEL_FALLBACK_USER              0x10
#define INTEL_FALLBACK_RENDERMODE        0x20
#define INTEL_FALLBACK_TEXTURE           0x40
#define INTEL_FALLBACK_DRIVER            0x1000  /**< first for drivers */
/*@}*/

extern void intelFallback(struct intel_context *intel, GLbitfield bit,
                          bool mode);
#define FALLBACK( intel, bit, mode ) intelFallback( intel, bit, mode )
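/* Illustrative usage (not part of the original header): code that detects an
 * unsupported texture configuration would enter software rasterization with
 * FALLBACK(intel, INTEL_FALLBACK_TEXTURE, true) and leave it again with
 * FALLBACK(intel, INTEL_FALLBACK_TEXTURE, false) once the configuration
 * becomes renderable in hardware.
 */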


#define INTEL_WRITE_PART  0x1
#define INTEL_WRITE_FULL  0x2
#define INTEL_READ        0x4

#ifndef likely
#ifdef __GNUC__
#define likely(expr) (__builtin_expect(expr, 1))
#define unlikely(expr) (__builtin_expect(expr, 0))
#else
#define likely(expr) (expr)
#define unlikely(expr) (expr)
#endif
#endif

struct intel_batchbuffer {
   /** Current batchbuffer being queued up. */
   drm_intel_bo *bo;
   /** Last BO submitted to the hardware.  Used for glFinish(). */
   drm_intel_bo *last_bo;

   uint16_t emit, total;
   uint16_t used, reserved_space;
   uint32_t *map;
   uint32_t *cpu_map;
#define BATCH_SZ (8192*sizeof(uint32_t))
};
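/* A rough sketch of how the batch is filled (illustrative only; it assumes
 * the BEGIN_BATCH / OUT_BATCH / ADVANCE_BATCH helpers from
 * intel_batchbuffer.h, and cmd/arg stand in for real hardware dwords):
 *
 *    BEGIN_BATCH(2);
 *    OUT_BATCH(cmd);   // hardware command dword
 *    OUT_BATCH(arg);   // its operand
 *    ADVANCE_BATCH();
 *
 * Commands accumulate in map/bo until the buffer fills up or a flush is
 * required, at which point the batch is submitted to the kernel.
 */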

/**
 * intel_context is derived from Mesa's context class: struct gl_context.
 */
struct intel_context
{
   struct gl_context ctx;  /**< base class, must be first field */

   struct
   {
      void (*destroy) (struct intel_context * intel);
      void (*emit_state) (struct intel_context * intel);
      void (*finish_batch) (struct intel_context * intel);
      void (*new_batch) (struct intel_context * intel);
      void (*emit_invarient_state) (struct intel_context * intel);
      void (*update_texture_state) (struct intel_context * intel);

      void (*render_start) (struct intel_context * intel);
      void (*render_prevalidate) (struct intel_context * intel);
      void (*set_draw_region) (struct intel_context * intel,
                               struct intel_region * draw_regions[],
                               struct intel_region * depth_region,
                               GLuint num_regions);
      void (*update_draw_buffer) (struct intel_context *intel);

      void (*reduced_primitive_state) (struct intel_context * intel,
                                       GLenum rprim);

      bool (*check_vertex_size) (struct intel_context * intel,
                                 GLuint expected);
      void (*invalidate_state) (struct intel_context *intel,
                                GLuint new_state);

      void (*assert_not_dirty) (struct intel_context *intel);

      void (*debug_batch) (struct intel_context *intel);
      void (*annotate_aub) (struct intel_context *intel);
      bool (*render_target_supported) (struct intel_context *intel,
                                       struct gl_renderbuffer *rb);
   } vtbl;
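   /* Per-chipset driver vtable: the assumption here is that the i830- and
    * i915-specific code install their own implementations of these hooks at
    * context creation, so the shared code can dispatch through vtbl without
    * knowing which chipset it is driving.
    */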

   GLbitfield Fallback;  /**< mask of INTEL_FALLBACK_x bits */
   GLuint NewGLState;

   dri_bufmgr *bufmgr;
   unsigned int maxBatchSize;

   /**
    * Generation number of the hardware: 2 is 8xx, 3 is 9xx pre-965, 4 is 965.
    */
   int gen;
   bool is_945;
   bool has_swizzling;

   struct intel_batchbuffer batch;

   drm_intel_bo *first_post_swapbuffers_batch;
   bool need_throttle;
   bool no_batch_wrap;
   bool tnl_pipeline_running; /**< Set while running i915's _tnl_run_pipeline(). */

   /**
    * Set if this is either a debug context or the INTEL_DEBUG=perf
    * environment variable is set; it indicates that the expensive work
    * which may lead to a perf_debug() call should be done.
    */
   bool perf_debug;

   struct
   {
      GLuint id;
      uint32_t start_ptr;          /**< for i8xx */
      uint32_t primitive;          /**< Current hardware primitive type */
      void (*flush) (struct intel_context *);
      drm_intel_bo *vb_bo;
      uint8_t *vb;
      unsigned int start_offset;   /**< Byte offset of primitive sequence */
      unsigned int current_offset; /**< Byte offset of next vertex */
      unsigned int count;          /**< Number of vertices in current primitive */
   } prim;

   struct {
      drm_intel_bo *bo;
      GLuint offset;
      uint32_t buffer_len;
      uint32_t buffer_offset;
      char buffer[4096];
   } upload;

   uint32_t max_gtt_map_object_size;

   /* Offsets of fields within the current vertex:
    */
   GLuint coloroffset;
   GLuint specoffset;
   GLuint wpos_offset;

   struct tnl_attr_map vertex_attrs[VERT_ATTRIB_MAX];
   GLuint vertex_attr_count;

   GLfloat polygon_offset_scale;  /* dependent on depth_scale, bpp */

   bool hw_stipple;
   bool no_rast;
   bool always_flush_batch;
   bool always_flush_cache;
   bool disable_throttling;

   /* State for intelvb.c and inteltris.c.
    */
   GLuint RenderIndex;
   GLmatrix ViewportMatrix;
   GLenum render_primitive;
   GLenum reduced_primitive;  /**< Only gen < 6 */
   GLuint vertex_size;
   GLubyte *verts;  /* points to tnl->clipspace.vertex_buf */

   /* Fallback rasterization functions
    */
   intel_point_func draw_point;
   intel_line_func draw_line;
   intel_tri_func draw_tri;

   /**
    * Set if rendering has occurred to the drawable's front buffer.
    *
    * This is used in the DRI2 case to detect that glFlush should also copy
    * the contents of the fake front buffer to the real front buffer.
    */
   bool front_buffer_dirty;

   bool use_early_z;

   __DRIcontext *driContext;
   struct intel_screen *intelScreen;

   /**
    * Configuration cache
    */
   driOptionCache optionCache;
};

extern char *__progname;


#define SUBPIXEL_X 0.125
#define SUBPIXEL_Y 0.125

#define INTEL_FIREVERTICES(intel)        \
do {                                     \
   if ((intel)->prim.flush)              \
      (intel)->prim.flush(intel);        \
} while (0)
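/* INTEL_FIREVERTICES drains any vertices queued for the current hardware
 * primitive: prim.flush is typically only non-NULL while a primitive is being
 * built, so this is a cheap no-op otherwise.  It is intended for use before
 * operations that must not interleave with queued rendering, e.g. state
 * changes or blits (illustrative):
 *
 *    INTEL_FIREVERTICES(intel);
 *    intel_batchbuffer_flush(intel);
 */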

/* ================================================================
 * Debugging:
 */
extern int INTEL_DEBUG;

#define DEBUG_TEXTURE   0x1
#define DEBUG_STATE     0x2
#define DEBUG_BLIT      0x8
#define DEBUG_MIPTREE   0x10
#define DEBUG_PERF      0x20
#define DEBUG_BATCH     0x80
#define DEBUG_PIXEL     0x100
#define DEBUG_BUFMGR    0x200
#define DEBUG_REGION    0x400
#define DEBUG_FBO       0x800
#define DEBUG_SYNC      0x2000
#define DEBUG_DRI       0x10000
#define DEBUG_STATS     0x100000
#define DEBUG_WM        0x400000
#define DEBUG_AUB       0x4000000

#ifdef HAVE_ANDROID_PLATFORM
#define LOG_TAG "INTEL-MESA"
#include <cutils/log.h>
#ifndef ALOGW
#define ALOGW LOGW
#endif
#define dbg_printf(...) ALOGW(__VA_ARGS__)
#else
#define dbg_printf(...) printf(__VA_ARGS__)
#endif /* HAVE_ANDROID_PLATFORM */

#define DBG(...) do {                                   \
   if (unlikely(INTEL_DEBUG & FILE_DEBUG_FLAG))         \
      dbg_printf(__VA_ARGS__);                          \
} while(0)
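/* FILE_DEBUG_FLAG is deliberately not defined here: each .c file that uses
 * DBG() defines it to one of the DEBUG_* bits above, selecting which debug
 * category its messages belong to.  Illustrative (mt is a hypothetical local):
 *
 *    #define FILE_DEBUG_FLAG DEBUG_TEXTURE
 *    ...
 *    DBG("%s: miptree %p\n", __func__, mt);
 */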

#define perf_debug(...) do {                            \
   static GLuint msg_id = 0;                            \
   if (unlikely(INTEL_DEBUG & DEBUG_PERF))              \
      dbg_printf(__VA_ARGS__);                          \
   if (intel->perf_debug)                               \
      _mesa_gl_debug(&intel->ctx, &msg_id,              \
                     MESA_DEBUG_SOURCE_API,             \
                     MESA_DEBUG_TYPE_PERFORMANCE,       \
                     MESA_DEBUG_SEVERITY_MEDIUM,        \
                     __VA_ARGS__);                      \
} while(0)
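/* perf_debug() relies on a local variable named `intel` being in scope at the
 * call site.  It prints to the debug log when INTEL_DEBUG=perf is set and,
 * when intel->perf_debug is set, also forwards the message through
 * _mesa_gl_debug() so it shows up in the GL debug output stream.
 * Illustrative:
 *
 *    perf_debug("Falling back to software rendering for line stipple\n");
 */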

#define WARN_ONCE(cond, fmt...) do {                            \
   if (unlikely(cond)) {                                        \
      static bool _warned = false;                              \
      static GLuint msg_id = 0;                                 \
      if (!_warned) {                                           \
         fprintf(stderr, "WARNING: ");                          \
         fprintf(stderr, fmt);                                  \
         _warned = true;                                        \
                                                                \
         _mesa_gl_debug(ctx, &msg_id,                           \
                        MESA_DEBUG_SOURCE_API,                  \
                        MESA_DEBUG_TYPE_OTHER,                  \
                        MESA_DEBUG_SEVERITY_HIGH, fmt);         \
      }                                                         \
   }                                                            \
} while (0)
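/* WARN_ONCE() requires a variable named `ctx` (a struct gl_context pointer)
 * in scope at the call site, and the static _warned flag limits it to one
 * warning per call site.  Illustrative (stride/max_stride are hypothetical):
 *
 *    WARN_ONCE(stride > max_stride,
 *              "stride %d exceeds hardware limit\n", stride);
 */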

/* ================================================================
 * intel_context.c:
 */

extern const char *const i915_vendor_string;

extern const char *i915_get_renderer_string(unsigned deviceID);

extern bool intelInitContext(struct intel_context *intel,
                             int api,
                             unsigned major_version,
                             unsigned minor_version,
                             uint32_t flags,
                             const struct gl_config * mesaVis,
                             __DRIcontext * driContextPriv,
                             void *sharedContextPrivate,
                             struct dd_function_table *functions,
                             unsigned *dri_ctx_error);

extern void intelFinish(struct gl_context * ctx);
extern void intel_flush_rendering_to_batch(struct gl_context *ctx);
extern void _intel_flush(struct gl_context * ctx, const char *file, int line);

#define intel_flush(ctx) _intel_flush(ctx, __FILE__, __LINE__)
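/* The intel_flush() wrapper passes the caller's __FILE__/__LINE__ through to
 * _intel_flush(), so flushes can be attributed to their call site when
 * debugging; callers simply write intel_flush(ctx).
 */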

extern void intelInitDriverFunctions(struct dd_function_table *functions);

void intel_init_syncobj_functions(struct dd_function_table *functions);


/* ================================================================
 * intel_state.c:
 */

#define COMPAREFUNC_ALWAYS       0
#define COMPAREFUNC_NEVER        0x1
#define COMPAREFUNC_LESS         0x2
#define COMPAREFUNC_EQUAL        0x3
#define COMPAREFUNC_LEQUAL       0x4
#define COMPAREFUNC_GREATER      0x5
#define COMPAREFUNC_NOTEQUAL     0x6
#define COMPAREFUNC_GEQUAL       0x7

#define STENCILOP_KEEP           0
#define STENCILOP_ZERO           0x1
#define STENCILOP_REPLACE        0x2
#define STENCILOP_INCRSAT        0x3
#define STENCILOP_DECRSAT        0x4
#define STENCILOP_INCR           0x5
#define STENCILOP_DECR           0x6
#define STENCILOP_INVERT         0x7

#define LOGICOP_CLEAR            0
#define LOGICOP_NOR              0x1
#define LOGICOP_AND_INV          0x2
#define LOGICOP_COPY_INV         0x3
#define LOGICOP_AND_RVRSE        0x4
#define LOGICOP_INV              0x5
#define LOGICOP_XOR              0x6
#define LOGICOP_NAND             0x7
#define LOGICOP_AND              0x8
#define LOGICOP_EQUIV            0x9
#define LOGICOP_NOOP             0xa
#define LOGICOP_OR_INV           0xb
#define LOGICOP_COPY             0xc
#define LOGICOP_OR_RVRSE         0xd
#define LOGICOP_OR               0xe
#define LOGICOP_SET              0xf

#define BLENDFACT_ZERO                   0x01
#define BLENDFACT_ONE                    0x02
#define BLENDFACT_SRC_COLR               0x03
#define BLENDFACT_INV_SRC_COLR           0x04
#define BLENDFACT_SRC_ALPHA              0x05
#define BLENDFACT_INV_SRC_ALPHA          0x06
#define BLENDFACT_DST_ALPHA              0x07
#define BLENDFACT_INV_DST_ALPHA          0x08
#define BLENDFACT_DST_COLR               0x09
#define BLENDFACT_INV_DST_COLR           0x0a
#define BLENDFACT_SRC_ALPHA_SATURATE     0x0b
#define BLENDFACT_CONST_COLOR            0x0c
#define BLENDFACT_INV_CONST_COLOR        0x0d
#define BLENDFACT_CONST_ALPHA            0x0e
#define BLENDFACT_INV_CONST_ALPHA        0x0f
#define BLENDFACT_MASK                   0x0f

enum {
   DRI_CONF_BO_REUSE_DISABLED,
   DRI_CONF_BO_REUSE_ALL
};

extern int intel_translate_shadow_compare_func(GLenum func);
extern int intel_translate_compare_func(GLenum func);
extern int intel_translate_stencil_op(GLenum op);
extern int intel_translate_blend_factor(GLenum factor);
extern int intel_translate_logic_op(GLenum opcode);
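/* These helpers map GL enums onto the hardware encodings above; for example
 * (illustrative), intel_translate_compare_func(GL_LEQUAL) is expected to
 * yield COMPAREFUNC_LEQUAL, and intel_translate_blend_factor(GL_ONE) to
 * yield BLENDFACT_ONE.
 */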

void intel_update_renderbuffers(__DRIcontext *context,
                                __DRIdrawable *drawable);
void intel_prepare_render(struct intel_context *intel);

void i915_set_buf_info_for_region(uint32_t *state, struct intel_region *region,
                                  uint32_t buffer_id);
void intel_init_texture_formats(struct gl_context *ctx);

/*======================================================================
 * Inline conversion functions.
 * These are better-typed than the macros used previously:
 */
static inline struct intel_context *
intel_context(struct gl_context * ctx)
{
   return (struct intel_context *) ctx;
}
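/* Illustrative downcast: driver entry points receive a struct gl_context and
 * recover the driver context with
 *
 *    struct intel_context *intel = intel_context(ctx);
 *
 * which is valid because struct gl_context is the first field of
 * struct intel_context.
 */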

#ifdef __cplusplus
}
#endif

#endif