1 /**************************************************************************
2 *
3 * Copyright 2003 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #ifndef INTELCONTEXT_INC
29 #define INTELCONTEXT_INC
30
31
32 #include <stdbool.h>
33 #include <string.h>
34 #include "main/mtypes.h"
35 #include "main/errors.h"
36
37 #include "drm-uapi/drm.h"
38 #include <intel_bufmgr.h>
39 #include "drm-uapi/i915_drm.h"
40
41 #include "intel_screen.h"
42 #include "intel_tex_obj.h"
43
44 #include "tnl/t_vertex.h"
45
46 #define TAG(x) intel##x
47 #include "tnl_dd/t_dd_vertex.h"
48 #undef TAG
49
50 #define DV_PF_555 (1<<8)
51 #define DV_PF_565 (2<<8)
52 #define DV_PF_8888 (3<<8)
53 #define DV_PF_4444 (8<<8)
54 #define DV_PF_1555 (9<<8)
55
56 struct intel_region;
57 struct intel_context;
58
59 typedef void (*intel_tri_func) (struct intel_context *, intelVertex *,
60 intelVertex *, intelVertex *);
61 typedef void (*intel_line_func) (struct intel_context *, intelVertex *,
62 intelVertex *);
63 typedef void (*intel_point_func) (struct intel_context *, intelVertex *);
64
65 /**
66 * Bits for intel->Fallback field
67 */
68 /*@{*/
69 #define INTEL_FALLBACK_DRAW_BUFFER 0x1
70 #define INTEL_FALLBACK_READ_BUFFER 0x2
71 #define INTEL_FALLBACK_DEPTH_BUFFER 0x4
72 #define INTEL_FALLBACK_STENCIL_BUFFER 0x8
73 #define INTEL_FALLBACK_USER 0x10
74 #define INTEL_FALLBACK_RENDERMODE 0x20
75 #define INTEL_FALLBACK_TEXTURE 0x40
76 #define INTEL_FALLBACK_DRIVER 0x1000 /**< first for drivers */
77 /*@}*/
78
79 extern void intelFallback(struct intel_context *intel, GLbitfield bit,
80 bool mode);
81 #define FALLBACK( intel, bit, mode ) intelFallback( intel, bit, mode )
82
83
84 #define INTEL_WRITE_PART 0x1
85 #define INTEL_WRITE_FULL 0x2
86 #define INTEL_READ 0x4
87
#ifndef likely
#ifdef __GNUC__
/* Branch-prediction hints.  The !!(expr) normalizes any non-zero value to 1
 * so the first argument of __builtin_expect can actually equal the expected
 * constant for mask expressions like (INTEL_DEBUG & DEBUG_PERF); without the
 * normalization the hint silently never matches for such expressions. */
#define likely(expr) (__builtin_expect(!!(expr), 1))
#define unlikely(expr) (__builtin_expect(!!(expr), 0))
#else
/* Fallback: no prediction hint, but keep the 0/1 normalization so both
 * branches of this #if have identical semantics. */
#define likely(expr) (!!(expr))
#define unlikely(expr) (!!(expr))
#endif
#endif
97
/**
 * A batch of hardware commands being assembled for submission to the kernel.
 */
struct intel_batchbuffer {
   /** Current batchbuffer being queued up. */
   drm_intel_bo *bo;
   /** Last BO submitted to the hardware.  Used for glFinish(). */
   drm_intel_bo *last_bo;

   /* NOTE(review): bookkeeping counters; exact units (bytes vs dwords) are
    * established by intel_batchbuffer.c — confirm there before relying on
    * them. */
   uint16_t emit, total;
   /* Current fill level of 'map', and space held back for end-of-batch
    * commands — TODO confirm units against intel_batchbuffer.c. */
   uint16_t used, reserved_space;
   uint32_t *map;      /**< CPU-visible buffer commands are written into */
   uint32_t *cpu_map;  /**< presumably a malloc'd shadow used when the BO is not mapped — confirm */
/** Fixed allocation size of a batchbuffer, in bytes. */
#define BATCH_SZ (8192*sizeof(uint32_t))
};
110
111 /**
112 * intel_context is derived from Mesa's context class: struct gl_context.
113 */
/**
 * intel_context is derived from Mesa's context class: struct gl_context.
 *
 * Shared between the i8xx/i9xx generations; generation-specific behavior is
 * reached through the vtbl hooks below.
 */
struct intel_context
{
   struct gl_context ctx;  /**< base class, must be first field */

   /** Hooks filled in by the chipset-specific driver code. */
   struct
   {
      void (*destroy) (struct intel_context * intel);
      void (*emit_state) (struct intel_context * intel);
      /** Called before the current batchbuffer is flushed. */
      void (*finish_batch) (struct intel_context * intel);
      /** Called after a new batchbuffer has been started. */
      void (*new_batch) (struct intel_context * intel);
      void (*emit_invarient_state) (struct intel_context * intel);
      void (*update_texture_state) (struct intel_context * intel);

      void (*render_start) (struct intel_context * intel);
      void (*render_prevalidate) (struct intel_context * intel);
      void (*set_draw_region) (struct intel_context * intel,
                               struct intel_region * draw_regions[],
                               struct intel_region * depth_region,
                               GLuint num_regions);
      void (*update_draw_buffer)(struct intel_context *intel);

      void (*reduced_primitive_state) (struct intel_context * intel,
                                       GLenum rprim);

      /** Returns false when the hardware can't handle the vertex size. */
      bool (*check_vertex_size) (struct intel_context * intel,
                                 GLuint expected);
      void (*invalidate_state) (struct intel_context *intel,
                                GLuint new_state);

      void (*assert_not_dirty) (struct intel_context *intel);

      void (*debug_batch)(struct intel_context *intel);
      void (*annotate_aub)(struct intel_context *intel);
      bool (*render_target_supported)(struct intel_context *intel,
                                      struct gl_renderbuffer *rb);
   } vtbl;

   GLbitfield Fallback;  /**< mask of INTEL_FALLBACK_x bits */
   GLuint NewGLState;    /**< pending GL state changes — presumably accumulated ctx->NewState bits; confirm in intel_state.c */

   dri_bufmgr *bufmgr;          /**< libdrm buffer manager for this context */
   unsigned int maxBatchSize;   /**< batchbuffer size — TODO confirm units (bytes) at the allocation site */

   /**
    * Generation number of the hardware: 2 is 8xx, 3 is 9xx pre-965, 4 is 965.
    */
   int gen;
   bool is_945;

   struct intel_batchbuffer batch;  /**< command batch currently being built */

   /* Throttling: remember a batch BO from just after a swapbuffers so we can
    * wait on it later — NOTE(review): exact policy lives in the .c files. */
   drm_intel_bo *first_post_swapbuffers_batch;
   bool need_throttle;
   /** Set while emitting sequences that must not be split across batches. */
   bool no_batch_wrap;
   bool tnl_pipeline_running; /**< Set while i915's _tnl_run_pipeline. */

   /**
    * Set if we're either a debug context or the INTEL_DEBUG=perf environment
    * variable is set, this is the flag indicating to do expensive work that
    * might lead to a perf_debug() call.
    */
   bool perf_debug;

   /** State of the hardware primitive currently being assembled. */
   struct
   {
      GLuint id;
      uint32_t start_ptr; /**< for i8xx */
      uint32_t primitive; /**< Current hardware primitive type */
      /** Flushes queued vertices; NULL when no primitive is in flight. */
      void (*flush) (struct intel_context *);
      drm_intel_bo *vb_bo;  /**< vertex buffer BO */
      uint8_t *vb;          /**< CPU pointer into the vertex buffer */
      unsigned int start_offset; /**< Byte offset of primitive sequence */
      unsigned int current_offset; /**< Byte offset of next vertex */
      unsigned int count; /**< Number of vertices in current primitive */
   } prim;

   /** Staging state for streaming data uploads — presumably batched through
    * 'buffer' before being copied into 'bo'; confirm in the upload code. */
   struct {
      drm_intel_bo *bo;
      GLuint offset;
      uint32_t buffer_len;
      uint32_t buffer_offset;
      char buffer[4096];
   } upload;

   /** Size threshold for GTT-mapping objects — usage defined elsewhere. */
   uint32_t max_gtt_map_object_size;

   /* Offsets of fields within the current vertex:
    */
   GLuint coloroffset;
   GLuint specoffset;
   GLuint wpos_offset;

   struct tnl_attr_map vertex_attrs[VERT_ATTRIB_MAX];
   GLuint vertex_attr_count;

   GLfloat polygon_offset_scale; /* dependent on depth_scale, bpp */

   bool hw_stipple;          /**< hardware polygon stipple available */
   bool no_rast;             /**< force software rasterization fallback */
   bool always_flush_batch;  /**< debug aid: flush batch after each draw */
   bool always_flush_cache;  /**< debug aid: flush render cache eagerly */
   bool disable_throttling;

   /* State for intelvb.c and inteltris.c.
    */
   GLuint RenderIndex;
   GLmatrix ViewportMatrix;
   GLenum render_primitive;
   GLenum reduced_primitive; /*< Only gen < 6 */
   GLuint vertex_size;
   GLubyte *verts; /* points to tnl->clipspace.vertex_buf */

   /* Fallback rasterization functions
    */
   intel_point_func draw_point;
   intel_line_func draw_line;
   intel_tri_func draw_tri;

   /**
    * Set if rendering has occurred to the drawable's front buffer.
    *
    * This is used in the DRI2 case to detect that glFlush should also copy
    * the contents of the fake front buffer to the real front buffer.
    */
   bool front_buffer_dirty;

   __DRIcontext *driContext;          /**< DRI context handle */
   struct intel_screen *intelScreen;  /**< screen this context belongs to */

   /**
    * Configuration cache
    */
   driOptionCache optionCache;
};
248
249 extern char *__progname;
250
251
252 #define SUBPIXEL_X 0.125
253 #define SUBPIXEL_Y 0.125
254
/**
 * Flush any vertices queued for the current hardware primitive.
 * No-op when no primitive is in flight (prim.flush is NULL).
 */
#define INTEL_FIREVERTICES(intel)		\
do {						\
   if ((intel)->prim.flush)			\
      (intel)->prim.flush(intel);		\
} while (0)
260
261 /* ================================================================
262 * Debugging:
263 */
264 extern int INTEL_DEBUG;
265
266 #define DEBUG_TEXTURE 0x1
267 #define DEBUG_STATE 0x2
268 #define DEBUG_BLIT 0x8
269 #define DEBUG_MIPTREE 0x10
270 #define DEBUG_PERF 0x20
271 #define DEBUG_BATCH 0x80
272 #define DEBUG_PIXEL 0x100
273 #define DEBUG_BUFMGR 0x200
274 #define DEBUG_REGION 0x400
275 #define DEBUG_FBO 0x800
276 #define DEBUG_SYNC 0x2000
277 #define DEBUG_DRI 0x10000
278 #define DEBUG_STATS 0x100000
279 #define DEBUG_WM 0x400000
280 #define DEBUG_AUB 0x4000000
281
282 #ifdef HAVE_ANDROID_PLATFORM
283 #define LOG_TAG "INTEL-MESA"
284 #if ANDROID_API_LEVEL >= 26
285 #include <log/log.h>
286 #else
287 #include <cutils/log.h>
288 #endif /* use log/log.h start from android 8 major version */
289 #ifndef ALOGW
290 #define ALOGW LOGW
291 #endif
292 #define dbg_printf(...) ALOGW(__VA_ARGS__)
293 #else
294 #define dbg_printf(...) printf(__VA_ARGS__)
295 #endif /* HAVE_ANDROID_PLATFORM */
296
/**
 * Per-file debug output: prints only when the including .c file's
 * FILE_DEBUG_FLAG bit is set in INTEL_DEBUG.  Each source file must
 * #define FILE_DEBUG_FLAG (one of the DEBUG_* bits above) before use.
 */
#define DBG(...) do {						\
   if (unlikely(INTEL_DEBUG & FILE_DEBUG_FLAG))			\
      dbg_printf(__VA_ARGS__);					\
} while(0)
301
/**
 * Report a performance problem: printed when INTEL_DEBUG contains DEBUG_PERF,
 * and forwarded to the GL debug-output stream when intel->perf_debug is set.
 *
 * NOTE: expands to code referencing a variable named `intel`
 * (struct intel_context *), which must be in scope at the call site.
 */
#define perf_debug(...) do {					\
   static GLuint msg_id = 0;					\
   if (unlikely(INTEL_DEBUG & DEBUG_PERF))			\
      dbg_printf(__VA_ARGS__);					\
   if (intel->perf_debug)					\
      _mesa_gl_debugf(&intel->ctx, &msg_id,			\
                      MESA_DEBUG_SOURCE_API,			\
                      MESA_DEBUG_TYPE_PERFORMANCE,		\
                      MESA_DEBUG_SEVERITY_MEDIUM,		\
                      __VA_ARGS__);				\
} while(0)
313
/**
 * Warn at most once per call site when `cond` holds: prints to stderr and
 * forwards to the GL debug-output stream (guarded by a function-local
 * static, so the warning fires only on the first triggering call).
 *
 * NOTE: expands to code referencing a variable named `ctx`
 * (struct gl_context *), which must be in scope at the call site.
 * Uses GCC's named variadic-macro extension (fmt...).
 */
#define WARN_ONCE(cond, fmt...) do {				\
   if (unlikely(cond)) {					\
      static bool _warned = false;				\
      static GLuint msg_id = 0;					\
      if (!_warned) {						\
         fprintf(stderr, "WARNING: ");				\
         fprintf(stderr, fmt);					\
         _warned = true;					\
								\
         _mesa_gl_debugf(ctx, &msg_id,				\
                         MESA_DEBUG_SOURCE_API,			\
                         MESA_DEBUG_TYPE_OTHER,			\
                         MESA_DEBUG_SEVERITY_HIGH, fmt);	\
      }								\
   }								\
} while (0)
330
331 /* ================================================================
332 * intel_context.c:
333 */
334
335 extern const char *const i915_vendor_string;
336
337 extern const char *i915_get_renderer_string(unsigned deviceID);
338
339 extern bool intelInitContext(struct intel_context *intel,
340 int api,
341 unsigned major_version,
342 unsigned minor_version,
343 uint32_t flags,
344 const struct gl_config * mesaVis,
345 __DRIcontext * driContextPriv,
346 void *sharedContextPrivate,
347 struct dd_function_table *functions,
348 unsigned *dri_ctx_error);
349
350 extern void intelFinish(struct gl_context * ctx);
351 extern void intel_flush_rendering_to_batch(struct gl_context *ctx);
352 extern void _intel_flush(struct gl_context * ctx, const char *file, int line);
353
354 #define intel_flush(ctx) _intel_flush(ctx, __FILE__, __LINE__)
355
356 extern void intelInitDriverFunctions(struct dd_function_table *functions);
357
358 void intel_init_syncobj_functions(struct dd_function_table *functions);
359
360
361 /* ================================================================
362 * intel_state.c:
363 */
364
365 #define COMPAREFUNC_ALWAYS 0
366 #define COMPAREFUNC_NEVER 0x1
367 #define COMPAREFUNC_LESS 0x2
368 #define COMPAREFUNC_EQUAL 0x3
369 #define COMPAREFUNC_LEQUAL 0x4
370 #define COMPAREFUNC_GREATER 0x5
371 #define COMPAREFUNC_NOTEQUAL 0x6
372 #define COMPAREFUNC_GEQUAL 0x7
373
374 #define STENCILOP_KEEP 0
375 #define STENCILOP_ZERO 0x1
376 #define STENCILOP_REPLACE 0x2
377 #define STENCILOP_INCRSAT 0x3
378 #define STENCILOP_DECRSAT 0x4
379 #define STENCILOP_INCR 0x5
380 #define STENCILOP_DECR 0x6
381 #define STENCILOP_INVERT 0x7
382
383 #define LOGICOP_CLEAR 0
384 #define LOGICOP_NOR 0x1
385 #define LOGICOP_AND_INV 0x2
386 #define LOGICOP_COPY_INV 0x3
387 #define LOGICOP_AND_RVRSE 0x4
388 #define LOGICOP_INV 0x5
389 #define LOGICOP_XOR 0x6
390 #define LOGICOP_NAND 0x7
391 #define LOGICOP_AND 0x8
392 #define LOGICOP_EQUIV 0x9
393 #define LOGICOP_NOOP 0xa
394 #define LOGICOP_OR_INV 0xb
395 #define LOGICOP_COPY 0xc
396 #define LOGICOP_OR_RVRSE 0xd
397 #define LOGICOP_OR 0xe
398 #define LOGICOP_SET 0xf
399
400 #define BLENDFACT_ZERO 0x01
401 #define BLENDFACT_ONE 0x02
402 #define BLENDFACT_SRC_COLR 0x03
403 #define BLENDFACT_INV_SRC_COLR 0x04
404 #define BLENDFACT_SRC_ALPHA 0x05
405 #define BLENDFACT_INV_SRC_ALPHA 0x06
406 #define BLENDFACT_DST_ALPHA 0x07
407 #define BLENDFACT_INV_DST_ALPHA 0x08
408 #define BLENDFACT_DST_COLR 0x09
409 #define BLENDFACT_INV_DST_COLR 0x0a
410 #define BLENDFACT_SRC_ALPHA_SATURATE 0x0b
411 #define BLENDFACT_CONST_COLOR 0x0c
412 #define BLENDFACT_INV_CONST_COLOR 0x0d
413 #define BLENDFACT_CONST_ALPHA 0x0e
414 #define BLENDFACT_INV_CONST_ALPHA 0x0f
415 #define BLENDFACT_MASK 0x0f
416
/**
 * Buffer-object reuse policy — presumably the values of the "bo_reuse"
 * driconf option; confirm against the option parsing in intel_screen.c.
 */
enum {
   DRI_CONF_BO_REUSE_DISABLED,
   DRI_CONF_BO_REUSE_ALL
};
421
422 extern int intel_translate_shadow_compare_func(GLenum func);
423 extern int intel_translate_compare_func(GLenum func);
424 extern int intel_translate_stencil_op(GLenum op);
425 extern int intel_translate_blend_factor(GLenum factor);
426
427 void intel_update_renderbuffers(__DRIcontext *context,
428 __DRIdrawable *drawable);
429 void intel_prepare_render(struct intel_context *intel);
430
431 void i915_set_buf_info_for_region(uint32_t *state, struct intel_region *region,
432 uint32_t buffer_id);
433 void intel_init_texture_formats(struct gl_context *ctx);
434
435 /*======================================================================
436 * Inline conversion functions.
437 * These are better-typed than the macros used previously:
438 */
/**
 * Downcast a Mesa gl_context pointer to the driver's intel_context.
 *
 * Valid because struct intel_context embeds its gl_context as the first
 * member, so both structs share the same starting address.
 */
static inline struct intel_context *
intel_context(struct gl_context * ctx)
{
   struct intel_context *const intel = (struct intel_context *) ctx;
   return intel;
}
444
445 #endif
446